diff --git a/.gitattributes b/.gitattributes index 6336972b9..4e8490d51 100644 --- a/.gitattributes +++ b/.gitattributes @@ -4,35 +4,29 @@ *.sh linguist-detectable=true *.bash linguist-language=Shell *.func linguist-language=Shell +*.func linguist-detectable=true *.install linguist-language=Shell # --------------------------------------- -# Treat Golang files as Go (for /api/) -api/**/*.go linguist-language=Go - +# Exclude header art from stats # --------------------------------------- -# Treat frontend as JavaScript/TypeScript (optional) -frontend/**/*.ts linguist-language=TypeScript -frontend/**/*.js linguist-language=JavaScript +ct/headers/* linguist-documentation # --------------------------------------- # Exclude documentation from stats +# --------------------------------------- *.md linguist-documentation -docs/** linguist-documentation README.md linguist-documentation CONTRIBUTING.md linguist-documentation SECURITY.md linguist-documentation # --------------------------------------- # Exclude generated/config files -*.json linguist-generated -frontend/public/json/*.json linguist-generated=false -*.lock linguist-generated -*.yml linguist-generated -*.yaml linguist-generated +# --------------------------------------- .github/** linguist-generated .vscode/** linguist-generated # --------------------------------------- # Standard text handling +# --------------------------------------- * text=auto eol=lf diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index b25e618ad..fdd218976 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -12,6 +12,3 @@ # Set default reviewers * @community-scripts/Contributor - -# All changes in frontend -/frontend/ @community-scripts/Frontend-Dev diff --git a/.github/changelogs/2026/03.md b/.github/changelogs/2026/03.md index 9a2e0de42..6d12cec39 100644 --- a/.github/changelogs/2026/03.md +++ b/.github/changelogs/2026/03.md @@ -1,3 +1,531 @@ +## 2026-03-31 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Graylog: set 
vm.max_map_count on host for OpenSearch [@MickLesk](https://github.com/MickLesk) ([#13441](https://github.com/community-scripts/ProxmoxVE/pull/13441)) + - Koillection: ensure newline before appending to .env.local [@MickLesk](https://github.com/MickLesk) ([#13440](https://github.com/community-scripts/ProxmoxVE/pull/13440)) + +### 💾 Core + + - #### 🔧 Refactor + + - core: skip empty gateway value in network config [@MickLesk](https://github.com/MickLesk) ([#13442](https://github.com/community-scripts/ProxmoxVE/pull/13442)) + +## 2026-03-30 + +### 🆕 New Scripts + + - Bambuddy ([#13411](https://github.com/community-scripts/ProxmoxVE/pull/13411)) + +### 🚀 Updated Scripts + + - #### 💥 Breaking Changes + + - Rename: BirdNET > BirdNET-Go [@MickLesk](https://github.com/MickLesk) ([#13410](https://github.com/community-scripts/ProxmoxVE/pull/13410)) + +## 2026-03-29 + +### 🆕 New Scripts + + - YOURLS ([#13379](https://github.com/community-scripts/ProxmoxVE/pull/13379)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - fix(victoriametrics): use jq to filter releases [@Joery-M](https://github.com/Joery-M) ([#13393](https://github.com/community-scripts/ProxmoxVE/pull/13393)) + - Ollama: add error handling for Intel GPG key imports [@MickLesk](https://github.com/MickLesk) ([#13397](https://github.com/community-scripts/ProxmoxVE/pull/13397)) + - Immich: ignore Redis connection error on maintenance mode disable [@MickLesk](https://github.com/MickLesk) ([#13398](https://github.com/community-scripts/ProxmoxVE/pull/13398)) + - NPM: unmask openresty after migration from package [@MickLesk](https://github.com/MickLesk) ([#13399](https://github.com/community-scripts/ProxmoxVE/pull/13399)) + +## 2026-03-28 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Fix: Update gokapi binary name for v2.2.4+ and add migration step [@krazos](https://github.com/krazos) ([#13377](https://github.com/community-scripts/ProxmoxVE/pull/13377)) + - Fix: update gokapi asset matching for v2.2.4+ naming 
convention [@krazos](https://github.com/krazos) ([#13369](https://github.com/community-scripts/ProxmoxVE/pull/13369)) + - Tandoor Recipes: Add missing env variable [@tremor021](https://github.com/tremor021) ([#13365](https://github.com/community-scripts/ProxmoxVE/pull/13365)) + + - #### ✨ New Features + + - FileFlows: add option to install Node [@tremor021](https://github.com/tremor021) ([#13368](https://github.com/community-scripts/ProxmoxVE/pull/13368)) + +## 2026-03-27 + +### 🆕 New Scripts + + - Matter-Server ([#13355](https://github.com/community-scripts/ProxmoxVE/pull/13355)) +- GeoPulse ([#13320](https://github.com/community-scripts/ProxmoxVE/pull/13320)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - RevealJS: Switch from gulp to vite [@tremor021](https://github.com/tremor021) ([#13336](https://github.com/community-scripts/ProxmoxVE/pull/13336)) + + - #### ✨ New Features + + - Dispatcharr add custom Postgres port support for upgrade [@MickLesk](https://github.com/MickLesk) ([#13347](https://github.com/community-scripts/ProxmoxVE/pull/13347)) + - Immich: bump to v2.6.3 [@MickLesk](https://github.com/MickLesk) ([#13324](https://github.com/community-scripts/ProxmoxVE/pull/13324)) + +### 🧰 Tools + + - #### ✨ New Features + + - Refactor/Feature-Bump/Security: Update-Cron-LXCs (Now Local Mode!) 
[@MickLesk](https://github.com/MickLesk) ([#13339](https://github.com/community-scripts/ProxmoxVE/pull/13339)) + +## 2026-03-26 + +### 🆕 New Scripts + + - BirdNET ([#13313](https://github.com/community-scripts/ProxmoxVE/pull/13313)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Immich: Bump to 2.6.2 | use start.sh in service, ensure DB_HOSTNAME in .env | Fix Rights Issue with ZFS Shares [@MickLesk](https://github.com/MickLesk) ([#13199](https://github.com/community-scripts/ProxmoxVE/pull/13199)) + + - #### ✨ New Features + + - SparkyFitness: add garmin microservice as addon [@tomfrenzel](https://github.com/tomfrenzel) ([#12642](https://github.com/community-scripts/ProxmoxVE/pull/12642)) + - Frigate: bump to v0.17.1 & change build order [@MickLesk](https://github.com/MickLesk) ([#13304](https://github.com/community-scripts/ProxmoxVE/pull/13304)) + +### 💾 Core + + - #### 🐞 Bug Fixes + + - tools.func: pin npm to 11.11.0 to work around Node.js 22.22.2 regression [@MickLesk](https://github.com/MickLesk) ([#13296](https://github.com/community-scripts/ProxmoxVE/pull/13296)) + + - #### ✨ New Features + + - core: APT/APK Mirror Fallback for CDN Failures [@MickLesk](https://github.com/MickLesk) ([#13316](https://github.com/community-scripts/ProxmoxVE/pull/13316)) + - core/tools: replace generic return 1 exit_codes with more specific exit_codes [@MickLesk](https://github.com/MickLesk) ([#13311](https://github.com/community-scripts/ProxmoxVE/pull/13311)) + + - #### 🔧 Refactor + + - core: use /usr/bin/install to prevent function shadowing [@MickLesk](https://github.com/MickLesk) ([#13299](https://github.com/community-scripts/ProxmoxVE/pull/13299)) + +### 🧰 Tools + + - #### 🐞 Bug Fixes + + - SparkyFitness-Garmin: fix app name [@tomfrenzel](https://github.com/tomfrenzel) ([#13325](https://github.com/community-scripts/ProxmoxVE/pull/13325)) + +## 2026-03-25 + +### 🚀 Updated Scripts + + - #### ✨ New Features + + - Komodo v2: migrate env vars to v2 and update source 
[@MickLesk](https://github.com/MickLesk) ([#13262](https://github.com/community-scripts/ProxmoxVE/pull/13262)) + +### 💾 Core + + - #### 🔧 Refactor + + - core: make shell command substitutions safe with || true [@MickLesk](https://github.com/MickLesk) ([#13279](https://github.com/community-scripts/ProxmoxVE/pull/13279)) + +## 2026-03-24 + +### 🆕 New Scripts + + - Homebrew (Addon) ([#13249](https://github.com/community-scripts/ProxmoxVE/pull/13249)) +- NextExplorer ([#13252](https://github.com/community-scripts/ProxmoxVE/pull/13252)) + +### 🚀 Updated Scripts + + - #### ✨ New Features + + - Turnkey: modernize turnkey.sh with shared libraries [@MickLesk](https://github.com/MickLesk) ([#13242](https://github.com/community-scripts/ProxmoxVE/pull/13242)) + + - #### 🔧 Refactor + + - chore: replace helper-scripts.com with community-scripts.com [@MickLesk](https://github.com/MickLesk) ([#13244](https://github.com/community-scripts/ProxmoxVE/pull/13244)) + +### 🗑️ Deleted Scripts + + - Remove: Booklore [@MickLesk](https://github.com/MickLesk) ([#13265](https://github.com/community-scripts/ProxmoxVE/pull/13265)) + +## 2026-03-23 + +### 🚀 Updated Scripts + + - #### 🔧 Refactor + + - core: harden shell scripts against injection and insecure permissions [@MickLesk](https://github.com/MickLesk) ([#13239](https://github.com/community-scripts/ProxmoxVE/pull/13239)) + +## 2026-03-22 + +### 🆕 New Scripts + + - versitygw ([#13180](https://github.com/community-scripts/ProxmoxVE/pull/13180)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Adventurelog: pin DRF <3.15 to fix coreapi module removal [@MickLesk](https://github.com/MickLesk) ([#13194](https://github.com/community-scripts/ProxmoxVE/pull/13194)) + + - #### ✨ New Features + + - ConvertX: add libreoffice-writer for ODT/document conversions [@MickLesk](https://github.com/MickLesk) ([#13196](https://github.com/community-scripts/ProxmoxVE/pull/13196)) + + - #### 🔧 Refactor + + - iSponsorblockTV: add AVX CPU check before 
installation [@MickLesk](https://github.com/MickLesk) ([#13197](https://github.com/community-scripts/ProxmoxVE/pull/13197)) + +### 💾 Core + + - #### 🐞 Bug Fixes + + - core: guard against empty IPv6 address in static mode [@MickLesk](https://github.com/MickLesk) ([#13195](https://github.com/community-scripts/ProxmoxVE/pull/13195)) + +## 2026-03-21 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Anytype-server: wait for MongoDB readiness before rs.initiate() [@MickLesk](https://github.com/MickLesk) ([#13165](https://github.com/community-scripts/ProxmoxVE/pull/13165)) + - Frigate: use correct CPU model fallback path [@MickLesk](https://github.com/MickLesk) ([#13164](https://github.com/community-scripts/ProxmoxVE/pull/13164)) + - iSponsorBlockTV: Fix release fetching [@tremor021](https://github.com/tremor021) ([#13157](https://github.com/community-scripts/ProxmoxVE/pull/13157)) + - Isponsorblocktv: use quoted heredoc to prevent unbound variable error during CLI wrapper creation [@Copilot](https://github.com/Copilot) ([#13146](https://github.com/community-scripts/ProxmoxVE/pull/13146)) + + - #### ✨ New Features + + - Headscale: Enable TUN [@tremor021](https://github.com/tremor021) ([#13158](https://github.com/community-scripts/ProxmoxVE/pull/13158)) + +### 💾 Core + + - #### 🐞 Bug Fixes + + - core: add missing -searchdomain/-nameserver prefix in base_settings [@MickLesk](https://github.com/MickLesk) ([#13166](https://github.com/community-scripts/ProxmoxVE/pull/13166)) + +## 2026-03-20 + +### 🆕 New Scripts + + - iSponsorBlockTV ([#13123](https://github.com/community-scripts/ProxmoxVE/pull/13123)) +- Alpine-Wakapi ([#13119](https://github.com/community-scripts/ProxmoxVE/pull/13119)) +- teleport ([#13086](https://github.com/community-scripts/ProxmoxVE/pull/13086)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Reactive-Resume: add git dependency for v5.0.13+ [@MickLesk](https://github.com/MickLesk) 
([#13133](https://github.com/community-scripts/ProxmoxVE/pull/13133)) + - Scanopy: increase default CPU, RAM, and HDD to prevent OOM during Rust build [@Copilot](https://github.com/Copilot) ([#13130](https://github.com/community-scripts/ProxmoxVE/pull/13130)) + + - #### ✨ New Features + + - Immich: v2.6.1 [@vhsdream](https://github.com/vhsdream) ([#13111](https://github.com/community-scripts/ProxmoxVE/pull/13111)) + - VM's: add input validation and hostname sanitization to all VM scripts [@MickLesk](https://github.com/MickLesk) ([#12973](https://github.com/community-scripts/ProxmoxVE/pull/12973)) + +### 🧰 Tools + + - #### 🔧 Refactor + + - Harden code-server addon install script [@MickLesk](https://github.com/MickLesk) ([#13116](https://github.com/community-scripts/ProxmoxVE/pull/13116)) + +## 2026-03-19 + +### 🚀 Updated Scripts + + - Owncast: increase default disk size from 2GB to 10GB [@Copilot](https://github.com/Copilot) ([#13079](https://github.com/community-scripts/ProxmoxVE/pull/13079)) + + - #### 🐞 Bug Fixes + + - fix: remove extra backslash to match single quoted here-doc [@Zelnes](https://github.com/Zelnes) ([#13108](https://github.com/community-scripts/ProxmoxVE/pull/13108)) + - Reactive-Resume: Upgrade Node to 24 and enable Corepack [@MickLesk](https://github.com/MickLesk) ([#13093](https://github.com/community-scripts/ProxmoxVE/pull/13093)) + - Increase Tracearr RAM; derive APP_VERSION [@MickLesk](https://github.com/MickLesk) ([#13087](https://github.com/community-scripts/ProxmoxVE/pull/13087)) + - ProjectSend: Update application access URL [@tremor021](https://github.com/tremor021) ([#13078](https://github.com/community-scripts/ProxmoxVE/pull/13078)) + - Dispatcharr: use npm install --no-audit --progress=false [@MickLesk](https://github.com/MickLesk) ([#13074](https://github.com/community-scripts/ProxmoxVE/pull/13074)) + - core: reorder hwaccel setup and adjust GPU group usermod [@MickLesk](https://github.com/MickLesk) 
([#13072](https://github.com/community-scripts/ProxmoxVE/pull/13072)) + + - #### ✨ New Features + + - tools.func: display pin reason in release-check messages [@MickLesk](https://github.com/MickLesk) ([#13095](https://github.com/community-scripts/ProxmoxVE/pull/13095)) + - NocoDB: Unpin Version to latest [@MickLesk](https://github.com/MickLesk) ([#13094](https://github.com/community-scripts/ProxmoxVE/pull/13094)) + +### 💾 Core + + - #### 🐞 Bug Fixes + + - tools.func: use dpkg-query for reliable JDK version detection [@MickLesk](https://github.com/MickLesk) ([#13101](https://github.com/community-scripts/ProxmoxVE/pull/13101)) + +### 📚 Documentation + + - Update link from helper-scripts.com to community-scripts.org [@adnanvaldes](https://github.com/adnanvaldes) ([#13098](https://github.com/community-scripts/ProxmoxVE/pull/13098)) +- github: add PocketBase bot workflow [@MickLesk](https://github.com/MickLesk) ([#13075](https://github.com/community-scripts/ProxmoxVE/pull/13075)) + +## 2026-03-18 + +### 🆕 New Scripts + + - Alpine-Ntfy [@MickLesk](https://github.com/MickLesk) ([#13048](https://github.com/community-scripts/ProxmoxVE/pull/13048)) +- Split-Pro ([#12975](https://github.com/community-scripts/ProxmoxVE/pull/12975)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Tdarr: use curl_with_retry and correct exit code [@MickLesk](https://github.com/MickLesk) ([#13060](https://github.com/community-scripts/ProxmoxVE/pull/13060)) + - reitti: fix: v4 [@CrazyWolf13](https://github.com/CrazyWolf13) ([#13039](https://github.com/community-scripts/ProxmoxVE/pull/13039)) + - Paperless-NGX: increase default RAM to 3GB [@MickLesk](https://github.com/MickLesk) ([#13018](https://github.com/community-scripts/ProxmoxVE/pull/13018)) + - Plex: restart service after update to apply new version [@MickLesk](https://github.com/MickLesk) ([#13017](https://github.com/community-scripts/ProxmoxVE/pull/13017)) + + - #### ✨ New Features + + - tools: centralize GPU group setup via 
setup_hwaccel [@MickLesk](https://github.com/MickLesk) ([#13044](https://github.com/community-scripts/ProxmoxVE/pull/13044)) + - Termix: add guacd build and systemd integration [@MickLesk](https://github.com/MickLesk) ([#12999](https://github.com/community-scripts/ProxmoxVE/pull/12999)) + + - #### 🔧 Refactor + + - Podman: replace deprecated commands with Quadlets [@MickLesk](https://github.com/MickLesk) ([#13052](https://github.com/community-scripts/ProxmoxVE/pull/13052)) + - Refactor: Jellyfin repo, ffmpeg package and symlinks [@MickLesk](https://github.com/MickLesk) ([#13045](https://github.com/community-scripts/ProxmoxVE/pull/13045)) + - pve-scripts-local: Increase default disk size from 4GB to 10GB [@MickLesk](https://github.com/MickLesk) ([#13009](https://github.com/community-scripts/ProxmoxVE/pull/13009)) + +### 💾 Core + + - #### ✨ New Features + + - tools.func Implement pg_cron setup for setup_postgresql [@MickLesk](https://github.com/MickLesk) ([#13053](https://github.com/community-scripts/ProxmoxVE/pull/13053)) + - tools.func: Implement check_for_gh_tag function [@MickLesk](https://github.com/MickLesk) ([#12998](https://github.com/community-scripts/ProxmoxVE/pull/12998)) + - tools.func: Implement fetch_and_deploy_gh_tag function [@MickLesk](https://github.com/MickLesk) ([#13000](https://github.com/community-scripts/ProxmoxVE/pull/13000)) + +## 2026-03-17 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Gluetun: add OpenVPN process user and cleanup stale config [@MickLesk](https://github.com/MickLesk) ([#13016](https://github.com/community-scripts/ProxmoxVE/pull/13016)) + - Frigate: check OpenVino model files exist before configuring detector and use curl_with_retry instead of default wget [@MickLesk](https://github.com/MickLesk) ([#13019](https://github.com/community-scripts/ProxmoxVE/pull/13019)) + +### 💾 Core + + - #### 🔧 Refactor + + - tools.func: Update `create_self_signed_cert()` [@tremor021](https://github.com/tremor021) 
([#13008](https://github.com/community-scripts/ProxmoxVE/pull/13008)) + +## 2026-03-16 + +### 🆕 New Scripts + + - Gluetun ([#12976](https://github.com/community-scripts/ProxmoxVE/pull/12976)) +- Anytype-Server ([#12974](https://github.com/community-scripts/ProxmoxVE/pull/12974)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Immich: use gcc-13 for compilation & add uv python pre-install with retry logic [@MickLesk](https://github.com/MickLesk) ([#12935](https://github.com/community-scripts/ProxmoxVE/pull/12935)) + - Tautulli: add setuptools<81 constraint to update script [@MickLesk](https://github.com/MickLesk) ([#12959](https://github.com/community-scripts/ProxmoxVE/pull/12959)) + - Seerr: add missing build deps [@MickLesk](https://github.com/MickLesk) ([#12960](https://github.com/community-scripts/ProxmoxVE/pull/12960)) + - fix: yubal update [@CrazyWolf13](https://github.com/CrazyWolf13) ([#12961](https://github.com/community-scripts/ProxmoxVE/pull/12961)) + +### 💾 Core + + - #### 🐞 Bug Fixes + + - hwaccel: remove ROCm install from AMD APU setup [@MickLesk](https://github.com/MickLesk) ([#12958](https://github.com/community-scripts/ProxmoxVE/pull/12958)) + +## 2026-03-15 + +### 🆕 New Scripts + + - Yamtrack ([#12936](https://github.com/community-scripts/ProxmoxVE/pull/12936)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Wishlist: use --frozen-lockfile for pnpm install [@MickLesk](https://github.com/MickLesk) ([#12892](https://github.com/community-scripts/ProxmoxVE/pull/12892)) + - SparkyFitness: use --legacy-peer-deps for npm install [@MickLesk](https://github.com/MickLesk) ([#12888](https://github.com/community-scripts/ProxmoxVE/pull/12888)) + - Frigate: add fallback for OpenVino labelmap file [@MickLesk](https://github.com/MickLesk) ([#12889](https://github.com/community-scripts/ProxmoxVE/pull/12889)) + + - #### 🔧 Refactor + + - Refactor: ITSM-NG [@MickLesk](https://github.com/MickLesk) 
([#12918](https://github.com/community-scripts/ProxmoxVE/pull/12918)) + - core: unify RELEASE variable for check_for_gh_release and fetch_and_deploy [@MickLesk](https://github.com/MickLesk) ([#12917](https://github.com/community-scripts/ProxmoxVE/pull/12917)) + - Standardize NSAPP names across VM scripts [@MickLesk](https://github.com/MickLesk) ([#12924](https://github.com/community-scripts/ProxmoxVE/pull/12924)) + +### 💾 Core + + - #### ✨ New Features + + - core: retry downloads with exponential backoff [@MickLesk](https://github.com/MickLesk) ([#12896](https://github.com/community-scripts/ProxmoxVE/pull/12896)) + +### ❔ Uncategorized + + - [go2rtc] Add ffmpeg dependency to install script [@Copilot](https://github.com/Copilot) ([#12944](https://github.com/community-scripts/ProxmoxVE/pull/12944)) + +## 2026-03-14 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Patchmon: remove v prefix from pinned version [@MickLesk](https://github.com/MickLesk) ([#12891](https://github.com/community-scripts/ProxmoxVE/pull/12891)) + +### 💾 Core + + - #### 🐞 Bug Fixes + + - tools.func: don't abort on AMD repo apt update failure [@MickLesk](https://github.com/MickLesk) ([#12890](https://github.com/community-scripts/ProxmoxVE/pull/12890)) + +## 2026-03-13 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Hotfix: Removed clean install usage from original script. 
[@nickheyer](https://github.com/nickheyer) ([#12870](https://github.com/community-scripts/ProxmoxVE/pull/12870)) + + - #### 🔧 Refactor + + - Discopanel: V2 Support + Script rewrite [@nickheyer](https://github.com/nickheyer) ([#12763](https://github.com/community-scripts/ProxmoxVE/pull/12763)) + +### 🧰 Tools + + - update-apps: fix restore path, add PBS support and improve restore messages [@omertahaoztop](https://github.com/omertahaoztop) ([#12528](https://github.com/community-scripts/ProxmoxVE/pull/12528)) + + - #### 🐞 Bug Fixes + + - fix(pve-privilege-converter): handle already stopped container in manage_states [@liuqitoday](https://github.com/liuqitoday) ([#12765](https://github.com/community-scripts/ProxmoxVE/pull/12765)) + +### 📚 Documentation + + - Update: Docs/website metadata workflow [@michelroegl-brunner](https://github.com/michelroegl-brunner) ([#12858](https://github.com/community-scripts/ProxmoxVE/pull/12858)) + +## 2026-03-12 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - manyfold: fix incorrect port in upstream requests by forwarding original host [@anlopo](https://github.com/anlopo) ([#12812](https://github.com/community-scripts/ProxmoxVE/pull/12812)) + - SparkyFitness: install pnpm dependencies from workspace root [@MickLesk](https://github.com/MickLesk) ([#12792](https://github.com/community-scripts/ProxmoxVE/pull/12792)) + - n8n: add build-essential to update dependencies [@MickLesk](https://github.com/MickLesk) ([#12795](https://github.com/community-scripts/ProxmoxVE/pull/12795)) + - Frigate openvino labelmap patch [@semtex1987](https://github.com/semtex1987) ([#12751](https://github.com/community-scripts/ProxmoxVE/pull/12751)) + + - #### 🔧 Refactor + + - Pin Patchmon to 1.4.2 [@vhsdream](https://github.com/vhsdream) ([#12789](https://github.com/community-scripts/ProxmoxVE/pull/12789)) + +### 💾 Core + + - #### 🐞 Bug Fixes + + - tools.func: correct PATH escaping in ROCm profile script [@MickLesk](https://github.com/MickLesk) 
([#12793](https://github.com/community-scripts/ProxmoxVE/pull/12793)) + + - #### ✨ New Features + + - core: add mode=generated for unattended frontend installs [@MickLesk](https://github.com/MickLesk) ([#12807](https://github.com/community-scripts/ProxmoxVE/pull/12807)) + - core: validate storage availability when loading defaults [@MickLesk](https://github.com/MickLesk) ([#12794](https://github.com/community-scripts/ProxmoxVE/pull/12794)) + + - #### 🔧 Refactor + + - tools.func: support older NVIDIA driver versions with 2 segments (xxx.xxx) [@MickLesk](https://github.com/MickLesk) ([#12796](https://github.com/community-scripts/ProxmoxVE/pull/12796)) + +### 🧰 Tools + + - #### 🐞 Bug Fixes + + - Fix PBS microcode naming [@michelroegl-brunner](https://github.com/michelroegl-brunner) ([#12834](https://github.com/community-scripts/ProxmoxVE/pull/12834)) + +### 📂 Github + + - Cleanup: remove old workflow files [@michelroegl-brunner](https://github.com/michelroegl-brunner) ([#12818](https://github.com/community-scripts/ProxmoxVE/pull/12818)) +- Cleanup: remove frontend, move JSONs to json/ top-level [@MickLesk](https://github.com/MickLesk) ([#12813](https://github.com/community-scripts/ProxmoxVE/pull/12813)) + +### ❔ Uncategorized + + - Remove json files [@michelroegl-brunner](https://github.com/michelroegl-brunner) ([#12830](https://github.com/community-scripts/ProxmoxVE/pull/12830)) + +## 2026-03-11 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - fix: Init telemetry in addon scripts [@MickLesk](https://github.com/MickLesk) ([#12777](https://github.com/community-scripts/ProxmoxVE/pull/12777)) + - Tracearr: Increase default disk variable from 5 to 10 [@michelroegl-brunner](https://github.com/michelroegl-brunner) ([#12762](https://github.com/community-scripts/ProxmoxVE/pull/12762)) + - Fix Wireguard Dashboard update [@odin568](https://github.com/odin568) ([#12767](https://github.com/community-scripts/ProxmoxVE/pull/12767)) + +### 🧰 Tools + + - #### ✨ New Features + + 
- Coder-Code-Server: Check if config file exists [@michelroegl-brunner](https://github.com/michelroegl-brunner) ([#12758](https://github.com/community-scripts/ProxmoxVE/pull/12758)) + +## 2026-03-10 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - [Fix] Immich: Pin libvips to 8.17.3 [@vhsdream](https://github.com/vhsdream) ([#12744](https://github.com/community-scripts/ProxmoxVE/pull/12744)) + +## 2026-03-09 + +### 🚀 Updated Scripts + + - Pin Opencloud to 5.2.0 [@vhsdream](https://github.com/vhsdream) ([#12721](https://github.com/community-scripts/ProxmoxVE/pull/12721)) + + - #### 🐞 Bug Fixes + + - [Hotfix] qBittorrent: Disable UPnP port forwarding by default [@vhsdream](https://github.com/vhsdream) ([#12728](https://github.com/community-scripts/ProxmoxVE/pull/12728)) + - [Quickfix] Opencloud: ensure correct case for binary [@vhsdream](https://github.com/vhsdream) ([#12729](https://github.com/community-scripts/ProxmoxVE/pull/12729)) + - Omada: Bump libssl [@MickLesk](https://github.com/MickLesk) ([#12724](https://github.com/community-scripts/ProxmoxVE/pull/12724)) + - openwebui: Ensure required dependencies [@MickLesk](https://github.com/MickLesk) ([#12717](https://github.com/community-scripts/ProxmoxVE/pull/12717)) + - Frigate: try an OpenVino model build fallback [@MickLesk](https://github.com/MickLesk) ([#12704](https://github.com/community-scripts/ProxmoxVE/pull/12704)) + - Change cronjob setup to use www-data user [@opastorello](https://github.com/opastorello) ([#12695](https://github.com/community-scripts/ProxmoxVE/pull/12695)) + - RustDesk Server: Fix check_for_gh_release function call [@tremor021](https://github.com/tremor021) ([#12694](https://github.com/community-scripts/ProxmoxVE/pull/12694)) + + - #### ✨ New Features + + - feat: improve zigbee2mqtt backup handler [@MickLesk](https://github.com/MickLesk) ([#12714](https://github.com/community-scripts/ProxmoxVE/pull/12714)) + + - #### 💥 Breaking Changes + + - Reactive Resume: rewrite for v5 using 
original repo amruthpilla/reactive-resume [@MickLesk](https://github.com/MickLesk) ([#12705](https://github.com/community-scripts/ProxmoxVE/pull/12705)) + +### 💾 Core + + - #### ✨ New Features + + - tools: add Alpine (apk) support to ensure_dependencies and is_package_installed [@MickLesk](https://github.com/MickLesk) ([#12703](https://github.com/community-scripts/ProxmoxVE/pull/12703)) + - tools.func: extend hwaccel with ROCm [@MickLesk](https://github.com/MickLesk) ([#12707](https://github.com/community-scripts/ProxmoxVE/pull/12707)) + +### 🌐 Website + + - #### ✨ New Features + + - feat: add CopycatWarningToast component for user warnings [@BramSuurdje](https://github.com/BramSuurdje) ([#12733](https://github.com/community-scripts/ProxmoxVE/pull/12733)) + +## 2026-03-08 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - [Fix] Immich: chown install dir before machine-learning update [@vhsdream](https://github.com/vhsdream) ([#12684](https://github.com/community-scripts/ProxmoxVE/pull/12684)) + - [Fix] Scanopy: Build generate-fixtures [@vhsdream](https://github.com/vhsdream) ([#12686](https://github.com/community-scripts/ProxmoxVE/pull/12686)) + - fix: rustdeskserver: use correct repo string [@CrazyWolf13](https://github.com/CrazyWolf13) ([#12682](https://github.com/community-scripts/ProxmoxVE/pull/12682)) + - NZBGet: Fixes for RAR5 handling [@tremor021](https://github.com/tremor021) ([#12675](https://github.com/community-scripts/ProxmoxVE/pull/12675)) + +### 🌐 Website + + - #### 🐞 Bug Fixes + + - LXC-Execute: Fix slug [@tremor021](https://github.com/tremor021) ([#12681](https://github.com/community-scripts/ProxmoxVE/pull/12681)) + ## 2026-03-07 ### 🆕 New Scripts diff --git a/.github/changelogs/2026/04.md b/.github/changelogs/2026/04.md new file mode 100644 index 000000000..8b58dd125 --- /dev/null +++ b/.github/changelogs/2026/04.md @@ -0,0 +1,590 @@ +## 2026-04-30 + +### 🆕 New Scripts + + - Nagios 
([#14126](https://github.com/community-scripts/ProxmoxVE/pull/14126)) +- Neko ([#14121](https://github.com/community-scripts/ProxmoxVE/pull/14121)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - alpine-docker: install openssl as core dependency | alpine-komodo: check & install openssl if missing [@MickLesk](https://github.com/MickLesk) ([#14134](https://github.com/community-scripts/ProxmoxVE/pull/14134)) + - endurain: update source references to Codeberg [@MickLesk](https://github.com/MickLesk) ([#14128](https://github.com/community-scripts/ProxmoxVE/pull/14128)) + +### 💾 Core + + - #### 🔧 Refactor + + - tools.func: Manage minor versions for MongoDB 8.x [@tremor021](https://github.com/tremor021) ([#14131](https://github.com/community-scripts/ProxmoxVE/pull/14131)) + +## 2026-04-29 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - GrayLog: MongoDB update to 8.2.x [@tremor021](https://github.com/tremor021) ([#14114](https://github.com/community-scripts/ProxmoxVE/pull/14114)) + - Graylog: Better information in the log file [@tremor021](https://github.com/tremor021) ([#14110](https://github.com/community-scripts/ProxmoxVE/pull/14110)) + + - #### 🔧 Refactor + + - Refactor: checkMK [@MickLesk](https://github.com/MickLesk) ([#14105](https://github.com/community-scripts/ProxmoxVE/pull/14105)) + - PatchMon: Unpin release [@tremor021](https://github.com/tremor021) ([#14097](https://github.com/community-scripts/ProxmoxVE/pull/14097)) + +### 💾 Core + + - #### 🔧 Refactor + + - core: add guidance when storage lacks rootdir support [@MickLesk](https://github.com/MickLesk) ([#14108](https://github.com/community-scripts/ProxmoxVE/pull/14108)) + +## 2026-04-28 + +### 🆕 New Scripts + + - StoryBook ([#14081](https://github.com/community-scripts/ProxmoxVE/pull/14081)) +- CoreDNS ([#14082](https://github.com/community-scripts/ProxmoxVE/pull/14082)) + +### 🚀 Updated Scripts + + - Fix Dawarich Install/Update [@Jerry1098](https://github.com/Jerry1098) 
([#14078](https://github.com/community-scripts/ProxmoxVE/pull/14078)) + + - #### ✨ New Features + + - PatchMon Version 2.0.2 Script update [@9technologygroup](https://github.com/9technologygroup) ([#14095](https://github.com/community-scripts/ProxmoxVE/pull/14095)) + +## 2026-04-27 + +### 🚀 Updated Scripts + + - Add pamUsername column to userOrgs table [@JVKeller](https://github.com/JVKeller) ([#14075](https://github.com/community-scripts/ProxmoxVE/pull/14075)) + + - #### 🐞 Bug Fixes + + - Dawarich: run db:migrate before assets:precompile [@MickLesk](https://github.com/MickLesk) ([#14051](https://github.com/community-scripts/ProxmoxVE/pull/14051)) + - TechnitiumDNS: always install .NET 10 if not already present [@MickLesk](https://github.com/MickLesk) ([#14049](https://github.com/community-scripts/ProxmoxVE/pull/14049)) + + - #### 💥 Breaking Changes + + - PatchMon: v2.0.0 migration [@vhsdream](https://github.com/vhsdream) ([#14015](https://github.com/community-scripts/ProxmoxVE/pull/14015)) + +### 💾 Core + + - #### 🔧 Refactor + + - Update build.func - fixed spelling mistake [@m1ckywill](https://github.com/m1ckywill) ([#14047](https://github.com/community-scripts/ProxmoxVE/pull/14047)) + +### 🧰 Tools + + - #### 🐞 Bug Fixes + + - update-lxcs/apps: avoid pct exec on containers mid-shutdown [@MickLesk](https://github.com/MickLesk) ([#14050](https://github.com/community-scripts/ProxmoxVE/pull/14050)) + + - #### ✨ New Features + + - Add patchmon-agent report execution in update script [@heinemannj](https://github.com/heinemannj) ([#14054](https://github.com/community-scripts/ProxmoxVE/pull/14054)) + +## 2026-04-26 + +### 🆕 New Scripts + + - TREK ([#14017](https://github.com/community-scripts/ProxmoxVE/pull/14017)) + +### 🚀 Updated Scripts + + - fix(2fauth): handle stale backup directory on update [@omertahaoztop](https://github.com/omertahaoztop) ([#14018](https://github.com/community-scripts/ProxmoxVE/pull/14018)) + + - #### 🐞 Bug Fixes + + - Increase Frigate default 
CPU cores from 4 to 8 [@MickLesk](https://github.com/MickLesk) ([#14039](https://github.com/community-scripts/ProxmoxVE/pull/14039)) + - Technitium DNS: Ensure directories exist before running service [@tremor021](https://github.com/tremor021) ([#14030](https://github.com/community-scripts/ProxmoxVE/pull/14030)) + +### 💾 Core + + - #### 🐞 Bug Fixes + + - core: Correct deb822 repository flat path detection [@MickLesk](https://github.com/MickLesk) ([#14037](https://github.com/community-scripts/ProxmoxVE/pull/14037)) + +## 2026-04-25 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - VictoriaMetrics: Stop vmagent/vmalert before update [@irishpadres](https://github.com/irishpadres) ([#14016](https://github.com/community-scripts/ProxmoxVE/pull/14016)) + - Domain-Monitor: start apache2 after stop instead of reload [@omertahaoztop](https://github.com/omertahaoztop) ([#14019](https://github.com/community-scripts/ProxmoxVE/pull/14019)) + - Transmute: Fix ffmpeg detection [@tremor021](https://github.com/tremor021) ([#14008](https://github.com/community-scripts/ProxmoxVE/pull/14008)) + + - #### 🔧 Refactor + + - Refactor: Technitium DNS [@tremor021](https://github.com/tremor021) ([#14013](https://github.com/community-scripts/ProxmoxVE/pull/14013)) + +## 2026-04-24 + +### 🆕 New Scripts + + - Apprise-API ([#13934](https://github.com/community-scripts/ProxmoxVE/pull/13934)) +- fireshare ([#13995](https://github.com/community-scripts/ProxmoxVE/pull/13995)) +- Transmute ([#13935](https://github.com/community-scripts/ProxmoxVE/pull/13935)) +- Jitsi-Meet ([#13897](https://github.com/community-scripts/ProxmoxVE/pull/13897)) + +### 🚀 Updated Scripts + + - Update wger.sh [@Soppster1029](https://github.com/Soppster1029) ([#13977](https://github.com/community-scripts/ProxmoxVE/pull/13977)) + + - #### 🔧 Refactor + + - Refactor: Ghostfolio [@MickLesk](https://github.com/MickLesk) ([#13990](https://github.com/community-scripts/ProxmoxVE/pull/13990)) + +## 2026-04-23 + +### 🚀 Updated 
Scripts + + - #### 🐞 Bug Fixes + + - mealie: start.sh missing after failed update [@MickLesk](https://github.com/MickLesk) ([#13958](https://github.com/community-scripts/ProxmoxVE/pull/13958)) + - twingate-connector: perform real apt upgrade during update flow [@MickLesk](https://github.com/MickLesk) ([#13959](https://github.com/community-scripts/ProxmoxVE/pull/13959)) + + - #### ✨ New Features + + - core: auto-size NODE_OPTIONS heap [@MickLesk](https://github.com/MickLesk) ([#13960](https://github.com/community-scripts/ProxmoxVE/pull/13960)) + + - #### 🔧 Refactor + + - Update scripts to match standard [@tremor021](https://github.com/tremor021) ([#13956](https://github.com/community-scripts/ProxmoxVE/pull/13956)) + +### 💾 Core + + - #### 🐞 Bug Fixes + + - tools.func: upgrade Node.js minor/patch on same major version [@MickLesk](https://github.com/MickLesk) ([#13957](https://github.com/community-scripts/ProxmoxVE/pull/13957)) + - core: hotfix - prefer silent mode on PHS env conflict [@MickLesk](https://github.com/MickLesk) ([#13951](https://github.com/community-scripts/ProxmoxVE/pull/13951)) + + - #### 🔧 Refactor + + - core: improve system update information / lxc stack upgrade [@MickLesk](https://github.com/MickLesk) ([#13970](https://github.com/community-scripts/ProxmoxVE/pull/13970)) + +## 2026-04-22 + +### 🆕 New Scripts + + - Dashy ([#13817](https://github.com/community-scripts/ProxmoxVE/pull/13817)) +- Mini-QR ([#13902](https://github.com/community-scripts/ProxmoxVE/pull/13902)) +- ownfoil ([#13904](https://github.com/community-scripts/ProxmoxVE/pull/13904)) +- ERPNext ([#13921](https://github.com/community-scripts/ProxmoxVE/pull/13921)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - add --clear to uv venv in update_script() to prevent interactive prompt [@MickLesk](https://github.com/MickLesk) ([#13926](https://github.com/community-scripts/ProxmoxVE/pull/13926)) + +### 💾 Core + + - #### ✨ New Features + + - core: Add PHS_VERBOSE env var to skip verbose 
mode prompts [@gormanity](https://github.com/gormanity) ([#13797](https://github.com/community-scripts/ProxmoxVE/pull/13797)) + +## 2026-04-21 + +### 🆕 New Scripts + + - gogs ([#13896](https://github.com/community-scripts/ProxmoxVE/pull/13896)) +- anchor ([#13895](https://github.com/community-scripts/ProxmoxVE/pull/13895)) +- minthcm ([#13903](https://github.com/community-scripts/ProxmoxVE/pull/13903)) +- foldergram ([#13900](https://github.com/community-scripts/ProxmoxVE/pull/13900)) + +### 🚀 Updated Scripts + + - OpenCloud: Pin version to 6.1.0 [@vhsdream](https://github.com/vhsdream) ([#13890](https://github.com/community-scripts/ProxmoxVE/pull/13890)) + + - #### 🐞 Bug Fixes + + - Domain-Locker: Update dependencies [@tremor021](https://github.com/tremor021) ([#13901](https://github.com/community-scripts/ProxmoxVE/pull/13901)) + - homelable: fix install failure by correcting password-reset chmod target [@Copilot](https://github.com/Copilot) ([#13894](https://github.com/community-scripts/ProxmoxVE/pull/13894)) + + - #### ✨ New Features + + - FileFlows: Update dependencies [@tremor021](https://github.com/tremor021) ([#13917](https://github.com/community-scripts/ProxmoxVE/pull/13917)) + +## 2026-04-20 + +### 🆕 New Scripts + + - WhoDB ([#13880](https://github.com/community-scripts/ProxmoxVE/pull/13880)) + +### 🚀 Updated Scripts + + - pangolin: create migration tables before data transfer to prevent role loss [@MickLesk](https://github.com/MickLesk) ([#13874](https://github.com/community-scripts/ProxmoxVE/pull/13874)) + + - #### 🐞 Bug Fixes + + - Pangolin: pre-apply schema migrations to prevent data loss [@MickLesk](https://github.com/MickLesk) ([#13861](https://github.com/community-scripts/ProxmoxVE/pull/13861)) + - ActualBudget: change migration messages to warnings [@MickLesk](https://github.com/MickLesk) ([#13860](https://github.com/community-scripts/ProxmoxVE/pull/13860)) + - slskd: migrate config keys for 0.25.0 breaking change 
[@MickLesk](https://github.com/MickLesk) ([#13862](https://github.com/community-scripts/ProxmoxVE/pull/13862)) + + - #### ✨ New Features + + - Wanderer: add pocketbase CLI wrapper with env [@MickLesk](https://github.com/MickLesk) ([#13863](https://github.com/community-scripts/ProxmoxVE/pull/13863)) + - feat(homelable): add password reset utility script [@davidsoncabista](https://github.com/davidsoncabista) ([#13798](https://github.com/community-scripts/ProxmoxVE/pull/13798)) + + - #### 🔧 Refactor + + - Several Scripts: Bump NodeJS to align Node.js versions with upstream for 5 scripts [@MickLesk](https://github.com/MickLesk) ([#13875](https://github.com/community-scripts/ProxmoxVE/pull/13875)) + - Refactor: PMG Post Install [@MickLesk](https://github.com/MickLesk) ([#13693](https://github.com/community-scripts/ProxmoxVE/pull/13693)) + +### 💾 Core + + - #### 🐞 Bug Fixes + + - core: detect Perl breakage after LXC stack upgrade and improve storage validation [@MickLesk](https://github.com/MickLesk) ([#13879](https://github.com/community-scripts/ProxmoxVE/pull/13879)) + +## 2026-04-19 + +### 🆕 New Scripts + + - nametag ([#13849](https://github.com/community-scripts/ProxmoxVE/pull/13849)) + +## 2026-04-18 + +### 🆕 New Scripts + + - Dagu ([#13830](https://github.com/community-scripts/ProxmoxVE/pull/13830)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - BabyBuddy: set DJANGO_SETTINGS_MODULE before migrate in update [@MickLesk](https://github.com/MickLesk) ([#13836](https://github.com/community-scripts/ProxmoxVE/pull/13836)) + - litellm: add prisma generate and use venv binary directly [@MickLesk](https://github.com/MickLesk) ([#13835](https://github.com/community-scripts/ProxmoxVE/pull/13835)) + - yamtrack: add missing nginx.conf sed edits to update script [@MickLesk](https://github.com/MickLesk) ([#13834](https://github.com/community-scripts/ProxmoxVE/pull/13834)) + +### 🧰 Tools + + - #### 🐞 Bug Fixes + + - SparkyFitness Garmin Microservice: fix update function 
[@tomfrenzel](https://github.com/tomfrenzel) ([#13824](https://github.com/community-scripts/ProxmoxVE/pull/13824)) + + - #### 🔧 Refactor + + - Clean-Orphan-LVM: check all cluster nodes for VM/CT configs [@MickLesk](https://github.com/MickLesk) ([#13837](https://github.com/community-scripts/ProxmoxVE/pull/13837)) + +## 2026-04-17 + +### 🆕 New Scripts + + - step-ca ([#13775](https://github.com/community-scripts/ProxmoxVE/pull/13775)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - core: pin IGC version to compute-runtime compatible tag (Intel GPU) [@MickLesk](https://github.com/MickLesk) ([#13814](https://github.com/community-scripts/ProxmoxVE/pull/13814)) + - Fix for bambuddy community script update [@abbasegbeyemi](https://github.com/abbasegbeyemi) ([#13816](https://github.com/community-scripts/ProxmoxVE/pull/13816)) + - Umami: Fix update procedure [@tremor021](https://github.com/tremor021) ([#13807](https://github.com/community-scripts/ProxmoxVE/pull/13807)) + +### 💾 Core + + - #### 🐞 Bug Fixes + + - core: sanitize mount_fs input — strip spaces and trailing commas [@MickLesk](https://github.com/MickLesk) ([#13806](https://github.com/community-scripts/ProxmoxVE/pull/13806)) + + - #### 🔧 Refactor + + - core: fix some pct create issues (telemetry) + cleanup [@MickLesk](https://github.com/MickLesk) ([#13810](https://github.com/community-scripts/ProxmoxVE/pull/13810)) + +## 2026-04-16 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Add pnpm as a dependency to ghost-cli install [@YourFavoriteKyle](https://github.com/YourFavoriteKyle) ([#13789](https://github.com/community-scripts/ProxmoxVE/pull/13789)) + +### 💾 Core + + - #### ✨ New Features + + - core: wire ENABLE_MKNOD and ALLOW_MOUNT_FS into LXC features [@MickLesk](https://github.com/MickLesk) ([#13796](https://github.com/community-scripts/ProxmoxVE/pull/13796)) + +## 2026-04-15 + +### 🆕 New Scripts + + - iGotify ([#13773](https://github.com/community-scripts/ProxmoxVE/pull/13773)) +- GitHub-Runner 
([#13709](https://github.com/community-scripts/ProxmoxVE/pull/13709)) +- Revert "Remove low-install-count CT scripts and installers (#13570)" [@CrazyWolf13](https://github.com/CrazyWolf13) ([#13752](https://github.com/community-scripts/ProxmoxVE/pull/13752)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - [alpine-nextcloud] Update Nginx MIME types to support .mjs files [@GuiltyFox](https://github.com/GuiltyFox) ([#13771](https://github.com/community-scripts/ProxmoxVE/pull/13771)) + - Domain Monitor: Fix file ownership after update [@tremor021](https://github.com/tremor021) ([#13759](https://github.com/community-scripts/ProxmoxVE/pull/13759)) + + - #### 💥 Breaking Changes + + - Reitti: refactor scripts for v4 - remove RabbitMQ and Photon [@MickLesk](https://github.com/MickLesk) ([#13728](https://github.com/community-scripts/ProxmoxVE/pull/13728)) + + - #### 🔧 Refactor + + - Semaphore: add BoltDB to SQLite migration [@tremor021](https://github.com/tremor021) ([#13779](https://github.com/community-scripts/ProxmoxVE/pull/13779)) + +### 📚 Documentation + + - cleanup: remove docs/, update README & CONTRIBUTING, fix repo config [@MickLesk](https://github.com/MickLesk) ([#13770](https://github.com/community-scripts/ProxmoxVE/pull/13770)) + +## 2026-04-14 + +### 🚀 Updated Scripts + + - Immich: Pin photo-processing library revisions [@vhsdream](https://github.com/vhsdream) ([#13748](https://github.com/community-scripts/ProxmoxVE/pull/13748)) + + - #### 🐞 Bug Fixes + + - BentoPDF: Nginx fixes [@tremor021](https://github.com/tremor021) ([#13741](https://github.com/community-scripts/ProxmoxVE/pull/13741)) + - Zerobyte: add git to dependencies to fix bun install failure [@Copilot](https://github.com/Copilot) ([#13721](https://github.com/community-scripts/ProxmoxVE/pull/13721)) + - alpine-nextcloud-install: do not use deprecated nginx config [@AlexanderStein](https://github.com/AlexanderStein) ([#13726](https://github.com/community-scripts/ProxmoxVE/pull/13726)) + + - #### 
✨ New Features + + - Mealie: support v3.15+ Nuxt 4 migration [@MickLesk](https://github.com/MickLesk) ([#13731](https://github.com/community-scripts/ProxmoxVE/pull/13731)) + + - #### 🔧 Refactor + + - Lyrion: correct service name and version file in update script [@MickLesk](https://github.com/MickLesk) ([#13734](https://github.com/community-scripts/ProxmoxVE/pull/13734)) + - Changedetection: move env vars from service file to .env [@tremor021](https://github.com/tremor021) ([#13732](https://github.com/community-scripts/ProxmoxVE/pull/13732)) + +## 2026-04-13 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Slskd: Remove stale Soularr lock file on startup and redirect logs to stderr [@MickLesk](https://github.com/MickLesk) ([#13669](https://github.com/community-scripts/ProxmoxVE/pull/13669)) + - Bambuddy: preserve database and archive on update [@Copilot](https://github.com/Copilot) ([#13706](https://github.com/community-scripts/ProxmoxVE/pull/13706)) + + - #### ✨ New Features + + - Immich: Pin version to 2.7.5 [@vhsdream](https://github.com/vhsdream) ([#13715](https://github.com/community-scripts/ProxmoxVE/pull/13715)) + - Bytestash: auto backup/restore data on update [@MickLesk](https://github.com/MickLesk) ([#13707](https://github.com/community-scripts/ProxmoxVE/pull/13707)) + - OpenCloud: pin version to 6.0.0 [@vhsdream](https://github.com/vhsdream) ([#13691](https://github.com/community-scripts/ProxmoxVE/pull/13691)) + + - #### 💥 Breaking Changes + + - Mealie: pin version to v3.14.0 in install and update scripts [@Copilot](https://github.com/Copilot) ([#13724](https://github.com/community-scripts/ProxmoxVE/pull/13724)) + + - #### 🔧 Refactor + + - core: remove unused TEMP_DIR mktemp leak in build_container / clean sonarqube [@MickLesk](https://github.com/MickLesk) ([#13708](https://github.com/community-scripts/ProxmoxVE/pull/13708)) + +## 2026-04-12 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Alpine-Wakapi: Remove container checks in update_script 
function [@MickLesk](https://github.com/MickLesk) ([#13694](https://github.com/community-scripts/ProxmoxVE/pull/13694)) + + - #### 🔧 Refactor + + - IronClaw: Install keychain dependencies and launch in a DBus session [@MickLesk](https://github.com/MickLesk) ([#13692](https://github.com/community-scripts/ProxmoxVE/pull/13692)) + - MeTube: Allow pnpm build scripts to fix ERR_PNPM_IGNORED_BUILDS [@MickLesk](https://github.com/MickLesk) ([#13668](https://github.com/community-scripts/ProxmoxVE/pull/13668)) + +## 2026-04-11 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Immich: Ensure newline before appending IMMICH_HELMET_FILE to .env [@MickLesk](https://github.com/MickLesk) ([#13667](https://github.com/community-scripts/ProxmoxVE/pull/13667)) + + - #### ✨ New Features + + - BentoPDF: replace http-server with nginx to fix WASM initialization timeout [@MickLesk](https://github.com/MickLesk) ([#13625](https://github.com/community-scripts/ProxmoxVE/pull/13625)) + - Element Synapse: Add MatrixRTC configuration for Element Call support [@MickLesk](https://github.com/MickLesk) ([#13665](https://github.com/community-scripts/ProxmoxVE/pull/13665)) + - RomM: Use ROMM_BASE_PATH from .env for symlinks and nginx config [@MickLesk](https://github.com/MickLesk) ([#13666](https://github.com/community-scripts/ProxmoxVE/pull/13666)) + - Immich: Pin version to 2.7.4 [@vhsdream](https://github.com/vhsdream) ([#13661](https://github.com/community-scripts/ProxmoxVE/pull/13661)) + + - #### 🔧 Refactor + + - Crafty Controller: Wait for credentials file instead of fixed sleep [@MickLesk](https://github.com/MickLesk) ([#13670](https://github.com/community-scripts/ProxmoxVE/pull/13670)) + - Refactor: Alpine-Wakapi [@tremor021](https://github.com/tremor021) ([#13656](https://github.com/community-scripts/ProxmoxVE/pull/13656)) + +## 2026-04-10 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - fix: ensure trailing newline in redis.conf before appending bind directive 
[@Copilot](https://github.com/Copilot) ([#13647](https://github.com/community-scripts/ProxmoxVE/pull/13647)) + + - #### ✨ New Features + + - Immich: Pin version to 2.7.3 [@vhsdream](https://github.com/vhsdream) ([#13631](https://github.com/community-scripts/ProxmoxVE/pull/13631)) + - Homarr: bind Redis to localhost only [@MickLesk](https://github.com/MickLesk) ([#13552](https://github.com/community-scripts/ProxmoxVE/pull/13552)) + +### 💾 Core + + - #### 🐞 Bug Fixes + + - tools.func: prevent script crash when entering GitHub token after rate limit [@MickLesk](https://github.com/MickLesk) ([#13638](https://github.com/community-scripts/ProxmoxVE/pull/13638)) + +### 🧰 Tools + + - #### 🔧 Refactor + + - addons: Filebrowser & Filebrowser-Quantum get warning if host install [@MickLesk](https://github.com/MickLesk) ([#13639](https://github.com/community-scripts/ProxmoxVE/pull/13639)) + +## 2026-04-09 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - boostack: add: git [@CrazyWolf13](https://github.com/CrazyWolf13) ([#13620](https://github.com/community-scripts/ProxmoxVE/pull/13620)) + + - #### ✨ New Features + + - Update OPNsense version from 25.7 to 26.1 [@tdn131](https://github.com/tdn131) ([#13626](https://github.com/community-scripts/ProxmoxVE/pull/13626)) + - CheckMK: Bump Default OS to 13 (trixie) + dynamic codename + fix RELEASE-Tag Fetching [@MickLesk](https://github.com/MickLesk) ([#13610](https://github.com/community-scripts/ProxmoxVE/pull/13610)) + +## 2026-04-08 + +### 🆕 New Scripts + + - IronClaw | Alpine-IronClaw ([#13591](https://github.com/community-scripts/ProxmoxVE/pull/13591)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - immich: disable upgrade-insecure-requests CSP directive [@MickLesk](https://github.com/MickLesk) ([#13600](https://github.com/community-scripts/ProxmoxVE/pull/13600)) + - Immich: v2.7.2 [@vhsdream](https://github.com/vhsdream) ([#13579](https://github.com/community-scripts/ProxmoxVE/pull/13579)) + - Update 
flaresolverr-install.sh [@maztheman](https://github.com/maztheman) ([#13584](https://github.com/community-scripts/ProxmoxVE/pull/13584)) + + - #### ✨ New Features + + - bambuddy: add mkdir before data restore & add ffmpeg dependency [@MickLesk](https://github.com/MickLesk) ([#13601](https://github.com/community-scripts/ProxmoxVE/pull/13601)) + + - #### 🔧 Refactor + + - feat: update UHF Server script to use setup_ffmpeg [@zackwithak13](https://github.com/zackwithak13) ([#13564](https://github.com/community-scripts/ProxmoxVE/pull/13564)) + +### 💾 Core + + - #### ✨ New Features + + - core: add script page badges to descriptions | change donate URL [@MickLesk](https://github.com/MickLesk) ([#13596](https://github.com/community-scripts/ProxmoxVE/pull/13596)) + +## 2026-04-07 + +### 🗑️ Deleted Scripts + + - Remove low-install-count CT scripts and installers [@michelroegl-brunner](https://github.com/michelroegl-brunner) ([#13570](https://github.com/community-scripts/ProxmoxVE/pull/13570)) + +### 💾 Core + + - #### ✨ New Features + + - core: improve resilience for top Proxmox error codes (209, 215, 118, 206) [@MickLesk](https://github.com/MickLesk) ([#13575](https://github.com/community-scripts/ProxmoxVE/pull/13575)) + +## 2026-04-06 + +### 🆕 New Scripts + + - OpenThread Border Router ([#13536](https://github.com/community-scripts/ProxmoxVE/pull/13536)) +- Homelable ([#13539](https://github.com/community-scripts/ProxmoxVE/pull/13539)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Papra: check env before copy [@MickLesk](https://github.com/MickLesk) ([#13553](https://github.com/community-scripts/ProxmoxVE/pull/13553)) + - changedetection: fix: typing_extensions error [@CrazyWolf13](https://github.com/CrazyWolf13) ([#13548](https://github.com/community-scripts/ProxmoxVE/pull/13548)) + - kasm: fix: fetch latest version [@CrazyWolf13](https://github.com/CrazyWolf13) ([#13547](https://github.com/community-scripts/ProxmoxVE/pull/13547)) + +## 2026-04-05 + +### 🚀 Updated 
Scripts + + - #### 🐞 Bug Fixes + + - Grist: remove install:ee step (private repo, not needed for grist-core) [@MickLesk](https://github.com/MickLesk) ([#13526](https://github.com/community-scripts/ProxmoxVE/pull/13526)) + - Nginx Proxy Manager: ensure /tmp/nginx/body exists via openresty service [@MickLesk](https://github.com/MickLesk) ([#13528](https://github.com/community-scripts/ProxmoxVE/pull/13528)) + - MotionEye: run as root to enable SMB share support [@MickLesk](https://github.com/MickLesk) ([#13527](https://github.com/community-scripts/ProxmoxVE/pull/13527)) + +### 💾 Core + + - #### 🔧 Refactor + + - core: silent() function - use return instead of exit to allow || true error handling [@MickLesk](https://github.com/MickLesk) ([#13529](https://github.com/community-scripts/ProxmoxVE/pull/13529)) + +## 2026-04-04 + +### 🧰 Tools + + - #### 🐞 Bug Fixes + + - komodo: set `PERIPHERY_CORE_PUBLIC_KEYS` to default value if absent [@4ndv](https://github.com/4ndv) ([#13519](https://github.com/community-scripts/ProxmoxVE/pull/13519)) + +## 2026-04-03 + +### 🆕 New Scripts + + - netboot.xyz ([#13480](https://github.com/community-scripts/ProxmoxVE/pull/13480)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - OpenWRT-VM: use poweroff instead of halt to properly stop VM [@MickLesk](https://github.com/MickLesk) ([#13504](https://github.com/community-scripts/ProxmoxVE/pull/13504)) + - NginxProxyManager: fix openresty restart by setting user root before reload [@MickLesk](https://github.com/MickLesk) ([#13500](https://github.com/community-scripts/ProxmoxVE/pull/13500)) + + - #### ✨ New Features + + - Crafty Controller: add Java 25 for Minecraft 1.26.1+ [@MickLesk](https://github.com/MickLesk) ([#13502](https://github.com/community-scripts/ProxmoxVE/pull/13502)) + - Wealthfolio: update to v3.2.1 and Node.js 24 [@afadil](https://github.com/afadil) ([#13486](https://github.com/community-scripts/ProxmoxVE/pull/13486)) + +### 💾 Core + + - #### 🐞 Bug Fixes + + - core.func: 
prevent profile.d scripts from aborting on non-zero exit [@MickLesk](https://github.com/MickLesk) ([#13503](https://github.com/community-scripts/ProxmoxVE/pull/13503)) + + - #### ✨ New Features + + - APT Proxy: Support full URLs (http/https with custom ports) [@MickLesk](https://github.com/MickLesk) ([#13474](https://github.com/community-scripts/ProxmoxVE/pull/13474)) + +### 🧰 Tools + + - #### 🐞 Bug Fixes + + - PVE LXC-Updater: pipe apt list through cat to prevent pager hang [@MickLesk](https://github.com/MickLesk) ([#13501](https://github.com/community-scripts/ProxmoxVE/pull/13501)) + +## 2026-04-02 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Grist: Guard backup restore for empty docs/db files [@MickLesk](https://github.com/MickLesk) ([#13472](https://github.com/community-scripts/ProxmoxVE/pull/13472)) + - fix(zigbee2mqtt): suppress grep error when pnpm-workspace.yaml is absent on update [@Copilot](https://github.com/Copilot) ([#13476](https://github.com/community-scripts/ProxmoxVE/pull/13476)) + +### 🧰 Tools + + - #### 🐞 Bug Fixes + + - Cron LXC Updater: Add full PATH for cron environment [@MickLesk](https://github.com/MickLesk) ([#13473](https://github.com/community-scripts/ProxmoxVE/pull/13473)) + +## 2026-04-01 + +### 🆕 New Scripts + + - DrawDB ([#13454](https://github.com/community-scripts/ProxmoxVE/pull/13454)) + +### 🧰 Tools + + - #### 🐞 Bug Fixes + + - Filebrowser: make noauth setup use correct database [@MickLesk](https://github.com/MickLesk) ([#13457](https://github.com/community-scripts/ProxmoxVE/pull/13457)) diff --git a/.github/changelogs/2026/05.md b/.github/changelogs/2026/05.md new file mode 100644 index 000000000..7f86da0f9 --- /dev/null +++ b/.github/changelogs/2026/05.md @@ -0,0 +1,173 @@ +## 2026-05-09 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - FlowiseAI: Migrate to pnpm [@MickLesk](https://github.com/MickLesk) ([#14344](https://github.com/community-scripts/ProxmoxVE/pull/14344)) + - Purge openresty 
[@lucacome](https://github.com/lucacome) ([#14353](https://github.com/community-scripts/ProxmoxVE/pull/14353)) + - Check for release for Sonarr [@lucacome](https://github.com/lucacome) ([#14354](https://github.com/community-scripts/ProxmoxVE/pull/14354)) + - fix(termix-install.sh): add tmpfiles.d persistence and systemd PIDFile path [@runnylogan](https://github.com/runnylogan) ([#14350](https://github.com/community-scripts/ProxmoxVE/pull/14350)) + - ERPNext: start bench Redis services before bench new-site [@MickLesk](https://github.com/MickLesk) ([#14343](https://github.com/community-scripts/ProxmoxVE/pull/14343)) + - [Hotfix]Jotty: use absolute path when creating data dir [@vhsdream](https://github.com/vhsdream) ([#14355](https://github.com/community-scripts/ProxmoxVE/pull/14355)) + +## 2026-05-08 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - wishlist: pin pnpm to v10 to match engine requirements [@MickLesk](https://github.com/MickLesk) ([#14342](https://github.com/community-scripts/ProxmoxVE/pull/14342)) + - [pelican] fix env copy regression [@LetterN](https://github.com/LetterN) ([#14328](https://github.com/community-scripts/ProxmoxVE/pull/14328)) + - fix(homepage): fix ERR_PNPM_IGNORED_BUILDS error [@Sergih28](https://github.com/Sergih28) ([#14315](https://github.com/community-scripts/ProxmoxVE/pull/14315)) + + - #### ✨ New Features + + - tools.func: add setup_nltk as new function [@MickLesk](https://github.com/MickLesk) ([#14314](https://github.com/community-scripts/ProxmoxVE/pull/14314)) + +### 💾 Core + + - #### 🐞 Bug Fixes + + - tools.func: fix meilisearch import-dump background process handling [@MickLesk](https://github.com/MickLesk) ([#14341](https://github.com/community-scripts/ProxmoxVE/pull/14341)) + +## 2026-05-07 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - termix: create /tmp/nginx before nginx -t [@MickLesk](https://github.com/MickLesk) ([#14312](https://github.com/community-scripts/ProxmoxVE/pull/14312)) + - The Lounge: Fix 
service not starting automatically [@tremor021](https://github.com/tremor021) ([#14311](https://github.com/community-scripts/ProxmoxVE/pull/14311)) + - netbird-lxc: fix installation check [@MickLesk](https://github.com/MickLesk) ([#14309](https://github.com/community-scripts/ProxmoxVE/pull/14309)) + - databasus: Backup and secure configuration file [@MickLesk](https://github.com/MickLesk) ([#14308](https://github.com/community-scripts/ProxmoxVE/pull/14308)) + - vm: update disk image URL for Ubuntu 25.04 [@MickLesk](https://github.com/MickLesk) ([#14290](https://github.com/community-scripts/ProxmoxVE/pull/14290)) + + - #### ✨ New Features + + - pangolin: bump version to 1.18.3 [@MickLesk](https://github.com/MickLesk) ([#14297](https://github.com/community-scripts/ProxmoxVE/pull/14297)) + +### 🗑️ Deleted Scripts + + - Remove: LiteLLM [@michelroegl-brunner](https://github.com/michelroegl-brunner) ([#14294](https://github.com/community-scripts/ProxmoxVE/pull/14294)) + +### 💾 Core + + - #### ✨ New Features + + - update-apps: some improvements [@MickLesk](https://github.com/MickLesk) ([#14275](https://github.com/community-scripts/ProxmoxVE/pull/14275)) + +## 2026-05-06 + +### 🆕 New Scripts + + - Hoodik ([#14279](https://github.com/community-scripts/ProxmoxVE/pull/14279)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Pelican-Panel: create backup subdirectory before copying storage [@MickLesk](https://github.com/MickLesk) ([#14274](https://github.com/community-scripts/ProxmoxVE/pull/14274)) + - Rustdeskserver: remove redundant else with undefined RELEASE var [@MickLesk](https://github.com/MickLesk) ([#14272](https://github.com/community-scripts/ProxmoxVE/pull/14272)) + +### 🧰 Tools + + - #### 🔧 Refactor + + - AdguardHome-Sync replace ifconfig with hostname -I for IP detection [@MickLesk](https://github.com/MickLesk) ([#14273](https://github.com/community-scripts/ProxmoxVE/pull/14273)) + +## 2026-05-05 + +### 🆕 New Scripts + + - LibreChat 
([#14247](https://github.com/community-scripts/ProxmoxVE/pull/14247)) +- Matomo ([#14248](https://github.com/community-scripts/ProxmoxVE/pull/14248)) +- Storyteller ([#14122](https://github.com/community-scripts/ProxmoxVE/pull/14122)) + +### 🧰 Tools + + - Fix container count message in update-apps.sh [@Quotacious](https://github.com/Quotacious) ([#14265](https://github.com/community-scripts/ProxmoxVE/pull/14265)) + +## 2026-05-04 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Databasus: move .env to filesystem root so service starts correctly [@Copilot](https://github.com/Copilot) ([#14252](https://github.com/community-scripts/ProxmoxVE/pull/14252)) + - Databasus: update mongo-tools fallback to 100.16.1 and now use pnpm instead of npm ci [@MickLesk](https://github.com/MickLesk) ([#14240](https://github.com/community-scripts/ProxmoxVE/pull/14240)) + +### 💾 Core + + - #### ✨ New Features + + - tools.func get_latest_gh_tag - add pagination to find prefixed tags beyond first 50 [@MickLesk](https://github.com/MickLesk) ([#14241](https://github.com/community-scripts/ProxmoxVE/pull/14241)) + - tools.func: add GitLab release check/fetch/deploy helpers [@MickLesk](https://github.com/MickLesk) ([#14242](https://github.com/community-scripts/ProxmoxVE/pull/14242)) + +## 2026-05-03 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Hortusfox: fix update issues [@tomfrenzel](https://github.com/tomfrenzel) ([#14214](https://github.com/community-scripts/ProxmoxVE/pull/14214)) + + - #### ✨ New Features + + - Refactor: PeaNUT for v6 [@MickLesk](https://github.com/MickLesk) ([#14224](https://github.com/community-scripts/ProxmoxVE/pull/14224)) + - pangolin: pin version, drop manual SQL, use upstream migrator [@MickLesk](https://github.com/MickLesk) ([#14223](https://github.com/community-scripts/ProxmoxVE/pull/14223)) + +### 💾 Core + + - #### 🐞 Bug Fixes + + - core: fix validate_bridge function [@MichaelOultram](https://github.com/MichaelOultram) 
([#14206](https://github.com/community-scripts/ProxmoxVE/pull/14206)) + +### 🧰 Tools + + - #### 🐞 Bug Fixes + + - pve/pbs scripts: guard sed against missing /etc/apt/sources.list [@MickLesk](https://github.com/MickLesk) ([#14222](https://github.com/community-scripts/ProxmoxVE/pull/14222)) + +## 2026-05-02 + +### 🆕 New Scripts + + - protonmail-bridge ([#14136](https://github.com/community-scripts/ProxmoxVE/pull/14136)) +- Tube Archivist ([#14123](https://github.com/community-scripts/ProxmoxVE/pull/14123)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Nagios: Ping fix [@tremor021](https://github.com/tremor021) ([#14186](https://github.com/community-scripts/ProxmoxVE/pull/14186)) + - opnsense-vm: retry pvesm alloc on transient zfs 'got timeout' errors [@MickLesk](https://github.com/MickLesk) ([#14157](https://github.com/community-scripts/ProxmoxVE/pull/14157)) + - ImmichFrame: fix update by reinstalling dotnet-sdk before publish [@MickLesk](https://github.com/MickLesk) ([#14158](https://github.com/community-scripts/ProxmoxVE/pull/14158)) + - [FIX]ShelfMark: Use UV sync for shelfmark backend build; update to Python 3.14 [@vhsdream](https://github.com/vhsdream) ([#14170](https://github.com/community-scripts/ProxmoxVE/pull/14170)) + - alpine: remove deb/ubuntu-only resource & storage checks from update-script [@MickLesk](https://github.com/MickLesk) ([#14166](https://github.com/community-scripts/ProxmoxVE/pull/14166)) + - Threadfin: use 'threadfin-app' as app name to avoid version-file clash [@MickLesk](https://github.com/MickLesk) ([#14159](https://github.com/community-scripts/ProxmoxVE/pull/14159)) + +### 💾 Core + + - #### ✨ New Features + + - core: prompt to also run installed addon update scripts (…/bin/update_*) after update_script [@MickLesk](https://github.com/MickLesk) ([#14162](https://github.com/community-scripts/ProxmoxVE/pull/14162)) + +## 2026-05-01 + +### 🆕 New Scripts + + - SoulSync 
([#14124](https://github.com/community-scripts/ProxmoxVE/pull/14124)) +- Teable ([#14125](https://github.com/community-scripts/ProxmoxVE/pull/14125)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Step ca update [@heinemannj](https://github.com/heinemannj) ([#14058](https://github.com/community-scripts/ProxmoxVE/pull/14058)) + - paperless-ngx: refresh NLTK data on update [@kurtislanderson](https://github.com/kurtislanderson) ([#14144](https://github.com/community-scripts/ProxmoxVE/pull/14144)) + - [Pelican Panel] stop deleting the public storage [@LetterN](https://github.com/LetterN) ([#14145](https://github.com/community-scripts/ProxmoxVE/pull/14145)) + + - #### 🔧 Refactor + + - Mail-Archiver: update dependencies [@tremor021](https://github.com/tremor021) ([#14152](https://github.com/community-scripts/ProxmoxVE/pull/14152)) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index b3e976091..4578d1973 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -22,6 +22,6 @@ Fixes # - [ ] ✨ **New feature** – Adds new, non-breaking functionality. - [ ] 💥 **Breaking change** – Alters existing functionality in a way that may require updates. - [ ] 🆕 **New script** – A fully functional and tested script or script set. -- [ ] 🌍 **Website update** – Changes to website-related JSON files or metadata. +- [ ] 🌍 **Website update** – Changes to script metadata (PocketBase/website data). - [ ] 🔧 **Refactoring / Code Cleanup** – Improves readability or maintainability without changing functionality. - [ ] 📝 **Documentation update** – Changes to `README`, `AppName.md`, `CONTRIBUTING.md`, or other docs. 
diff --git a/.github/runner/docker/gh-runner-self.dockerfile b/.github/runner/docker/gh-runner-self.dockerfile deleted file mode 100644 index e5ae072ab..000000000 --- a/.github/runner/docker/gh-runner-self.dockerfile +++ /dev/null @@ -1,68 +0,0 @@ -FROM mcr.microsoft.com/dotnet/runtime-deps:8.0-jammy as build - -ARG TARGETOS -ARG TARGETARCH -ARG DOCKER_VERSION=27.5.1 -ARG BUILDX_VERSION=0.20.1 -ARG RUNNER_ARCH="x64" - -RUN apt update -y && apt install sudo curl unzip -y - -WORKDIR /actions-runner - -RUN RUNNER_VERSION=$(curl -s https://api.github.com/repos/actions/runner/releases/latest | grep "tag_name" | head -n 1 | awk '{print substr($2, 3, length($2)-4)}') \ - && curl -f -L -o runner.tar.gz https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/actions-runner-linux-${RUNNER_ARCH}-${RUNNER_VERSION}.tar.gz \ - && tar xzf ./runner.tar.gz \ - && rm runner.tar.gz - -RUN RUNNER_CONTAINER_HOOKS_VERSION=$(curl -s https://api.github.com/repos/actions/runner-container-hooks/releases/latest | grep "tag_name" | head -n 1 | awk '{print substr($2, 3, length($2)-4)}') \ - && curl -f -L -o runner-container-hooks.zip https://github.com/actions/runner-container-hooks/releases/download/v${RUNNER_CONTAINER_HOOKS_VERSION}/actions-runner-hooks-k8s-${RUNNER_CONTAINER_HOOKS_VERSION}.zip \ - && unzip ./runner-container-hooks.zip -d ./k8s \ - && rm runner-container-hooks.zip - -RUN export RUNNER_ARCH=${TARGETARCH} \ - && if [ "$RUNNER_ARCH" = "amd64" ]; then export DOCKER_ARCH=x86_64 ; fi \ - && if [ "$RUNNER_ARCH" = "arm64" ]; then export DOCKER_ARCH=aarch64 ; fi \ - && curl -fLo docker.tgz https://download.docker.com/${TARGETOS}/static/stable/${DOCKER_ARCH}/docker-${DOCKER_VERSION}.tgz \ - && tar zxvf docker.tgz \ - && rm -rf docker.tgz \ - && mkdir -p /usr/local/lib/docker/cli-plugins \ - && curl -fLo /usr/local/lib/docker/cli-plugins/docker-buildx \ - "https://github.com/docker/buildx/releases/download/v${BUILDX_VERSION}/buildx-v${BUILDX_VERSION}.linux-${TARGETARCH}" 
\ - && chmod +x /usr/local/lib/docker/cli-plugins/docker-buildx - -FROM mcr.microsoft.com/dotnet/runtime-deps:8.0-jammy - -ENV DEBIAN_FRONTEND=noninteractive -ENV RUNNER_MANUALLY_TRAP_SIG=1 -ENV ACTIONS_RUNNER_PRINT_LOG_TO_STDOUT=1 -ENV ImageOS=ubuntu22 - -RUN apt update -y \ - && apt install -y --no-install-recommends sudo lsb-release gpg-agent software-properties-common curl jq unzip \ - && rm -rf /var/lib/apt/lists/* - -RUN add-apt-repository ppa:git-core/ppa \ - && apt update -y \ - && apt install -y git \ - && rm -rf /var/lib/apt/lists/* - -RUN adduser --disabled-password --gecos "" --uid 1001 runner \ - && groupadd docker --gid 123 \ - && usermod -aG sudo runner \ - && usermod -aG docker runner \ - && echo "%sudo ALL=(ALL:ALL) NOPASSWD:ALL" > /etc/sudoers \ - && echo "Defaults env_keep += \"DEBIAN_FRONTEND\"" >> /etc/sudoers - -# Install own dependencies in final image -RUN curl -fsSL https://deb.nodesource.com/setup_22.x | bash - \ - && apt-get install -y nodejs \ - && apt-get install -y gh jq git - -WORKDIR /home/runner - -COPY --chown=runner:docker --from=build /actions-runner . 
-COPY --from=build /usr/local/lib/docker/cli-plugins/docker-buildx /usr/local/lib/docker/cli-plugins/docker-buildx -RUN install -o root -g root -m 755 docker/* /usr/bin/ && rm -rf docker - -USER runner diff --git a/.github/workflows/autolabeler.yml b/.github/workflows/autolabeler.yml index 81f381c55..899a8d3c3 100644 --- a/.github/workflows/autolabeler.yml +++ b/.github/workflows/autolabeler.yml @@ -93,7 +93,7 @@ jobs: const websiteRegex = new RegExp(`- \\[(x|X)\\]\\s*${escapedWebsite}`, "i"); if (websiteRegex.test(prBody)) { - const hasJson = prFiles.some((f) => f.filename.startsWith("frontend/public/json/")); + const hasJson = prFiles.some((f) => f.filename.startsWith("json/")); const hasUpdateScript = labelsToAdd.has("update script"); const hasContentLabel = ["bugfix", "feature", "refactor"].some((l) => labelsToAdd.has(l)); diff --git a/.github/workflows/bak/close_template_issue.yml b/.github/workflows/bak/close_template_issue.yml deleted file mode 100644 index b87923bc4..000000000 --- a/.github/workflows/bak/close_template_issue.yml +++ /dev/null @@ -1,55 +0,0 @@ -name: Auto-Close Wrong Template Issues -on: - issues: - types: [opened] - -jobs: - close_tteck_issues: - if: github.repository == 'community-scripts/ProxmoxVE' - runs-on: ubuntu-latest - steps: - - name: Auto-close if wrong Template issue detected - uses: actions/github-script@v7 - with: - script: | - const issue = context.payload.issue; - const content = `${issue.title}\n${issue.body}`; - const issueNumber = issue.number; - - // Regex patterns (case-insensitive, flexible versioning) - const patterns = [ - /Template\s+debian-13-standard_[\d.]+-[\d]+_amd64\.tar\.zst\s*\[(online|local)\]/i, - /Template\s+debian-13-standard_[\d.]+-[\d]+_amd64\.tar\.zst\s+is\s+missing\s+or\s+corrupted/i, - /Container\s+creation\s+failed\.?\s+Checking\s+if\s+template\s+is\s+corrupted\s+or\s+incomplete/i, - /Template\s+is\s+valid,\s+but\s+container\s+creation\s+still\s+failed/i, - 
/exit\s+code\s+0:\s+while\s+executing\s+command\s+bash\s+-c\s+"\$?\(curl\s+-fsSL\s+https:\/\/raw\.githubusercontent\.com\/[\w/-]+\/create_lxc\.sh\)"/i - ]; - - const matched = patterns.some((regex) => regex.test(content)); - - if (matched) { - const message = "👋 Hello!\n\n" + - "It looks like you are referencing a **container creation issue with a Debian 13 template** (e.g. `debian-13-standard_13.x-x_amd64.tar.zst`).\n\n" + - "We receive many similar reports about this, and it's not related to the scripts themselves but to **a Proxmox base template bug**.\n\n" + - "Please refer to [discussion #8126](https://github.com/community-scripts/ProxmoxVE/discussions/8126) for details.\n" + - "If your issue persists after following the guidance there, feel free to reopen this issue.\n\n" + - "_This issue was automatically closed by a bot._"; - - await github.rest.issues.createComment({ - ...context.repo, - issue_number: issueNumber, - body: message - }); - - await github.rest.issues.addLabels({ - ...context.repo, - issue_number: issueNumber, - labels: ["not planned"] - }); - - await github.rest.issues.update({ - ...context.repo, - issue_number: issueNumber, - state: "closed" - }); - } diff --git a/.github/workflows/bak/crawl-versions.yaml b/.github/workflows/bak/crawl-versions.yaml deleted file mode 100644 index 4f8e9a003..000000000 --- a/.github/workflows/bak/crawl-versions.yaml +++ /dev/null @@ -1,126 +0,0 @@ -name: Crawl Versions from newreleases.io - -on: - workflow_dispatch: - schedule: - # Runs at 12:00 AM and 12:00 PM UTC - - cron: "0 0,12 * * *" - -permissions: - contents: write - pull-requests: write - -jobs: - crawl-versions: - if: github.repository == 'community-scripts/ProxmoxVE' - runs-on: ubuntu-latest - - steps: - - name: Checkout Repository - uses: actions/checkout@v2 - with: - repository: community-scripts/ProxmoxVE - ref: main - - - name: Generate a token - id: generate-token - uses: actions/create-github-app-token@v1 - with: - app-id: ${{ vars.APP_ID }} - 
private-key: ${{ secrets.APP_PRIVATE_KEY }} - - - name: Crawl from newreleases.io - env: - token: ${{ secrets.NEWRELEASES_TOKEN }} - run: | - page=1 - projects_file="project_json" - output_file="frontend/public/json/versions.json" - - echo "[]" > $output_file - - while true; do - - echo "Start loop on page: $page" - - projects=$(curl -s -H "X-Key: $token" "https://api.newreleases.io/v1/projects?page=$page") - total_pages=$(echo "$projects" | jq -r '.total_pages') - - if [ -z "$total_pages" ] || [ "$total_pages" -eq 0 ]; then - echo "No pages available. Exiting." - exit 1 - fi - if [ $page == $total_pages ]; then - break - fi - - if [ -z "$projects" ] || ! echo "$projects" | jq -e '.projects' > /dev/null; then - echo "No more projects or invalid response. Exiting." - break - fi - - echo "$projects" > "$projects_file" - - jq -r '.projects[] | "\(.id) \(.name)"' "$projects_file" | while read -r id name; do - version=$(curl -s -H "X-Key: $token" "https://api.newreleases.io/v1/projects/$id/latest-release") - version_data=$(echo "$version" | jq -r '.version // empty') - date=$(echo "$version" | jq -r '.date // empty') - if [ -n "$version_data" ]; then - jq --arg name "$name" --arg version "$version_data" --arg date "$date" \ - '. += [{"name": $name, "version": $version, "date": $date}]' "$output_file" > "$output_file.tmp" && mv "$output_file.tmp" "$output_file" - fi - done - ((page++)) - done - - - name: Commit JSON - env: - GH_TOKEN: ${{ steps.generate-token.outputs.token }} - run: | - git config --global user.email "github-actions[bot]@users.noreply.github.com" - git config --global user.name "GitHub Actions[bot]" - git checkout -b update_versions || git checkout update_versions - git add frontend/public/json/versions.json - if git diff --cached --quiet; then - echo "No changes detected." 
- echo "changed=false" >> "$GITHUB_ENV" - exit 0 - else - echo "Changes detected:" - git diff --stat --cached - echo "changed=true" >> "$GITHUB_ENV" - fi - git commit -m "Update versions.json" - git push origin update_versions --force - gh pr create --title "[Github Action] Update versions.json" --body "Update versions.json, crawled from newreleases.io" --base main --head update_versions --label "automated pr" - - - name: Approve pull request - if: env.changed == 'true' - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - PR_NUMBER=$(gh pr list --head "update_versions" --json number --jq '.[].number') - if [ -n "$PR_NUMBER" ]; then - gh pr review $PR_NUMBER --approve - fi - - - name: Approve pull request and merge - if: env.changed == 'true' - env: - GH_TOKEN: ${{ secrets.PAT_AUTOMERGE }} - run: | - PR_NUMBER=$(gh pr list --head "update_versions" --json number --jq '.[].number') - if [ -n "$PR_NUMBER" ]; then - gh pr review $PR_NUMBER --approve - gh pr merge $PR_NUMBER --squash --admin - fi - - - name: Re-approve pull request after update - if: env.changed == 'true' - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - PR_NUMBER=$(gh pr list --head "update_versions" --json number --jq '.[].number') - if [ -n "$PR_NUMBER" ]; then - gh pr review $PR_NUMBER --approve - fi diff --git a/.github/workflows/bak/script-test.yml b/.github/workflows/bak/script-test.yml deleted file mode 100644 index eb53c366d..000000000 --- a/.github/workflows/bak/script-test.yml +++ /dev/null @@ -1,175 +0,0 @@ -name: Run Scripts on PVE Node for testing -permissions: - pull-requests: write -on: - pull_request_target: - branches: - - main - paths: - - "install/**.sh" - - "ct/**.sh" - -jobs: - run-install-script: - if: github.repository == 'community-scripts/ProxmoxVE' - runs-on: pvenode - steps: - - name: Checkout PR branch - uses: actions/checkout@v4 - with: - ref: ${{ github.event.pull_request.head.ref }} - repository: ${{ github.event.pull_request.head.repo.full_name }} - 
fetch-depth: 0 - - - name: Add Git safe directory - run: | - git config --global --add safe.directory /__w/ProxmoxVE/ProxmoxVE - - - name: Set up GH_TOKEN - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - echo "GH_TOKEN=${GH_TOKEN}" >> $GITHUB_ENV - - - name: Get Changed Files - run: | - CHANGED_FILES=$(gh pr diff ${{ github.event.pull_request.number }} --repo ${{ github.repository }} --name-only) - CHANGED_FILES=$(echo "$CHANGED_FILES" | tr '\n' ' ') - echo "Changed files: $CHANGED_FILES" - echo "SCRIPT=$CHANGED_FILES" >> $GITHUB_ENV - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Get scripts - id: check-install-script - run: | - ALL_FILES=() - ADDED_FILES=() - for FILE in ${{ env.SCRIPT }}; do - if [[ $FILE =~ ^install/.*-install\.sh$ ]] || [[ $FILE =~ ^ct/.*\.sh$ ]]; then - STRIPPED_NAME=$(basename "$FILE" | sed 's/-install//' | sed 's/\.sh$//') - if [[ ! " ${ADDED_FILES[@]} " =~ " $STRIPPED_NAME " ]]; then - ALL_FILES+=("$FILE") - ADDED_FILES+=("$STRIPPED_NAME") # Mark this base file as added (without the path) - fi - fi - done - ALL_FILES=$(echo "${ALL_FILES[@]}" | xargs) - echo "$ALL_FILES" - echo "ALL_FILES=$ALL_FILES" >> $GITHUB_ENV - - - name: Run scripts - id: run-install - continue-on-error: true - run: | - set +e - #run for each files in /ct - for FILE in ${{ env.ALL_FILES }}; do - STRIPPED_NAME=$(basename "$FILE" | sed 's/-install//' | sed 's/\.sh$//') - echo "Running Test for: $STRIPPED_NAME" - if grep -E -q 'read\s+-r\s+-p\s+".*"\s+\w+' "$FILE"; then - echo "The script contains an interactive prompt. Skipping execution." - continue - fi - if [[ $FILE =~ ^install/.*-install\.sh$ ]]; then - CT_SCRIPT="ct/$STRIPPED_NAME.sh" - if [[ ! 
-f $CT_SCRIPT ]]; then - echo "No CT script found for $STRIPPED_NAME" - ERROR_MSG="No CT script found for $FILE" - echo "$ERROR_MSG" > result_$STRIPPED_NAME.log - continue - fi - if grep -E -q 'read\s+-r\s+-p\s+".*"\s+\w+' "install/$STRIPPED_NAME-install.sh"; then - echo "The script contains an interactive prompt. Skipping execution." - continue - fi - echo "Found CT script for $STRIPPED_NAME" - chmod +x "$CT_SCRIPT" - RUNNING_FILE=$CT_SCRIPT - elif [[ $FILE =~ ^ct/.*\.sh$ ]]; then - INSTALL_SCRIPT="install/$STRIPPED_NAME-install.sh" - if [[ ! -f $INSTALL_SCRIPT ]]; then - echo "No install script found for $STRIPPED_NAME" - ERROR_MSG="No install script found for $FILE" - echo "$ERROR_MSG" > result_$STRIPPED_NAME.log - continue - fi - echo "Found install script for $STRIPPED_NAME" - chmod +x "$INSTALL_SCRIPT" - RUNNING_FILE=$FILE - if grep -E -q 'read\s+-r\s+-p\s+".*"\s+\w+' "ct/$STRIPPED_NAME.sh"; then - echo "The script contains an interactive prompt. Skipping execution." - continue - fi - fi - git remote add community-scripts https://github.com/community-scripts/ProxmoxVE.git - git fetch community-scripts - rm -f .github/workflows/scripts/app-test/pr-build.func || true - rm -f .github/workflows/scripts/app-test/pr-install.func || true - rm -f .github/workflows/scripts/app-test/pr-alpine-install.func || true - rm -f .github/workflows/scripts/app-test/pr-create-lxc.sh || true - git checkout community-scripts/main -- .github/workflows/scripts/app-test/pr-build.func - git checkout community-scripts/main -- .github/workflows/scripts/app-test/pr-install.func - git checkout community-scripts/main -- .github/workflows/scripts/app-test/pr-alpine-install.func - git checkout community-scripts/main -- .github/workflows/scripts/app-test/pr-create-lxc.sh - chmod +x $RUNNING_FILE - chmod +x .github/workflows/scripts/app-test/pr-create-lxc.sh - chmod +x .github/workflows/scripts/app-test/pr-install.func - chmod +x .github/workflows/scripts/app-test/pr-alpine-install.func - chmod 
+x .github/workflows/scripts/app-test/pr-build.func - sed -i 's|source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)|source .github/workflows/scripts/app-test/pr-build.func|g' "$RUNNING_FILE" - echo "Executing $RUNNING_FILE" - ERROR_MSG=$(./$RUNNING_FILE 2>&1 > /dev/null) - echo "Finished running $FILE" - if [ -n "$ERROR_MSG" ]; then - echo "ERROR in $STRIPPED_NAME: $ERROR_MSG" - echo "$ERROR_MSG" > result_$STRIPPED_NAME.log - fi - done - set -e # Restore exit-on-error - - - name: Cleanup PVE Node - run: | - containers=$(pct list | tail -n +2 | awk '{print $0 " " $4}' | awk '{print $1}') - - for container_id in $containers; do - status=$(pct status $container_id | awk '{print $2}') - if [[ $status == "running" ]]; then - pct stop $container_id - pct destroy $container_id - fi - done - - - name: Post error comments - run: | - ERROR="false" - SEARCH_LINE=".github/workflows/scripts/app-test/pr-build.func: line 255:" - - # Get all existing comments on the PR - EXISTING_COMMENTS=$(gh pr view ${{ github.event.pull_request.number }} --repo ${{ github.repository }} --json comments --jq '.comments[].body') - - for FILE in ${{ env.ALL_FILES }}; do - STRIPPED_NAME=$(basename "$FILE" | sed 's/-install//' | sed 's/\.sh$//') - if [[ ! -f result_$STRIPPED_NAME.log ]]; then - continue - fi - ERROR_MSG=$(cat result_$STRIPPED_NAME.log) - - if [ -n "$ERROR_MSG" ]; then - CLEANED_ERROR_MSG=$(echo "$ERROR_MSG" | sed "s|$SEARCH_LINE.*||") - COMMENT_BODY=":warning: The script _**$FILE**_ failed with the following message:
${CLEANED_ERROR_MSG}
" - - # Check if the comment already exists - if echo "$EXISTING_COMMENTS" | grep -qF "$COMMENT_BODY"; then - echo "Skipping duplicate comment for $FILE" - else - echo "Posting error message for $FILE" - gh pr comment ${{ github.event.pull_request.number }} \ - --repo ${{ github.repository }} \ - --body "$COMMENT_BODY" - ERROR="true" - fi - fi - done - - echo "ERROR=$ERROR" >> $GITHUB_ENV diff --git a/.github/workflows/bak/script_format.yml b/.github/workflows/bak/script_format.yml deleted file mode 100644 index 64a2eda42..000000000 --- a/.github/workflows/bak/script_format.yml +++ /dev/null @@ -1,243 +0,0 @@ -name: Script Format Check -permissions: - pull-requests: write -on: - pull_request_target: - branches: - - main - paths: - - "install/*.sh" - - "ct/*.sh" - -jobs: - run-install-script: - if: github.repository == 'community-scripts/ProxmoxVE' - runs-on: pvenode - steps: - - name: Checkout PR branch (supports forks) - uses: actions/checkout@v4 - with: - ref: ${{ github.event.pull_request.head.ref }} - repository: ${{ github.event.pull_request.head.repo.full_name }} - fetch-depth: 0 - - - name: Add Git safe directory - run: | - git config --global --add safe.directory /__w/ProxmoxVE/ProxmoxVE - - - name: Set up GH_TOKEN - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - echo "GH_TOKEN=${GH_TOKEN}" >> $GITHUB_ENV - - - name: Get Changed Files - run: | - CHANGED_FILES=$(gh pr diff ${{ github.event.pull_request.number }} --repo ${{ github.repository }} --name-only) - CHANGED_FILES=$(echo "$CHANGED_FILES" | tr '\n' ' ') - echo "Changed files: $CHANGED_FILES" - echo "SCRIPT=$CHANGED_FILES" >> $GITHUB_ENV - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Check scripts - id: run-install - continue-on-error: true - run: | - for FILE in ${{ env.SCRIPT }}; do - STRIPPED_NAME=$(basename "$FILE" | sed 's/-install//' | sed 's/\.sh$//') - echo "Running Test for: $STRIPPED_NAME" - FILE_STRIPPED="${FILE##*/}" - LOG_FILE="result_$FILE_STRIPPED.log" - - if [[ 
$FILE =~ ^ct/.*\.sh$ ]]; then - - FIRST_LINE=$(sed -n '1p' "$FILE") - [[ "$FIRST_LINE" != "#!/usr/bin/env bash" ]] && echo "Line 1 was $FIRST_LINE | Should be: #!/usr/bin/env bash" >> "$LOG_FILE" - SECOND_LINE=$(sed -n '2p' "$FILE") - [[ "$SECOND_LINE" != "source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)" ]] && - echo "Line 2 was $SECOND_LINE | Should be: source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)" >> "$LOG_FILE" - THIRD_LINE=$(sed -n '3p' "$FILE") - if ! [[ "$THIRD_LINE" =~ ^#\ Copyright\ \(c\)\ [0-9]{4}-[0-9]{4}\ community-scripts\ ORG$ || "$THIRD_LINE" =~ ^Copyright\ \(c\)\ [0-9]{4}-[0-9]{4}\ tteck$ ]]; then - echo "Line 3 was $THIRD_LINE | Should be: # Copyright (c) 2021-2026 community-scripts ORG" >> "$LOG_FILE" - fi - - EXPECTED_AUTHOR="# Author:" - EXPECTED_LICENSE="# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE" - EXPECTED_SOURCE="# Source:" - EXPECTED_EMPTY="" - - for i in {4..7}; do - LINE=$(sed -n "${i}p" "$FILE") - - case $i in - 4) - [[ $LINE == $EXPECTED_AUTHOR* ]] || printf "Line %d was: '%s' | Should start with: '%s'\n" "$i" "$LINE" "$EXPECTED_AUTHOR" >> $LOG_FILE - ;; - 5) - [[ "$LINE" == "$EXPECTED_LICENSE" ]] || printf "Line %d was: '%s' | Should be: '%s'\n" "$i" "$LINE" "$EXPECTED_LICENSE" >> $LOG_FILE - ;; - 6) - [[ $LINE == $EXPECTED_SOURCE* ]] || printf "Line %d was: '%s' | Should start with: '%s'\n" "$i" "$LINE" "$EXPECTED_SOURCE" >> $LOG_FILE - ;; - 7) - [[ -z $LINE ]] || printf "Line %d was: '%s' | Should be empty\n" "$i" "$LINE" >> $LOG_FILE - ;; - esac - done - - - EXPECTED_PREFIXES=( - "APP=" - "var_tags=" - "var_cpu=" # Must be a number - "var_ram=" # Must be a number - "var_disk=" # Must be a number - "var_os=" # Must be debian, alpine, or ubuntu - "var_version=" - "var_unprivileged=" # Must be 0 or 1 - ) - - - for i in {8..15}; do - LINE=$(sed -n "${i}p" "$FILE") - INDEX=$((i - 8)) - - 
case $INDEX in - 2|3|4) # var_cpu, var_ram, var_disk (must be numbers) - if [[ "$LINE" =~ ^${EXPECTED_PREFIXES[$INDEX]}([0-9]+)$ ]]; then - continue # Valid - else - echo "Line $i was '$LINE' | Should be: '${EXPECTED_PREFIXES[$INDEX]}'" >> "$LOG_FILE" - fi - ;; - 5) # var_os (must be debian, alpine, or ubuntu) - if [[ "$LINE" =~ ^var_os=(debian|alpine|ubuntu)$ ]]; then - continue # Valid - else - echo "Line $i was '$LINE' | Should be: 'var_os=[debian|alpine|ubuntu]'" >> "$LOG_FILE" - fi - ;; - 7) # var_unprivileged (must be 0 or 1) - if [[ "$LINE" =~ ^var_unprivileged=[01]$ ]]; then - continue # Valid - else - echo "Line $i was '$LINE' | Should be: 'var_unprivileged=[0|1]'" >> "$LOG_FILE" - fi - ;; - *) # Other lines (must start with expected prefix) - if [[ "$LINE" == ${EXPECTED_PREFIXES[$INDEX]}* ]]; then - continue # Valid - else - echo "Line $i was '$LINE' | Should start with '${EXPECTED_PREFIXES[$INDEX]}'" >> "$LOG_FILE" - fi - ;; - esac - done - - for i in {16..20}; do - LINE=$(sed -n "${i}p" "$FILE") - EXPECTED=( - "header_info \"$APP\"" - "variables" - "color" - "catch_errors" - "function update_script() {" - ) - [[ "$LINE" != "${EXPECTED[$((i-16))]}" ]] && echo "Line $i was $LINE | Should be: ${EXPECTED[$((i-16))]}" >> "$LOG_FILE" - done - cat "$LOG_FILE" - elif [[ $FILE =~ ^install/.*-install\.sh$ ]]; then - - FIRST_LINE=$(sed -n '1p' "$FILE") - [[ "$FIRST_LINE" != "#!/usr/bin/env bash" ]] && echo "Line 1 was $FIRST_LINE | Should be: #!/usr/bin/env bash" >> "$LOG_FILE" - - SECOND_LINE=$(sed -n '2p' "$FILE") - [[ -n "$SECOND_LINE" ]] && echo "Line 2 should be empty" >> "$LOG_FILE" - - THIRD_LINE=$(sed -n '3p' "$FILE") - if ! 
[[ "$THIRD_LINE" =~ ^#\ Copyright\ \(c\)\ [0-9]{4}-[0-9]{4}\ community-scripts\ ORG$ || "$THIRD_LINE" =~ ^Copyright\ \(c\)\ [0-9]{4}-[0-9]{4}\ tteck$ ]]; then - echo "Line 3 was $THIRD_LINE | Should be: # Copyright (c) 2021-2026 community-scripts ORG" >> "$LOG_FILE" - fi - - EXPECTED_AUTHOR="# Author:" - EXPECTED_LICENSE="# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE" - EXPECTED_SOURCE="# Source:" - EXPECTED_EMPTY="" - - for i in {4..7}; do - LINE=$(sed -n "${i}p" "$FILE") - - case $i in - 4) - [[ $LINE == $EXPECTED_AUTHOR* ]] || printf "Line %d was: '%s' | Should start with: '%s'\n" "$i" "$LINE" "$EXPECTED_AUTHOR" >> $LOG_FILE - ;; - 5) - [[ "$LINE" == "$EXPECTED_LICENSE" ]] || printf "Line %d was: '%s' | Should be: '%s'\n" "$i" "$LINE" "$EXPECTED_LICENSE" >> $LOG_FILE - ;; - 6) - [[ $LINE == $EXPECTED_SOURCE* ]] || printf "Line %d was: '%s' | Should start with: '%s'\n" "$i" "$LINE" "$EXPECTED_SOURCE" >> $LOG_FILE - ;; - 7) - [[ -z $LINE ]] || printf "Line %d was: '%s' | Should be empty\n" "$i" "$LINE" >> $LOG_FILE - ;; - esac - done - - [[ "$(sed -n '8p' "$FILE")" != 'source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"' ]] && echo 'Line 8 should be: source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"' >> "$LOG_FILE" - - for i in {9..14}; do - LINE=$(sed -n "${i}p" "$FILE") - EXPECTED=( - "color" - "verb_ip6" - "catch_errors" - "setting_up_container" - "network_check" - "update_os" - ) - [[ "$LINE" != "${EXPECTED[$((i-9))]}" ]] && echo "Line $i was $LINE | Should be: ${EXPECTED[$((i-9))]}" >> "$LOG_FILE" - done - - [[ -n "$(sed -n '15p' "$FILE")" ]] && echo "Line 15 should be empty" >> "$LOG_FILE" - [[ "$(sed -n '16p' "$FILE")" != 'msg_info "Installing Dependencies"' ]] && echo 'Line 16 should be: msg_info "Installing Dependencies"' >> "$LOG_FILE" - - LAST_3_LINES=$(tail -n 3 "$FILE") - [[ "$LAST_3_LINES" != *"$STD apt-get -y autoremove"* ]] && echo 'Third to last line should be: $STD apt-get -y autoremove' >> "$LOG_FILE" - [[ "$LAST_3_LINES" 
!= *"$STD apt-get -y autoclean"* ]] && echo 'Second to last line should be: $STD apt-get -y clean' >> "$LOG_FILE" - [[ "$LAST_3_LINES" != *'msg_ok "Cleaned"'* ]] && echo 'Last line should be: msg_ok "Cleaned"' >> "$LOG_FILE" - cat "$LOG_FILE" - fi - - done - - - name: Post error comments - run: | - ERROR="false" - for FILE in ${{ env.SCRIPT }}; do - FILE_STRIPPED="${FILE##*/}" - LOG_FILE="result_$FILE_STRIPPED.log" - echo $LOG_FILE - if [[ ! -f $LOG_FILE ]]; then - continue - fi - ERROR_MSG=$(cat $LOG_FILE) - - if [ -n "$ERROR_MSG" ]; then - echo "Posting error message for $FILE" - echo ${ERROR_MSG} - gh pr comment ${{ github.event.pull_request.number }} \ - --repo ${{ github.repository }} \ - --body ":warning: The script _**$FILE**_ has the following formatting errors:
${ERROR_MSG}
" - - - ERROR="true" - fi - done - echo "ERROR=$ERROR" >> $GITHUB_ENV - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Fail if error - if: ${{ env.ERROR == 'true' }} - run: exit 1 diff --git a/.github/workflows/bak/validate-filenames.yml b/.github/workflows/bak/validate-filenames.yml deleted file mode 100644 index 211be06df..000000000 --- a/.github/workflows/bak/validate-filenames.yml +++ /dev/null @@ -1,158 +0,0 @@ -name: Validate filenames - -on: - pull_request_target: - paths: - - "ct/*.sh" - - "install/*.sh" - - "frontend/public/json/*.json" - -jobs: - check-files: - if: github.repository == 'community-scripts/ProxmoxVE' - name: Check changed files - runs-on: ubuntu-latest - permissions: - pull-requests: write - - steps: - - name: Get pull request information - if: github.event_name == 'pull_request_target' - uses: actions/github-script@v7 - id: pr - with: - script: | - const { data: pullRequest } = await github.rest.pulls.get({ - ...context.repo, - pull_number: context.payload.pull_request.number, - }); - return pullRequest; - - - name: Checkout code - uses: actions/checkout@v4 - with: - fetch-depth: 0 # Ensure the full history is fetched for accurate diffing - ref: ${{ github.event_name == 'pull_request_target' && fromJSON(steps.pr.outputs.result).merge_commit_sha || '' }} - - - name: Get changed files - id: changed-files - run: | - if ${{ github.event_name == 'pull_request_target' }}; then - echo "files=$(git diff --name-only ${{ github.event.pull_request.base.sha }} ${{ steps.pr.outputs.result && fromJSON(steps.pr.outputs.result).merge_commit_sha }} | xargs)" >> $GITHUB_OUTPUT - else - echo "files=$(git diff --name-only ${{ github.event.before }} ${{ github.event.after }} | xargs)" >> $GITHUB_OUTPUT - fi - - - name: "Validate filenames in ct and install directory" - if: always() && steps.changed-files.outputs.files != '' - id: check-scripts - run: | - CHANGED_FILES=$(printf "%s\n" ${{ steps.changed-files.outputs.files }} | { grep -E 
'^(ct|install)/.*\.sh$' || true; }) - - NON_COMPLIANT_FILES="" - for FILE in $CHANGED_FILES; do - BASENAME=$(echo "$(basename "${FILE%.*}")") - if [[ ! "$BASENAME" =~ ^[a-z0-9-]+$ ]]; then - NON_COMPLIANT_FILES="$NON_COMPLIANT_FILES $FILE" - fi - done - - if [ -n "$NON_COMPLIANT_FILES" ]; then - echo "files=$NON_COMPLIANT_FILES" >> $GITHUB_OUTPUT - echo "Non-compliant filenames found, change to lowercase:" - for FILE in $NON_COMPLIANT_FILES; do - echo "$FILE" - done - exit 1 - fi - - - name: "Validate filenames in json directory." - if: always() && steps.changed-files.outputs.files != '' - id: check-json - run: | - CHANGED_FILES=$(printf "%s\n" ${{ steps.changed-files.outputs.files }} | { grep -E '^json/.*\.json$' || true; }) - - NON_COMPLIANT_FILES="" - for FILE in $CHANGED_FILES; do - BASENAME=$(echo "$(basename "${FILE%.*}")") - if [[ ! "$BASENAME" =~ ^[a-z0-9-]+$ ]]; then - NON_COMPLIANT_FILES="$NON_COMPLIANT_FILES $FILE" - fi - done - - if [ -n "$NON_COMPLIANT_FILES" ]; then - echo "files=$NON_COMPLIANT_FILES" >> $GITHUB_OUTPUT - echo "Non-compliant filenames found, change to lowercase:" - for FILE in $NON_COMPLIANT_FILES; do - echo "$FILE" - done - exit 1 - fi - - - name: Post results and comment - if: always() && steps.check-scripts.outputs.files != '' && steps.check-json.outputs.files != '' && github.event_name == 'pull_request_target' - uses: actions/github-script@v7 - with: - script: | - const result = "${{ job.status }}" === "success" ? "success" : "failure"; - const nonCompliantFiles = { - script: "${{ steps.check-scripts.outputs.files }}", - JSON: "${{ steps.check-json.outputs.files }}", - }; - - const issueNumber = context.payload.pull_request - ? 
context.payload.pull_request.number - : null; - const commentIdentifier = "validate-filenames"; - let newCommentBody = `\n### Filename validation\n\n`; - - if (result === "failure") { - newCommentBody += ":x: We found issues in the following changed files:\n\n"; - for (const [check, files] of Object.entries(nonCompliantFiles)) { - if (files) { - newCommentBody += `**${check.charAt(0).toUpperCase() + check.slice(1)} filename invalid:**\n${files - .trim() - .split(" ") - .map((file) => `- ${file}`) - .join("\n")}\n\n`; - } - } - newCommentBody += - "Please change the filenames to lowercase and use only alphanumeric characters and dashes.\n"; - } else { - newCommentBody += `:rocket: All files passed filename validation!\n`; - } - - newCommentBody += `\n\n`; - - if (issueNumber) { - const { data: comments } = await github.rest.issues.listComments({ - ...context.repo, - issue_number: issueNumber, - }); - - const existingComment = comments.find( - (comment) => comment.user.login === "github-actions[bot]", - ); - - if (existingComment) { - if (existingComment.body.includes(commentIdentifier)) { - const re = new RegExp(String.raw`[\s\S]*?`, ""); - newCommentBody = existingComment.body.replace(re, newCommentBody); - } else { - newCommentBody = existingComment.body + '\n\n---\n\n' + newCommentBody; - } - - await github.rest.issues.updateComment({ - ...context.repo, - comment_id: existingComment.id, - body: newCommentBody, - }); - } else { - await github.rest.issues.createComment({ - ...context.repo, - issue_number: issueNumber, - body: newCommentBody, - }); - } - } diff --git a/.github/workflows/close-discussion.yml b/.github/workflows/close-discussion.yml deleted file mode 100644 index 21b3d7c8b..000000000 --- a/.github/workflows/close-discussion.yml +++ /dev/null @@ -1,164 +0,0 @@ -name: Close Discussion on PR Merge - -on: - push: - branches: - - main - -permissions: - contents: read - discussions: write - -jobs: - close-discussion: - if: github.repository == 
'community-scripts/ProxmoxVE' - runs-on: ubuntu-latest - - steps: - - name: Checkout Repository - uses: actions/checkout@v4 - - - name: Set Up Node.js - uses: actions/setup-node@v4 - with: - node-version: "20" - - - name: Install Dependencies - run: npm install zx @octokit/graphql - - - name: Close Discussion - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - GITHUB_SHA: ${{ github.sha }} - GITHUB_REPOSITORY: ${{ github.repository }} - run: | - npx zx << 'EOF' - import { graphql } from "@octokit/graphql"; - - (async function () { - try { - const token = process.env.GITHUB_TOKEN; - const commitSha = process.env.GITHUB_SHA; - const [owner, repo] = process.env.GITHUB_REPOSITORY.split("/"); - - if (!token || !commitSha || !owner || !repo) { - console.log("Missing required environment variables."); - process.exit(1); - } - - const graphqlWithAuth = graphql.defaults({ - headers: { authorization: `Bearer ${token}` }, - }); - - // Find PR from commit SHA - const searchQuery = ` - query($owner: String!, $repo: String!, $sha: GitObjectID!) { - repository(owner: $owner, name: $repo) { - object(oid: $sha) { - ... on Commit { - associatedPullRequests(first: 1) { - nodes { - number - body - } - } - } - } - } - } - `; - - const prResult = await graphqlWithAuth(searchQuery, { - owner, - repo, - sha: commitSha, - }); - - const pr = prResult.repository.object.associatedPullRequests.nodes[0]; - if (!pr) { - console.log("No PR found for this commit."); - return; - } - - const prNumber = pr.number; - const prBody = pr.body; - - const match = prBody.match(/#(\d+)/); - if (!match) { - console.log("No discussion ID found in PR body."); - return; - } - - const discussionNumber = match[1]; - console.log(`Extracted Discussion Number: ${discussionNumber}`); - - // Fetch GraphQL discussion ID - const discussionQuery = ` - query($owner: String!, $repo: String!, $number: Int!) 
{ - repository(owner: $owner, name: $repo) { - discussion(number: $number) { - id - } - } - } - `; - - let discussionQLId; - try { - const discussionResponse = await graphqlWithAuth(discussionQuery, { - owner, - repo, - number: parseInt(discussionNumber, 10), - }); - - discussionQLId = discussionResponse.repository.discussion.id; - if (!discussionQLId) { - console.log("Failed to fetch discussion GraphQL ID."); - return; - } - } catch (error) { - console.error("Discussion not found or error occurred while fetching discussion:", error); - return; - } - - // Post comment - const commentMutation = ` - mutation($discussionId: ID!, $body: String!) { - addDiscussionComment(input: { discussionId: $discussionId, body: $body }) { - comment { id body } - } - } - `; - - const commentResponse = await graphqlWithAuth(commentMutation, { - discussionId: discussionQLId, - body: `Merged with PR #${prNumber}`, - }); - - const commentId = commentResponse.addDiscussionComment.comment.id; - if (!commentId) { - console.log("Failed to post the comment."); - return; - } - - console.log(`Comment Posted Successfully! Comment ID: ${commentId}`); - - // Mark comment as answer - const markAnswerMutation = ` - mutation($id: ID!) { - markDiscussionCommentAsAnswer(input: { id: $id }) { - discussion { id title } - } - } - `; - - await graphqlWithAuth(markAnswerMutation, { id: commentId }); - - console.log("Comment marked as answer successfully!"); - - } catch (error) { - console.error("Error:", error); - process.exit(1); - } - })(); - EOF diff --git a/.github/workflows/close-tteck-issues.yaml b/.github/workflows/close-tteck-issues.yaml index b359414fc..2d564c1ce 100644 --- a/.github/workflows/close-tteck-issues.yaml +++ b/.github/workflows/close-tteck-issues.yaml @@ -21,7 +21,7 @@ jobs: const message = `Hello, it looks like you are referencing the **old tteck repo**. This repository is no longer used for active scripts. 
- **Please update your bookmarks** and use: [https://helper-scripts.com](https://helper-scripts.com) + **Please update your bookmarks** and use: [https://community-scripts.com](https://community-scripts.com) Also make sure your Bash command starts with: \`\`\`bash diff --git a/.github/workflows/close_issue_in_dev.yaml b/.github/workflows/close_issue_in_dev.yaml index 688f81f27..d61ca0e0a 100644 --- a/.github/workflows/close_issue_in_dev.yaml +++ b/.github/workflows/close_issue_in_dev.yaml @@ -62,10 +62,10 @@ jobs: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | issues=$(gh issue list --repo community-scripts/ProxmoxVED --json number,title --jq '.[] | {number, title}') - + best_match_score=0 best_match_number=0 - + for issue in $(echo "$issues" | jq -r '. | @base64'); do _jq() { echo ${issue} | base64 --decode | jq -r ${1} @@ -113,7 +113,8 @@ jobs: const http = require('http'); const url = require('url'); - function request(fullUrl, opts) { + function request(fullUrl, opts, redirectsLeft) { + if (redirectsLeft === undefined) redirectsLeft = 5; return new Promise(function(resolve, reject) { const u = url.parse(fullUrl); const isHttps = u.protocol === 'https:'; @@ -128,6 +129,19 @@ jobs: if (body) options.headers['Content-Length'] = Buffer.byteLength(body); const lib = isHttps ? https : http; const req = lib.request(options, function(res) { + // Follow redirects (301/302/307/308) + if ([301, 302, 307, 308].indexOf(res.statusCode) !== -1 && res.headers.location && redirectsLeft > 0) { + res.resume(); + const nextUrl = url.resolve(fullUrl, res.headers.location); + // For 301/302, browsers historically downgrade to GET; preserve method for 307/308. 
+ const nextOpts = Object.assign({}, opts); + if (res.statusCode === 301 || res.statusCode === 302) { + nextOpts.method = 'GET'; + delete nextOpts.body; + } + resolve(request(nextUrl, nextOpts, redirectsLeft - 1)); + return; + } let data = ''; res.on('data', function(chunk) { data += chunk; }); res.on('end', function() { diff --git a/.github/workflows/create-docker-for-runner.yml b/.github/workflows/create-docker-for-runner.yml deleted file mode 100644 index eee54c9e3..000000000 --- a/.github/workflows/create-docker-for-runner.yml +++ /dev/null @@ -1,38 +0,0 @@ -name: Build and Publish Docker Image - -on: - push: - branches: - - main - paths: - - '.github/runner/docker/**' - schedule: - - cron: '0 0 * * *' - -jobs: - build: - if: github.repository == 'community-scripts/ProxmoxVE' - runs-on: ubuntu-latest #To ensure it always builds we use the github runner with all the right tooling - - steps: - - name: Checkout code - uses: actions/checkout@v3 - - - name: Log in to GHCR - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Build Docker image - run: | - repo_name=${{ github.repository }} # Get repository name - repo_name_lower=$(echo $repo_name | tr '[:upper:]' '[:lower:]') # Convert to lowercase - docker build -t ghcr.io/$repo_name_lower/gh-runner-self:latest -f .github/runner/docker/gh-runner-self.dockerfile . 
- - - name: Push Docker image to GHCR - run: | - repo_name=${{ github.repository }} # Get repository name - repo_name_lower=$(echo $repo_name | tr '[:upper:]' '[:lower:]') # Convert to lowercase - docker push ghcr.io/$repo_name_lower/gh-runner-self:latest diff --git a/.github/workflows/delete-json-branch.yml b/.github/workflows/delete-json-branch.yml deleted file mode 100644 index dfd097b01..000000000 --- a/.github/workflows/delete-json-branch.yml +++ /dev/null @@ -1,29 +0,0 @@ - -name: Delete JSON date PR Branch - -on: - pull_request: - types: [closed] - branches: - - main - -jobs: - delete_branch: - if: github.repository == 'community-scripts/ProxmoxVE' - runs-on: ubuntu-latest - steps: - - name: Checkout the code - uses: actions/checkout@v3 - - - name: Delete PR Update Branch - if: github.event.pull_request.merged == true && startsWith(github.event.pull_request.head.ref, 'pr-update-json-') - run: | - PR_BRANCH="${{ github.event.pull_request.head.ref }}" - echo "Deleting branch $PR_BRANCH..." 
- - # Avoid deleting the default branch (e.g., main) - if [[ "$PR_BRANCH" != "main" ]]; then - git push origin --delete "$PR_BRANCH" - else - echo "Skipping deletion of the main branch" - fi diff --git a/.github/workflows/delete-pocketbase-entry-on-removal.yml b/.github/workflows/delete-pocketbase-entry-on-removal.yml index 473568efc..5e6fcee61 100644 --- a/.github/workflows/delete-pocketbase-entry-on-removal.yml +++ b/.github/workflows/delete-pocketbase-entry-on-removal.yml @@ -1,11 +1,11 @@ -name: Delete PocketBase entry on script/JSON removal +name: Set state to is_deleted in pocketbase on: push: branches: - main paths: - - "frontend/public/json/**" + - "json/**" - "vm/**" - "tools/**" - "turnkey/**" @@ -29,7 +29,7 @@ jobs: slugs="" # Deleted JSON files: get slug from previous commit - deleted_json=$(git diff --name-only --diff-filter=D "$BEFORE" "$AFTER" -- frontend/public/json/ | grep '\.json$' || true) + deleted_json=$(git diff --name-only --diff-filter=D "$BEFORE" "$AFTER" -- json/ | grep '\.json$' || true) for f in $deleted_json; do [[ -z "$f" ]] && continue s=$(git show "$BEFORE:$f" 2>/dev/null | jq -r '.slug // empty' 2>/dev/null || true) @@ -52,15 +52,15 @@ jobs: slugs=$(echo $slugs | xargs -n1 | sort -u | tr '\n' ' ') if [[ -z "$slugs" ]]; then - echo "No deleted JSON or script files to remove from PocketBase." + echo "No deleted JSON or script files to mark as deleted in PocketBase." 
echo "count=0" >> "$GITHUB_OUTPUT" exit 0 fi echo "$slugs" > slugs_to_delete.txt echo "count=$(echo $slugs | wc -w)" >> "$GITHUB_OUTPUT" - echo "Slugs to delete: $slugs" + echo "Slugs to mark as deleted: $slugs" - - name: Delete from PocketBase + - name: Mark as deleted in PocketBase if: steps.slugs.outputs.count != '0' env: POCKETBASE_URL: ${{ secrets.POCKETBASE_URL }} @@ -75,7 +75,8 @@ jobs: const http = require('http'); const url = require('url'); - function request(fullUrl, opts) { + function request(fullUrl, opts, redirectCount) { + redirectCount = redirectCount || 0; return new Promise(function(resolve, reject) { const u = url.parse(fullUrl); const isHttps = u.protocol === 'https:'; @@ -90,6 +91,13 @@ jobs: if (body) options.headers['Content-Length'] = Buffer.byteLength(body); const lib = isHttps ? https : http; const req = lib.request(options, function(res) { + if (res.statusCode >= 300 && res.statusCode < 400 && res.headers.location) { + if (redirectCount >= 5) return reject(new Error('Too many redirects from ' + fullUrl)); + const redirectUrl = url.resolve(fullUrl, res.headers.location); + res.resume(); + resolve(request(redirectUrl, opts, redirectCount + 1)); + return; + } let data = ''; res.on('data', function(chunk) { data += chunk; }); res.on('end', function() { @@ -123,6 +131,8 @@ jobs: const token = JSON.parse(authRes.body).token; const recordsUrl = apiBase + '/collections/' + encodeURIComponent(coll) + '/records'; + const patchBody = JSON.stringify({ is_deleted: true }); + for (const slug of slugs) { const filter = "(slug='" + slug + "')"; const listRes = await request(recordsUrl + '?filter=' + encodeURIComponent(filter) + '&perPage=1', { @@ -134,14 +144,15 @@ jobs: console.log('No PocketBase record for slug "' + slug + '", skipping.'); continue; } - const delRes = await request(recordsUrl + '/' + existingId, { - method: 'DELETE', - headers: { 'Authorization': token } + const patchRes = await request(recordsUrl + '/' + existingId, { + method: 
'PATCH', + headers: { 'Authorization': token, 'Content-Type': 'application/json' }, + body: patchBody }); - if (delRes.ok) { - console.log('Deleted PocketBase record for slug "' + slug + '" (id=' + existingId + ').'); + if (patchRes.ok) { + console.log('Set is_deleted=true for slug "' + slug + '" (id=' + existingId + ').'); } else { - console.warn('DELETE failed for slug "' + slug + '": ' + delRes.statusCode + ' ' + delRes.body); + console.warn('PATCH failed for slug "' + slug + '": ' + patchRes.statusCode + ' ' + patchRes.body); } } console.log('Done.'); diff --git a/.github/workflows/frontend-cicd.yml b/.github/workflows/frontend-cicd.yml deleted file mode 100644 index 243625206..000000000 --- a/.github/workflows/frontend-cicd.yml +++ /dev/null @@ -1,147 +0,0 @@ -# Based on https://github.com/actions/starter-workflows/blob/main/pages/nextjs.yml - -name: Frontend CI/CD - -on: - push: - branches: ["main"] - paths: - - frontend/** - - pull_request: - branches: ["main"] - types: [opened, synchronize, reopened, edited] - paths: - - frontend/** - - workflow_dispatch: - -permissions: - contents: read - -concurrency: - group: pages-${{ github.ref }} - cancel-in-progress: false - -jobs: - test-json-files: - runs-on: ubuntu-latest - defaults: - run: - working-directory: frontend - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Setup Python - uses: actions/setup-python@v4 - with: - python-version: "3.x" - - - name: Test JSON files - run: | - python3 << 'EOF' - import json - import glob - import os - import sys - - def test_json_files(): - # Change to the correct directory - json_dir = "public/json" - if not os.path.exists(json_dir): - print(f"❌ Directory not found: {json_dir}") - return False - - # Find all JSON files - pattern = os.path.join(json_dir, "*.json") - json_files = glob.glob(pattern) - - if not json_files: - print(f"⚠️ No JSON files found in {json_dir}") - return True - - print(f"Testing {len(json_files)} JSON files for valid 
syntax...") - - invalid_files = [] - - for file_path in json_files: - try: - with open(file_path, 'r', encoding='utf-8') as f: - json.load(f) - print(f"✅ Valid JSON: {file_path}") - except json.JSONDecodeError as e: - print(f"❌ Invalid JSON syntax in: {file_path}") - print(f" Error: {e}") - invalid_files.append(file_path) - except Exception as e: - print(f"⚠️ Error reading: {file_path}") - print(f" Error: {e}") - invalid_files.append(file_path) - - print("\n=== JSON Validation Summary ===") - print(f"Total files tested: {len(json_files)}") - print(f"Valid files: {len(json_files) - len(invalid_files)}") - print(f"Invalid files: {len(invalid_files)}") - - if invalid_files: - print("\n❌ Found invalid JSON file(s):") - for file_path in invalid_files: - print(f" - {file_path}") - return False - else: - print("\n✅ All JSON files have valid syntax!") - return True - - if __name__ == "__main__": - success = test_json_files() - sys.exit(0 if success else 1) - EOF - - build: - if: github.repository == 'community-scripts/ProxmoxVE' - needs: test-json-files - runs-on: ubuntu-latest - defaults: - run: - working-directory: frontend - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Setup Bun - uses: oven-sh/setup-bun@v2 - with: - bun-version: latest - - - name: Install dependencies - run: bun install --frozen-lockfile - - - name: Configure Next.js for pages - uses: actions/configure-pages@v5 - with: - static_site_generator: next - - - name: Build with Next.js - run: bun run build - - - name: Upload artifact - if: github.ref == 'refs/heads/main' - uses: actions/upload-pages-artifact@v3 - with: - path: frontend/out - - deploy: - runs-on: ubuntu-latest - needs: build - if: github.ref == 'refs/heads/main' && github.repository == 'community-scripts/ProxmoxVE' - permissions: - pages: write - id-token: write - environment: - name: github-pages - url: ${{ steps.deployment.outputs.page_url }} - steps: - - name: Deploy to GitHub Pages - id: deployment - uses: 
actions/deploy-pages@v4 diff --git a/.github/workflows/pocketbase-bot.yml b/.github/workflows/pocketbase-bot.yml new file mode 100644 index 000000000..3c649b973 --- /dev/null +++ b/.github/workflows/pocketbase-bot.yml @@ -0,0 +1,981 @@ +name: PocketBase Bot + +on: + issue_comment: + types: [created] + +permissions: + issues: write + pull-requests: write + contents: write + +jobs: + pocketbase-bot: + runs-on: self-hosted + + # Only act on /pocketbase commands + if: startsWith(github.event.comment.body, '/pocketbase') + + steps: + - name: Execute PocketBase bot command + env: + POCKETBASE_URL: ${{ secrets.POCKETBASE_URL }} + POCKETBASE_COLLECTION: ${{ secrets.POCKETBASE_COLLECTION }} + POCKETBASE_ADMIN_EMAIL: ${{ secrets.POCKETBASE_ADMIN_EMAIL }} + POCKETBASE_ADMIN_PASSWORD: ${{ secrets.POCKETBASE_ADMIN_PASSWORD }} + COMMENT_BODY: ${{ github.event.comment.body }} + COMMENT_ID: ${{ github.event.comment.id }} + ISSUE_NUMBER: ${{ github.event.issue.number }} + REPO_OWNER: ${{ github.repository_owner }} + REPO_NAME: ${{ github.event.repository.name }} + ACTOR: ${{ github.event.comment.user.login }} + ACTOR_ASSOCIATION: ${{ github.event.comment.author_association }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + FRONTEND_URL: ${{ secrets.FRONTEND_URL }} + REVALIDATE_SECRET: ${{ secrets.REVALIDATE_SECRET }} + run: | + node << 'ENDSCRIPT' + (async function () { + const https = require('https'); + const http = require('http'); + const url = require('url'); + + // ── HTTP helper with redirect following ──────────────────────────── + function request(fullUrl, opts, redirectCount) { + redirectCount = redirectCount || 0; + return new Promise(function (resolve, reject) { + const u = url.parse(fullUrl); + const isHttps = u.protocol === 'https:'; + const body = opts.body; + const options = { + hostname: u.hostname, + port: u.port || (isHttps ? 
443 : 80), + path: u.path, + method: opts.method || 'GET', + headers: opts.headers || {} + }; + if (body) options.headers['Content-Length'] = Buffer.byteLength(body); + const lib = isHttps ? https : http; + const req = lib.request(options, function (res) { + if (res.statusCode >= 300 && res.statusCode < 400 && res.headers.location) { + if (redirectCount >= 5) return reject(new Error('Too many redirects from ' + fullUrl)); + const redirectUrl = url.resolve(fullUrl, res.headers.location); + res.resume(); + resolve(request(redirectUrl, opts, redirectCount + 1)); + return; + } + let data = ''; + res.on('data', function (chunk) { data += chunk; }); + res.on('end', function () { + resolve({ ok: res.statusCode >= 200 && res.statusCode < 300, statusCode: res.statusCode, body: data }); + }); + }); + req.on('error', reject); + if (body) req.write(body); + req.end(); + }); + } + + // ── GitHub API helpers ───────────────────────────────────────────── + const owner = process.env.REPO_OWNER; + const repo = process.env.REPO_NAME; + const issueNumber = parseInt(process.env.ISSUE_NUMBER, 10); + const commentId = parseInt(process.env.COMMENT_ID, 10); + const actor = process.env.ACTOR; + + function ghRequest(path, method, body) { + const headers = { + 'Authorization': 'Bearer ' + process.env.GITHUB_TOKEN, + 'Accept': 'application/vnd.github+json', + 'X-GitHub-Api-Version': '2022-11-28', + 'User-Agent': 'PocketBase-Bot' + }; + const bodyStr = body ? 
JSON.stringify(body) : undefined; + if (bodyStr) headers['Content-Type'] = 'application/json'; + return request('https://api.github.com' + path, { method: method || 'GET', headers, body: bodyStr }); + } + + function encodeContentPath(filePath) { + return filePath.split('/').map(encodeURIComponent).join('/'); + } + + function decodeGitHubContent(content) { + return Buffer.from((content || '').replace(/\n/g, ''), 'base64').toString('utf8'); + } + + function sanitizeBranchPart(value) { + return (value || '') + .toLowerCase() + .replace(/[^a-z0-9._/-]+/g, '-') + .replace(/\/+/g, '/') + .replace(/^-+|-+$/g, ''); + } + + function applyCtDefaultChanges(scriptText, varChanges) { + let nextText = scriptText; + const updatedVars = []; + const unchangedVars = []; + for (const [varName, rawValue] of Object.entries(varChanges)) { + const newValue = String(rawValue); + const pattern = new RegExp('(^\\s*' + varName + '="\\$\\{' + varName + ':-)([^"}]*)(\\}"\\s*$)', 'm'); + const match = nextText.match(pattern); + if (!match) continue; + if (match[2] === newValue) { + unchangedVars.push(varName); + continue; + } + nextText = nextText.replace(pattern, '$1' + newValue + '$3'); + updatedVars.push(varName); + } + return { nextText, updatedVars, unchangedVars }; + } + + async function ensureBranch(defaultBranch, branchName) { + const branchRefRes = await ghRequest('/repos/' + owner + '/' + repo + '/git/ref/heads/' + encodeURIComponent(branchName)); + if (branchRefRes.ok) return; + + const defaultRefRes = await ghRequest('/repos/' + owner + '/' + repo + '/git/ref/heads/' + encodeURIComponent(defaultBranch)); + if (!defaultRefRes.ok) { + throw new Error('Could not read default branch ref: ' + defaultRefRes.body); + } + const defaultRef = JSON.parse(defaultRefRes.body); + const createBranchRes = await ghRequest('/repos/' + owner + '/' + repo + '/git/refs', 'POST', { + ref: 'refs/heads/' + branchName, + sha: defaultRef.object.sha + }); + if (!createBranchRes.ok) { + throw new Error('Could 
not create branch: ' + createBranchRes.body); + } + } + + async function upsertCtDefaultsPr(slugValue, varChanges) { + const wantedEntries = Object.entries(varChanges || {}).filter(function ([, v]) { + return v !== undefined && v !== null && String(v) !== ''; + }); + if (wantedEntries.length === 0) { + return { status: 'skipped', reason: 'No mapped CT defaults changed.' }; + } + + const repoRes = await ghRequest('/repos/' + owner + '/' + repo); + if (!repoRes.ok) { + throw new Error('Could not read repository metadata: ' + repoRes.body); + } + const repoInfo = JSON.parse(repoRes.body); + const defaultBranch = repoInfo.default_branch; + + const ctPath = 'ct/' + slugValue + '.sh'; + const encodedCtPath = encodeContentPath(ctPath); + const defaultFileRes = await ghRequest('/repos/' + owner + '/' + repo + '/contents/' + encodedCtPath + '?ref=' + encodeURIComponent(defaultBranch)); + if (defaultFileRes.statusCode === 404) { + return { status: 'skipped', reason: 'No matching CT file found at `' + ctPath + '`.' 
}; + } + if (!defaultFileRes.ok) { + throw new Error('Could not read CT file from default branch: ' + defaultFileRes.body); + } + + const branchName = 'pocketbase-sync/' + sanitizeBranchPart(slugValue || 'unknown'); + await ensureBranch(defaultBranch, branchName); + + const branchFileRes = await ghRequest('/repos/' + owner + '/' + repo + '/contents/' + encodedCtPath + '?ref=' + encodeURIComponent(branchName)); + if (!branchFileRes.ok) { + throw new Error('Could not read CT file from sync branch: ' + branchFileRes.body); + } + const branchFile = JSON.parse(branchFileRes.body); + const currentBranchText = decodeGitHubContent(branchFile.content); + + const updateResult = applyCtDefaultChanges(currentBranchText, Object.fromEntries(wantedEntries)); + if (updateResult.updatedVars.length === 0) { + return { status: 'skipped', reason: 'CT defaults already up to date.', unchangedVars: updateResult.unchangedVars }; + } + + const commitMessage = 'chore(ct): sync ' + slugValue + ' defaults from PocketBase'; + const putRes = await ghRequest('/repos/' + owner + '/' + repo + '/contents/' + encodedCtPath, 'PUT', { + message: commitMessage, + content: Buffer.from(updateResult.nextText, 'utf8').toString('base64'), + sha: branchFile.sha, + branch: branchName + }); + if (!putRes.ok) { + throw new Error('Could not update CT file: ' + putRes.body); + } + + const openPrRes = await ghRequest( + '/repos/' + owner + '/' + repo + '/pulls?state=open&head=' + encodeURIComponent(owner + ':' + branchName) + '&base=' + encodeURIComponent(defaultBranch) + ); + if (!openPrRes.ok) { + throw new Error('Could not query existing PRs: ' + openPrRes.body); + } + const openPrs = JSON.parse(openPrRes.body); + if (openPrs.length > 0) { + return { status: 'updated', prUrl: openPrs[0].html_url, updatedVars: updateResult.updatedVars }; + } + + const prTitle = 'chore(ct): sync ' + slugValue + ' defaults with PocketBase'; + const prBody = + '## Summary\n' + + '- Sync default CT variables for `' + slugValue + '` 
after `/pocketbase` update.\n' + + '- Updated vars: `' + updateResult.updatedVars.join('`, `') + '`.\n\n' + + '## Source\n' + + '- Triggered by @' + actor + ' via PocketBase bot.\n'; + const createPrRes = await ghRequest('/repos/' + owner + '/' + repo + '/pulls', 'POST', { + title: prTitle, + body: prBody, + head: branchName, + base: defaultBranch + }); + if (!createPrRes.ok) { + throw new Error('Could not create PR: ' + createPrRes.body); + } + const pr = JSON.parse(createPrRes.body); + return { status: 'created', prUrl: pr.html_url, updatedVars: updateResult.updatedVars }; + } + + function formatCtSyncResult(syncResult) { + if (!syncResult) return ''; + if (syncResult.status === 'created') return '\n\n**CT sync PR:** ' + syncResult.prUrl; + if (syncResult.status === 'updated') return '\n\n**CT sync PR updated:** ' + syncResult.prUrl; + if (syncResult.status === 'skipped') return '\n\n**CT sync skipped:** ' + syncResult.reason; + return ''; + } + + async function addReaction(content) { + try { + await ghRequest( + '/repos/' + owner + '/' + repo + '/issues/comments/' + commentId + '/reactions', + 'POST', { content } + ); + } catch (e) { + console.warn('Could not add reaction:', e.message); + } + } + + async function postComment(text) { + const res = await ghRequest( + '/repos/' + owner + '/' + repo + '/issues/' + issueNumber + '/comments', + 'POST', { body: text } + ); + if (!res.ok) console.warn('Could not post comment:', res.body); + } + + // ── Permission check ─────────────────────────────────────────────── + const association = process.env.ACTOR_ASSOCIATION; + if (association !== 'OWNER' && association !== 'MEMBER') { + await addReaction('-1'); + await postComment( + '❌ **PocketBase Bot**: @' + actor + ' is not authorized to use this command.\n' + + 'Only org members (Contributors team) can use `/pocketbase`.' 
+ ); + process.exit(0); + } + + // ── Acknowledge ──────────────────────────────────────────────────── + await addReaction('eyes'); + + // ── Parse command ────────────────────────────────────────────────── + const commentBody = process.env.COMMENT_BODY || ''; + const lines = commentBody.trim().split('\n'); + const firstLine = lines[0].trim(); + const withoutCmd = firstLine.replace(/^\/pocketbase\s+/, '').trim(); + + function extractCodeBlock(body) { + const m = body.match(/```[^\n]*\n([\s\S]*?)```/); + return m ? m[1].trim() : null; + } + const codeBlockValue = extractCodeBlock(commentBody); + + const HELP_TEXT = + '**Show current state:**\n' + + '```\n/pocketbase info\n```\n\n' + + '**Field update (simple):** `/pocketbase field=value [field=value ...]`\n\n' + + '**Field update (HTML/multiline) — value from code block:**\n' + + '````\n' + + '/pocketbase set description\n' + + '```html\n' + + '

Your HTML or multi-line content here

\n' + + '```\n' + + '````\n\n' + + '**Note management:**\n' + + '```\n' + + '/pocketbase note list\n' + + '/pocketbase note add ""\n' + + '/pocketbase note edit "" ""\n' + + '/pocketbase note remove ""\n' + + '```\n\n' + + '**Install method management:**\n' + + '```\n' + + '/pocketbase method list\n' + + '/pocketbase method cpu=4 ram=2048 hdd=20\n' + + '/pocketbase method config_path="/opt/app/.env"\n' + + '/pocketbase method os=debian version=13\n' + + '/pocketbase method add cpu=2 ram=2048 hdd=8 os=debian version=13\n' + + '/pocketbase method remove \n' + + '```\n' + + 'Method fields: `cpu` `ram` `hdd` `os` `version` `config_path` `script`\n\n' + + '**Editable fields:** `name` `description` `logo` `documentation` `website` `project_url` `github` ' + + '`config_path` `port` `default_user` `default_passwd` ' + + '`updateable` `privileged` `has_arm` `is_dev` ' + + '`is_disabled` `disable_message` `is_deleted` `deleted_message`'; + + if (!withoutCmd) { + await addReaction('-1'); + await postComment('❌ **PocketBase Bot**: No slug or command specified.\n\n' + HELP_TEXT); + process.exit(0); + } + + const spaceIdx = withoutCmd.indexOf(' '); + const slug = (spaceIdx === -1 ? withoutCmd : withoutCmd.substring(0, spaceIdx)).trim(); + const rest = spaceIdx === -1 ? '' : withoutCmd.substring(spaceIdx + 1).trim(); + + if (!rest) { + await addReaction('-1'); + await postComment('❌ **PocketBase Bot**: No command specified for slug `' + slug + '`.\n\n' + HELP_TEXT); + process.exit(0); + } + + // ── PocketBase: authenticate ─────────────────────────────────────── + const raw = process.env.POCKETBASE_URL.replace(/\/$/, ''); + const apiBase = /\/api$/i.test(raw) ? 
raw : raw + '/api'; + const coll = process.env.POCKETBASE_COLLECTION; + + const authRes = await request(apiBase + '/collections/users/auth-with-password', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + identity: process.env.POCKETBASE_ADMIN_EMAIL, + password: process.env.POCKETBASE_ADMIN_PASSWORD + }) + }); + if (!authRes.ok) { + await addReaction('-1'); + await postComment('❌ **PocketBase Bot**: PocketBase authentication failed. CC @' + owner + '/maintainers'); + process.exit(1); + } + const token = JSON.parse(authRes.body).token; + + // ── PocketBase: find record by slug ──────────────────────────────── + const recordsUrl = apiBase + '/collections/' + encodeURIComponent(coll) + '/records'; + const filter = "(slug='" + slug.replace(/'/g, "''") + "')"; + const listRes = await request(recordsUrl + '?filter=' + encodeURIComponent(filter) + '&perPage=1', { + headers: { 'Authorization': token } + }); + const list = JSON.parse(listRes.body); + const record = list.items && list.items[0]; + + if (!record) { + await addReaction('-1'); + await postComment( + '❌ **PocketBase Bot**: No record found for slug `' + slug + '`.\n\n' + + 'Make sure the script was already pushed to PocketBase (JSON must exist and have been synced).' 
+ ); + process.exit(0); + } + + // ── Shared helpers ───────────────────────────────────────────────── + + // Key=value parser: handles unquoted and "quoted" values + function parseKVPairs(str) { + const fields = {}; + let pos = 0; + while (pos < str.length) { + while (pos < str.length && /\s/.test(str[pos])) pos++; + if (pos >= str.length) break; + let keyStart = pos; + while (pos < str.length && str[pos] !== '=' && !/\s/.test(str[pos])) pos++; + const key = str.substring(keyStart, pos).trim(); + if (!key || pos >= str.length || str[pos] !== '=') { pos++; continue; } + pos++; + let value; + if (pos < str.length && str[pos] === '"') { + pos++; + let valStart = pos; + while (pos < str.length && str[pos] !== '"') { + if (str[pos] === '\\') pos++; + pos++; + } + value = str.substring(valStart, pos).replace(/\\"/g, '"'); + if (pos < str.length) pos++; + } else { + let valStart = pos; + while (pos < str.length && !/\s/.test(str[pos])) pos++; + value = str.substring(valStart, pos); + } + fields[key] = value; + } + return fields; + } + + // Token parser for note commands: unquoted-word OR "quoted string" + function parseTokens(str) { + const tokens = []; + let pos = 0; + while (pos < str.length) { + while (pos < str.length && /\s/.test(str[pos])) pos++; + if (pos >= str.length) break; + if (str[pos] === '"') { + pos++; + let start = pos; + while (pos < str.length && str[pos] !== '"') { + if (str[pos] === '\\') pos++; + pos++; + } + tokens.push(str.substring(start, pos).replace(/\\"/g, '"')); + if (pos < str.length) pos++; + } else { + let start = pos; + while (pos < str.length && !/\s/.test(str[pos])) pos++; + tokens.push(str.substring(start, pos)); + } + } + return tokens; + } + + // Read JSON blob from record (handles parsed objects and strings) + function readJsonBlob(val) { + if (Array.isArray(val)) return val; + try { return JSON.parse(val || '[]'); } catch (e) { return []; } + } + + // Frontend cache revalidation (silent, best-effort) + async function revalidate(s) 
{ + const frontendUrl = process.env.FRONTEND_URL; + const secret = process.env.REVALIDATE_SECRET; + if (!frontendUrl || !secret) return; + try { + await request(frontendUrl.replace(/\/$/, '') + '/api/revalidate', { + method: 'POST', + headers: { 'Authorization': 'Bearer ' + secret, 'Content-Type': 'application/json' }, + body: JSON.stringify({ tags: ['scripts', 'script-' + s] }) + }); + } catch (e) { console.warn('Revalidation skipped:', e.message); } + } + + // Format notes list for display + function formatNotesList(arr) { + if (arr.length === 0) return '*None*'; + return arr.map(function (n, i) { + return (i + 1) + '. **`' + (n.type || '?') + '`**: ' + (n.text || ''); + }).join('\n'); + } + + // Format install methods list for display + function formatMethodsList(arr) { + if (arr.length === 0) return '*None*'; + return arr.map(function (im, i) { + const r = im.resources || {}; + const parts = [ + (r.os || '?') + ' ' + (r.version || '?'), + (r.cpu != null ? r.cpu : '?') + 'C / ' + (r.ram != null ? r.ram : '?') + ' MB / ' + (r.hdd != null ? r.hdd : '?') + ' GB' + ]; + if (im.config_path) parts.push('config: `' + im.config_path + '`'); + if (im.script) parts.push('script: `' + im.script + '`'); + return (i + 1) + '. 
**`' + (im.type || '?') + '`** — ' + parts.join(', '); + }).join('\n'); + } + + // ── Route: dispatch to subcommand handler ────────────────────────── + const infoMatch = rest.match(/^info$/i); + const noteMatch = rest.match(/^note\s+(list|add|edit|remove)\b/i); + const methodMatch = rest.match(/^method\b/i); + const setMatch = rest.match(/^set\s+(\S+)/i); + + if (infoMatch) { + // ── INFO SUBCOMMAND ────────────────────────────────────────────── + const notesArr = readJsonBlob(record.notes); + const methodsArr = readJsonBlob(record.install_methods); + + const out = []; + out.push('ℹ️ **PocketBase Bot**: Info for **`' + slug + '`**\n'); + + out.push('**Basic info:**'); + out.push('- **Name:** ' + (record.name || '—')); + out.push('- **Slug:** `' + slug + '`'); + out.push('- **Port:** ' + (record.port != null ? '`' + record.port + '`' : '—')); + out.push('- **Updateable:** ' + (record.updateable ? 'Yes' : 'No')); + out.push('- **Privileged:** ' + (record.privileged ? 'Yes' : 'No')); + out.push('- **ARM:** ' + (record.has_arm ? 'Yes' : 'No')); + if (record.is_dev) out.push('- **Dev:** Yes'); + if (record.is_disabled) out.push('- **Disabled:** Yes' + (record.disable_message ? ' — ' + record.disable_message : '')); + if (record.is_deleted) out.push('- **Deleted:** Yes' + (record.deleted_message ? ' — ' + record.deleted_message : '')); + out.push(''); + + out.push('**Links:**'); + out.push('- **Website:** ' + (record.website || '—')); + out.push('- **Docs:** ' + (record.documentation || '—')); + out.push('- **Logo:** ' + (record.logo ? '[link](' + record.logo + ')' : '—')); + out.push('- **GitHub:** ' + (record.github || '—')); + if (record.config_path) out.push('- **Config:** `' + record.config_path + '`'); + out.push(''); + + out.push('**Credentials:**'); + out.push('- **User:** ' + (record.default_user || '—')); + out.push('- **Password:** ' + (record.default_passwd ? 
'*(set)*' : '—')); + out.push(''); + + out.push('**Install methods** (' + methodsArr.length + '):'); + out.push(formatMethodsList(methodsArr)); + out.push(''); + + out.push('**Notes** (' + notesArr.length + '):'); + out.push(formatNotesList(notesArr)); + + await addReaction('+1'); + await postComment(out.join('\n')); + + } else if (noteMatch) { + // ── NOTE SUBCOMMAND ────────────────────────────────────────────── + const noteAction = noteMatch[1].toLowerCase(); + const noteArgsStr = rest.substring(noteMatch[0].length).trim(); + let notesArr = readJsonBlob(record.notes); + + async function patchNotes(arr) { + const res = await request(recordsUrl + '/' + record.id, { + method: 'PATCH', + headers: { 'Authorization': token, 'Content-Type': 'application/json' }, + body: JSON.stringify({ notes: arr }) + }); + if (!res.ok) { + await addReaction('-1'); + await postComment('❌ **PocketBase Bot**: Failed to update notes:\n```\n' + res.body + '\n```'); + process.exit(1); + } + } + + if (noteAction === 'list') { + await addReaction('+1'); + await postComment( + 'ℹ️ **PocketBase Bot**: Notes for **`' + slug + '`** (' + notesArr.length + ' total)\n\n' + + formatNotesList(notesArr) + ); + + } else if (noteAction === 'add') { + const tokens = parseTokens(noteArgsStr); + if (tokens.length < 2) { + await addReaction('-1'); + await postComment( + '❌ **PocketBase Bot**: `note add` requires `` and `""`.\n\n' + + '**Usage:** `/pocketbase ' + slug + ' note add ""`' + ); + process.exit(0); + } + const noteType = tokens[0].toLowerCase(); + const noteText = tokens.slice(1).join(' '); + notesArr.push({ type: noteType, text: noteText }); + await patchNotes(notesArr); + await revalidate(slug); + await addReaction('+1'); + await postComment( + '✅ **PocketBase Bot**: Added note to **`' + slug + '`**\n\n' + + '- **Type:** `' + noteType + '`\n' + + '- **Text:** ' + noteText + '\n\n' + + '*Executed by @' + actor + '*' + ); + + } else if (noteAction === 'edit') { + const tokens = 
parseTokens(noteArgsStr); + if (tokens.length < 3) { + await addReaction('-1'); + await postComment( + '❌ **PocketBase Bot**: `note edit` requires ``, `""`, and `""`.\n\n' + + '**Usage:** `/pocketbase ' + slug + ' note edit "" ""`\n\n' + + 'Use `/pocketbase ' + slug + ' note list` to see current notes.' + ); + process.exit(0); + } + const noteType = tokens[0].toLowerCase(); + const oldText = tokens[1]; + const newText = tokens[2]; + const idx = notesArr.findIndex(function (n) { + return n.type.toLowerCase() === noteType && n.text === oldText; + }); + if (idx === -1) { + await addReaction('-1'); + await postComment( + '❌ **PocketBase Bot**: No `' + noteType + '` note found with that exact text.\n\n' + + '**Current notes for `' + slug + '`:**\n' + formatNotesList(notesArr) + ); + process.exit(0); + } + notesArr[idx].text = newText; + await patchNotes(notesArr); + await revalidate(slug); + await addReaction('+1'); + await postComment( + '✅ **PocketBase Bot**: Edited note in **`' + slug + '`**\n\n' + + '- **Type:** `' + noteType + '`\n' + + '- **Old:** ' + oldText + '\n' + + '- **New:** ' + newText + '\n\n' + + '*Executed by @' + actor + '*' + ); + + } else if (noteAction === 'remove') { + const tokens = parseTokens(noteArgsStr); + if (tokens.length < 2) { + await addReaction('-1'); + await postComment( + '❌ **PocketBase Bot**: `note remove` requires `` and `""`.\n\n' + + '**Usage:** `/pocketbase ' + slug + ' note remove ""`\n\n' + + 'Use `/pocketbase ' + slug + ' note list` to see current notes.' 
+ ); + process.exit(0); + } + const noteType = tokens[0].toLowerCase(); + const noteText = tokens[1]; + const before = notesArr.length; + notesArr = notesArr.filter(function (n) { + return !(n.type.toLowerCase() === noteType && n.text === noteText); + }); + if (notesArr.length === before) { + await addReaction('-1'); + await postComment( + '❌ **PocketBase Bot**: No `' + noteType + '` note found with that exact text.\n\n' + + '**Current notes for `' + slug + '`:**\n' + formatNotesList(notesArr) + ); + process.exit(0); + } + await patchNotes(notesArr); + await revalidate(slug); + await addReaction('+1'); + await postComment( + '✅ **PocketBase Bot**: Removed note from **`' + slug + '`**\n\n' + + '- **Type:** `' + noteType + '`\n' + + '- **Text:** ' + noteText + '\n\n' + + '*Executed by @' + actor + '*' + ); + } + + } else if (methodMatch) { + // ── METHOD SUBCOMMAND ──────────────────────────────────────────── + const methodArgs = rest.replace(/^method\s*/i, '').trim(); + const methodListMode = !methodArgs || methodArgs.toLowerCase() === 'list'; + let methodsArr = readJsonBlob(record.install_methods); + + // Method field classification + const RESOURCE_KEYS = { cpu: 'number', ram: 'number', hdd: 'number', os: 'string', version: 'string' }; + const METHOD_KEYS = { config_path: 'string', script: 'string' }; + const ALL_METHOD_KEYS = Object.assign({}, RESOURCE_KEYS, METHOD_KEYS); + const RESOURCE_TO_CT_VAR = { cpu: 'var_cpu', ram: 'var_ram', hdd: 'var_disk', os: 'var_os', version: 'var_version' }; + + function applyMethodChanges(method, parsed) { + if (!method.resources) method.resources = {}; + for (const [k, v] of Object.entries(parsed)) { + if (RESOURCE_KEYS[k]) { + method.resources[k] = RESOURCE_KEYS[k] === 'number' ? parseInt(v, 10) : v; + } else if (METHOD_KEYS[k]) { + method[k] = v === '' ? 
null : v; + } + } + } + + async function patchMethods(arr) { + const res = await request(recordsUrl + '/' + record.id, { + method: 'PATCH', + headers: { 'Authorization': token, 'Content-Type': 'application/json' }, + body: JSON.stringify({ install_methods: arr }) + }); + if (!res.ok) { + await addReaction('-1'); + await postComment('❌ **PocketBase Bot**: Failed to update install methods:\n```\n' + res.body + '\n```'); + process.exit(1); + } + } + + if (methodListMode) { + await addReaction('+1'); + await postComment( + 'ℹ️ **PocketBase Bot**: Install methods for **`' + slug + '`** (' + methodsArr.length + ' total)\n\n' + + formatMethodsList(methodsArr) + ); + + } else { + // Check for add / remove sub-actions + const addMatch = methodArgs.match(/^add\s+(\S+)(?:\s+(.+))?$/i); + const removeMatch = methodArgs.match(/^remove\s+(\S+)$/i); + + if (addMatch) { + // ── METHOD ADD ─────────────────────────────────────────────── + const newType = addMatch[1]; + const parsed = addMatch[2] ? parseKVPairs(addMatch[2]) : {}; + if (methodsArr.some(function (im) { return (im.type || '').toLowerCase() === newType.toLowerCase(); })) { + await addReaction('-1'); + await postComment('❌ **PocketBase Bot**: Install method `' + newType + '` already exists for `' + slug + '`.\n\nUse `/pocketbase ' + slug + ' method list` to see all methods.'); + process.exit(0); + } + const newMethod = { type: newType, resources: { cpu: 1, ram: 512, hdd: 4, os: 'debian', version: '13' } }; + if (addMatch[2]) { + const unknown = Object.keys(parsed).filter(function (k) { return !ALL_METHOD_KEYS[k]; }); + if (unknown.length > 0) { + await addReaction('-1'); + await postComment('❌ **PocketBase Bot**: Unknown method field(s): `' + unknown.join('`, `') + '`\n\n**Allowed:** `' + Object.keys(ALL_METHOD_KEYS).join('`, `') + '`'); + process.exit(0); + } + applyMethodChanges(newMethod, parsed); + } + methodsArr.push(newMethod); + await patchMethods(methodsArr); + await revalidate(slug); + const addCtChanges = {}; + 
for (const [k, v] of Object.entries(parsed)) { + if (RESOURCE_TO_CT_VAR[k]) addCtChanges[RESOURCE_TO_CT_VAR[k]] = v; + } + let addCtSync = null; + try { + addCtSync = await upsertCtDefaultsPr(slug, addCtChanges); + } catch (e) { + addCtSync = { status: 'skipped', reason: 'CT sync failed: ' + e.message }; + } + await addReaction('+1'); + await postComment( + '✅ **PocketBase Bot**: Added install method **`' + newType + '`** to **`' + slug + '`**\n\n' + + formatMethodsList([newMethod]) + '\n\n' + + formatCtSyncResult(addCtSync) + '\n\n' + + '*Executed by @' + actor + '*' + ); + + } else if (removeMatch) { + // ── METHOD REMOVE ──────────────────────────────────────────── + const removeType = removeMatch[1].toLowerCase(); + const removed = methodsArr.filter(function (im) { return (im.type || '').toLowerCase() === removeType; }); + if (removed.length === 0) { + await addReaction('-1'); + const available = methodsArr.map(function (im) { return im.type || '?'; }); + await postComment('❌ **PocketBase Bot**: No install method `' + removeType + '` found.\n\n**Available:** `' + (available.length ? 
available.join('`, `') : '(none)') + '`'); + process.exit(0); + } + methodsArr = methodsArr.filter(function (im) { return (im.type || '').toLowerCase() !== removeType; }); + await patchMethods(methodsArr); + await revalidate(slug); + await addReaction('+1'); + await postComment( + '✅ **PocketBase Bot**: Removed install method **`' + removed[0].type + '`** from **`' + slug + '`**\n\n' + + '*Executed by @' + actor + '*' + ); + + } else { + // ── METHOD EDIT ────────────────────────────────────────────── + const editParts = methodArgs.match(/^(\S+)\s+(.+)$/); + if (!editParts) { + await addReaction('-1'); + await postComment( + '❌ **PocketBase Bot**: Invalid `method` syntax.\n\n' + + '**Usage:**\n```\n/pocketbase ' + slug + ' method list\n' + + '/pocketbase ' + slug + ' method cpu=4 ram=2048 hdd=20\n' + + '/pocketbase ' + slug + ' method config_path="/opt/app/.env"\n' + + '/pocketbase ' + slug + ' method add cpu=2 ram=2048 hdd=8\n' + + '/pocketbase ' + slug + ' method remove \n```' + ); + process.exit(0); + } + const targetType = editParts[1].toLowerCase(); + const parsed = parseKVPairs(editParts[2]); + + const unknown = Object.keys(parsed).filter(function (k) { return !ALL_METHOD_KEYS[k]; }); + if (unknown.length > 0) { + await addReaction('-1'); + await postComment('❌ **PocketBase Bot**: Unknown method field(s): `' + unknown.join('`, `') + '`\n\n**Allowed:** `' + Object.keys(ALL_METHOD_KEYS).join('`, `') + '`'); + process.exit(0); + } + if (Object.keys(parsed).length === 0) { + await addReaction('-1'); + await postComment('❌ **PocketBase Bot**: No valid `key=value` pairs found.\n\n**Allowed:** `' + Object.keys(ALL_METHOD_KEYS).join('`, `') + '`'); + process.exit(0); + } + + const idx = methodsArr.findIndex(function (im) { return (im.type || '').toLowerCase() === targetType; }); + if (idx === -1) { + await addReaction('-1'); + const available = methodsArr.map(function (im) { return im.type || '?'; }); + await postComment( + '❌ **PocketBase Bot**: No install method `' 
+ targetType + '` found for `' + slug + '`.\n\n' + + '**Available:** `' + (available.length ? available.join('`, `') : '(none)') + '`\n\n' + + 'Use `/pocketbase ' + slug + ' method list` to see all methods.' + ); + process.exit(0); + } + + applyMethodChanges(methodsArr[idx], parsed); + await patchMethods(methodsArr); + await revalidate(slug); + const editCtChanges = {}; + for (const [k, v] of Object.entries(parsed)) { + if (RESOURCE_TO_CT_VAR[k]) editCtChanges[RESOURCE_TO_CT_VAR[k]] = v; + } + let editCtSync = null; + try { + editCtSync = await upsertCtDefaultsPr(slug, editCtChanges); + } catch (e) { + editCtSync = { status: 'skipped', reason: 'CT sync failed: ' + e.message }; + } + + const changesLines = Object.entries(parsed) + .map(function ([k, v]) { + const unit = k === 'ram' ? ' MB' : k === 'hdd' ? ' GB' : ''; + return '- `' + k + '` → `' + v + unit + '`'; + }).join('\n'); + await addReaction('+1'); + await postComment( + '✅ **PocketBase Bot**: Updated install method **`' + methodsArr[idx].type + '`** for **`' + slug + '`**\n\n' + + '**Changes applied:**\n' + changesLines + '\n\n' + + formatCtSyncResult(editCtSync) + '\n\n' + + '*Executed by @' + actor + '*' + ); + } + } + + } else if (setMatch) { + // ── SET SUBCOMMAND (value from code block) ─────────────────────── + const fieldName = setMatch[1].toLowerCase(); + const SET_ALLOWED = { + name: 'string', description: 'string', logo: 'string', + documentation: 'string', website: 'string', project_url: 'string', github: 'string', + config_path: 'string', disable_message: 'string', deleted_message: 'string' + }; + if (!SET_ALLOWED[fieldName]) { + await addReaction('-1'); + await postComment( + '❌ **PocketBase Bot**: `set` only supports text fields.\n\n' + + '**Allowed:** `' + Object.keys(SET_ALLOWED).join('`, `') + '`\n\n' + + 'For boolean/number fields use `field=value` syntax instead.' 
+ ); + process.exit(0); + } + if (!codeBlockValue) { + await addReaction('-1'); + await postComment( + '❌ **PocketBase Bot**: `set` requires a code block with the value.\n\n' + + '**Usage:**\n````\n/pocketbase ' + slug + ' set ' + fieldName + '\n```\nYour content here (HTML, multiline, special chars all fine)\n```\n````' + ); + process.exit(0); + } + const setPayload = {}; + setPayload[fieldName] = codeBlockValue; + const setPatchRes = await request(recordsUrl + '/' + record.id, { + method: 'PATCH', + headers: { 'Authorization': token, 'Content-Type': 'application/json' }, + body: JSON.stringify(setPayload) + }); + if (!setPatchRes.ok) { + await addReaction('-1'); + await postComment('❌ **PocketBase Bot**: PATCH failed for `' + slug + '`:\n```\n' + setPatchRes.body + '\n```'); + process.exit(1); + } + await revalidate(slug); + const preview = codeBlockValue.length > 300 ? codeBlockValue.substring(0, 300) + '…' : codeBlockValue; + await addReaction('+1'); + await postComment( + '✅ **PocketBase Bot**: Set `' + fieldName + '` for **`' + slug + '`**\n\n' + + '**Value set:**\n```\n' + preview + '\n```\n\n' + + '*Executed by @' + actor + '*' + ); + + } else { + // ── FIELD=VALUE PATH ───────────────────────────────────────────── + const ALLOWED_FIELDS = { + name: 'string', + description: 'string', + logo: 'string', + documentation: 'string', + website: 'string', + project_url: 'string', + github: 'string', + config_path: 'string', + tags: 'string', + port: 'number', + default_user: 'nullable_string', + default_passwd: 'nullable_string', + unprivileged: 'number', + updateable: 'boolean', + privileged: 'boolean', + has_arm: 'boolean', + is_dev: 'boolean', + is_disabled: 'boolean', + disable_message: 'string', + is_deleted: 'boolean', + deleted_message: 'string', + }; + + const parsedFields = parseKVPairs(rest); + + const unknownFields = Object.keys(parsedFields).filter(function (f) { return !ALLOWED_FIELDS[f]; }); + if (unknownFields.length > 0) { + await 
addReaction('-1'); + await postComment( + '❌ **PocketBase Bot**: Unknown field(s): `' + unknownFields.join('`, `') + '`\n\n' + + '**Allowed fields:** `' + Object.keys(ALLOWED_FIELDS).join('`, `') + '`' + ); + process.exit(0); + } + + if (Object.keys(parsedFields).length === 0) { + await addReaction('-1'); + await postComment('❌ **PocketBase Bot**: Could not parse any valid `field=value` pairs.\n\n' + HELP_TEXT); + process.exit(0); + } + + // Cast values to correct types + const payload = {}; + for (const [key, rawVal] of Object.entries(parsedFields)) { + const type = ALLOWED_FIELDS[key]; + if (type === 'boolean') { + if (rawVal === 'true') payload[key] = true; + else if (rawVal === 'false') payload[key] = false; + else { + await addReaction('-1'); + await postComment('❌ **PocketBase Bot**: `' + key + '` must be `true` or `false`, got: `' + rawVal + '`'); + process.exit(0); + } + } else if (type === 'number') { + const n = parseInt(rawVal, 10); + if (isNaN(n)) { + await addReaction('-1'); + await postComment('❌ **PocketBase Bot**: `' + key + '` must be a number, got: `' + rawVal + '`'); + process.exit(0); + } + payload[key] = n; + } else if (type === 'nullable_string') { + payload[key] = rawVal === '' ? 
null : rawVal; + } else { + payload[key] = rawVal; + } + } + + const patchRes = await request(recordsUrl + '/' + record.id, { + method: 'PATCH', + headers: { 'Authorization': token, 'Content-Type': 'application/json' }, + body: JSON.stringify(payload) + }); + if (!patchRes.ok) { + await addReaction('-1'); + await postComment('❌ **PocketBase Bot**: PATCH failed for `' + slug + '`:\n```\n' + patchRes.body + '\n```'); + process.exit(1); + } + await revalidate(slug); + const FIELD_TO_CT_VAR = { tags: 'var_tags', unprivileged: 'var_unprivileged' }; + const fieldCtChanges = {}; + for (const [k, v] of Object.entries(payload)) { + if (FIELD_TO_CT_VAR[k]) fieldCtChanges[FIELD_TO_CT_VAR[k]] = v; + } + let fieldCtSync = null; + try { + fieldCtSync = await upsertCtDefaultsPr(slug, fieldCtChanges); + } catch (e) { + fieldCtSync = { status: 'skipped', reason: 'CT sync failed: ' + e.message }; + } + await addReaction('+1'); + const changesLines = Object.entries(payload) + .map(function ([k, v]) { return '- `' + k + '` → `' + JSON.stringify(v) + '`'; }) + .join('\n'); + await postComment( + '✅ **PocketBase Bot**: Updated **`' + slug + '`** successfully!\n\n' + + '**Changes applied:**\n' + changesLines + '\n\n' + + formatCtSyncResult(fieldCtSync) + '\n\n' + + '*Executed by @' + actor + '*' + ); + } + + console.log('Done.'); + })().catch(function (e) { + console.error('Fatal error:', e.message || e); + process.exit(1); + }); + ENDSCRIPT + shell: bash diff --git a/.github/workflows/push-json-to-pocketbase.yml b/.github/workflows/push-json-to-pocketbase.yml index 3e77f1a55..2b25988ea 100644 --- a/.github/workflows/push-json-to-pocketbase.yml +++ b/.github/workflows/push-json-to-pocketbase.yml @@ -5,7 +5,7 @@ on: branches: - main paths: - - "frontend/public/json/**" + - "json/**" jobs: push-json: @@ -19,7 +19,7 @@ jobs: - name: Get changed JSON files with slug id: changed run: | - changed=$(git diff --name-only "${{ github.event.before }}" "${{ github.event.after }}" -- 
frontend/public/json/ | grep '\.json$' || true) + changed=$(git diff --name-only "${{ github.event.before }}" "${{ github.event.after }}" -- json/ | grep '\.json$' || true) with_slug="" for f in $changed; do [[ -f "$f" ]] || continue @@ -48,7 +48,8 @@ jobs: const https = require('https'); const http = require('http'); const url = require('url'); - function request(fullUrl, opts) { + function request(fullUrl, opts, redirectCount) { + redirectCount = redirectCount || 0; return new Promise(function(resolve, reject) { const u = url.parse(fullUrl); const isHttps = u.protocol === 'https:'; @@ -63,6 +64,13 @@ jobs: if (body) options.headers['Content-Length'] = Buffer.byteLength(body); const lib = isHttps ? https : http; const req = lib.request(options, function(res) { + if (res.statusCode >= 300 && res.statusCode < 400 && res.headers.location) { + if (redirectCount >= 5) return reject(new Error('Too many redirects from ' + fullUrl)); + const redirectUrl = url.resolve(fullUrl, res.headers.location); + res.resume(); + resolve(request(redirectUrl, opts, redirectCount + 1)); + return; + } let data = ''; res.on('data', function(chunk) { data += chunk; }); res.on('end', function() { @@ -96,7 +104,7 @@ jobs: const recordsUrl = apiBase + '/collections/' + encodeURIComponent(coll) + '/records'; let categoryIdToName = {}; try { - const metadata = JSON.parse(fs.readFileSync('frontend/public/json/metadata.json', 'utf8')); + const metadata = JSON.parse(fs.readFileSync('json/metadata.json', 'utf8')); (metadata.categories || []).forEach(function(cat) { categoryIdToName[cat.id] = cat.name; }); } catch (e) { console.warn('Could not load metadata.json:', e.message); } let typeValueToId = {}; @@ -125,15 +133,15 @@ jobs: var osVersionToId = {}; try { const res = await request(apiBase + '/collections/z_ref_note_types/records?perPage=500', { headers: { 'Authorization': token } }); - if (res.ok) JSON.parse(res.body).items?.forEach(function(item) { if (item.type != null) noteTypeToId[item.type] 
= item.id; }); + if (res.ok) JSON.parse(res.body).items?.forEach(function(item) { if (item.type != null) { noteTypeToId[item.type] = item.id; noteTypeToId[item.type.toLowerCase()] = item.id; } }); } catch (e) { console.warn('z_ref_note_types:', e.message); } try { const res = await request(apiBase + '/collections/z_ref_install_method_types/records?perPage=500', { headers: { 'Authorization': token } }); - if (res.ok) JSON.parse(res.body).items?.forEach(function(item) { if (item.type != null) installMethodTypeToId[item.type] = item.id; }); + if (res.ok) JSON.parse(res.body).items?.forEach(function(item) { if (item.type != null) { installMethodTypeToId[item.type] = item.id; installMethodTypeToId[item.type.toLowerCase()] = item.id; } }); } catch (e) { console.warn('z_ref_install_method_types:', e.message); } try { const res = await request(apiBase + '/collections/z_ref_os/records?perPage=500', { headers: { 'Authorization': token } }); - if (res.ok) JSON.parse(res.body).items?.forEach(function(item) { if (item.os != null) osToId[item.os] = item.id; }); + if (res.ok) JSON.parse(res.body).items?.forEach(function(item) { if (item.os != null) { osToId[item.os] = item.id; osToId[item.os.toLowerCase()] = item.id; } }); } catch (e) { console.warn('z_ref_os:', e.message); } try { const res = await request(apiBase + '/collections/z_ref_os_version/records?perPage=500&expand=os', { headers: { 'Authorization': token } }); @@ -154,7 +162,7 @@ jobs: name: data.name, slug: data.slug, script_created: data.date_created || data.script_created, - script_updated: data.date_created || data.script_updated, + script_updated: new Date().toISOString().split('T')[0], updateable: data.updateable, privileged: data.privileged, port: data.interface_port != null ? 
data.interface_port : data.port, @@ -162,9 +170,8 @@ jobs: website: data.website, logo: data.logo, description: data.description, - config_path: data.config_path, - default_user: (data.default_credentials && data.default_credentials.username) || data.default_user, - default_passwd: (data.default_credentials && data.default_credentials.password) || data.default_passwd, + default_user: (data.default_credentials && data.default_credentials.username) || data.default_user || null, + default_passwd: (data.default_credentials && data.default_credentials.password) || data.default_passwd || null, is_dev: false }; var resolvedType = typeValueToId[data.type]; @@ -190,7 +197,7 @@ jobs: var postRes = await request(notesCollUrl, { method: 'POST', headers: { 'Authorization': token, 'Content-Type': 'application/json' }, - body: JSON.stringify({ text: note.text || '', type: typeId }) + body: JSON.stringify({ text: note.text || '', type: typeId, script: scriptId }) }); if (postRes.ok) noteIds.push(JSON.parse(postRes.body).id); } diff --git a/.github/workflows/push-to-gitea.yaml b/.github/workflows/push-to-gitea.yaml deleted file mode 100644 index bd927f4a4..000000000 --- a/.github/workflows/push-to-gitea.yaml +++ /dev/null @@ -1,48 +0,0 @@ -name: Sync to Gitea - -on: - push: - branches: - - main - -jobs: - sync: - if: github.repository == 'community-scripts/ProxmoxVE' - runs-on: ubuntu-latest - - steps: - - name: Checkout source repo - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Change all links to git.community-scripts.org - run: | - echo "Searching for files containing raw.githubusercontent.com URLs..." - - # Find all files containing GitHub raw URLs, excluding certain directories - files_with_github_urls=$(grep -r "https://raw.githubusercontent.com/community-scripts/ProxmoxVE" . 
--exclude-dir=.git --exclude-dir=node_modules --exclude-dir=.github/workflows --files-with-matches || true) - - if [ -n "$files_with_github_urls" ]; then - echo "$files_with_github_urls" | while read file; do - if [ -f "$file" ]; then - sed -i 's|https://raw\.githubusercontent\.com/community-scripts/ProxmoxVE/|https://git.community-scripts.org/community-scripts/ProxmoxVE/raw/branch/|g' "$file" - fi - done - else - echo "No files found containing GitHub raw URLs" - fi - - - - - name: Push to Gitea - run: | - git config --global user.name "Push From Github" - git config --global user.email "actions@github.com" - git remote add gitea https://$GITEA_USER:$GITEA_TOKEN@git.community-scripts.org/community-scripts/ProxmoxVE.git - git add . - git commit -m "Sync to Gitea" - git push gitea --all --force - env: - GITEA_USER: ${{ secrets.GITEA_USERNAME }} - GITEA_TOKEN: ${{ secrets.GITEA_TOKEN }} diff --git a/.github/workflows/scripts/app-test/pr-alpine-install.func b/.github/workflows/scripts/app-test/pr-alpine-install.func deleted file mode 100644 index 89c57dcf5..000000000 --- a/.github/workflows/scripts/app-test/pr-alpine-install.func +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) 2021-2026 community-scripts ORG -# Author: Michel Roegl-Brunner (michelroegl-brunner) -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE - -color() { - return -} -catch_errors() { - set -Eeuo pipefail - trap 'error_handler $LINENO "$BASH_COMMAND"' ERR -} - -# This function handles errors -error_handler() { - local line_number="$1" - local command="$2" - SCRIPT_NAME=$(basename "$0") - local error_message="$SCRIPT_NAME: Failure in line $line_number while executing command $command" - echo -e "\n$error_message" - exit 0 -} -verb_ip6() { - STD="" - return -} - -msg_info() { - local msg="$1" - echo -ne "${msg}\n" -} - -msg_ok() { - local msg="$1" - echo -e "${msg}\n" -} - -msg_error() { - - local msg="$1" - echo -e "${msg}\n" -} - -RETRY_NUM=10 
-RETRY_EVERY=3 -i=$RETRY_NUM - -setting_up_container() { - while [ $i -gt 0 ]; do - if [ "$(ip addr show | grep 'inet ' | grep -v '127.0.0.1' | awk '{print $2}' | cut -d'/' -f1)" != "" ]; then - break - fi - echo 1>&2 -en "No Network! " - sleep $RETRY_EVERY - i=$((i - 1)) - done - - if [ "$(ip addr show | grep 'inet ' | grep -v '127.0.0.1' | awk '{print $2}' | cut -d'/' -f1)" = "" ]; then - echo 1>&2 -e "\n No Network After $RETRY_NUM Tries" - echo -e "Check Network Settings" - exit 1 - fi - msg_ok "Set up Container OS" - msg_ok "Network Connected: $(hostname -i)" -} - -network_check() { - RESOLVEDIP=$(getent hosts github.com | awk '{ print $1 }') - if [[ -z "$RESOLVEDIP" ]]; then msg_error "DNS Lookup Failure"; else msg_ok "DNS Resolved github.com to $RESOLVEDIP"; fi - set -e -} - -update_os() { - msg_info "Updating Container OS" - $STD apk -U upgrade - msg_ok "Updated Container OS" -} - -motd_ssh() { - return -} - -customize() { - return -} diff --git a/.github/workflows/scripts/app-test/pr-build.func b/.github/workflows/scripts/app-test/pr-build.func deleted file mode 100644 index 6eadfb60d..000000000 --- a/.github/workflows/scripts/app-test/pr-build.func +++ /dev/null @@ -1,260 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) 2021-2026 community-scripts ORG -# Author: Michel Roegl-Brunner (michelroegl-brunner) -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE - -variables() { - NSAPP=$(echo ${APP,,} | tr -d ' ') # This function sets the NSAPP variable by converting the value of the APP variable to lowercase and removing any spaces. - var_install="${NSAPP}-install" # sets the var_install variable by appending "-install" to the value of NSAPP. 
- -} - -NEXTID=$(pvesh get /cluster/nextid) -timezone=$(cat /etc/timezone) -header_info() { - return -} - -base_settings() { - # Default Settings - CT_TYPE="1" - DISK_SIZE="4" - CORE_COUNT="1" - RAM_SIZE="1024" - VERBOSE="no" - PW="" - CT_ID=$NEXTID - HN=$NSAPP - BRG="vmbr0" - NET="dhcp" - GATE="" - APT_CACHER="" - APT_CACHER_IP="" - DISABLEIP6="no" - MTU="" - SD="" - NS="" - MAC="" - VLAN="" - SSH="no" - SSH_AUTHORIZED_KEY="" - TAGS="community-script;" - - # Override default settings with variables from ct script - CT_TYPE=${var_unprivileged:-$CT_TYPE} - DISK_SIZE=${var_disk:-$DISK_SIZE} - CORE_COUNT=${var_cpu:-$CORE_COUNT} - RAM_SIZE=${var_ram:-$RAM_SIZE} - VERB=${var_verbose:-$VERBOSE} - TAGS="${TAGS}${var_tags:-}" - - # Since these 2 are only defined outside of default_settings function, we add a temporary fallback. TODO: To align everything, we should add these as constant variables (e.g. OSTYPE and OSVERSION), but that would currently require updating the default_settings function for all existing scripts - if [ -z "$var_os" ]; then - var_os="debian" - fi - if [ -z "$var_version" ]; then - var_version="12" - fi -} - -color() { - # Colors - YW=$(echo "\033[33m") - YWB=$(echo "\033[93m") - BL=$(echo "\033[36m") - RD=$(echo "\033[01;31m") - BGN=$(echo "\033[4;92m") - GN=$(echo "\033[1;92m") - DGN=$(echo "\033[32m") - - # Formatting - CL=$(echo "\033[m") - UL=$(echo "\033[4m") - BOLD=$(echo "\033[1m") - BFR="\\r\\033[K" - HOLD=" " - TAB=" " - - # Icons - CM="${TAB}✔️${TAB}${CL}" - CROSS="${TAB}✖️${TAB}${CL}" - INFO="${TAB}💡${TAB}${CL}" - OS="${TAB}🖥️${TAB}${CL}" - OSVERSION="${TAB}🌟${TAB}${CL}" - CONTAINERTYPE="${TAB}📦${TAB}${CL}" - DISKSIZE="${TAB}💾${TAB}${CL}" - CPUCORE="${TAB}🧠${TAB}${CL}" - RAMSIZE="${TAB}🛠️${TAB}${CL}" - SEARCH="${TAB}🔍${TAB}${CL}" - VERIFYPW="${TAB}🔐${TAB}${CL}" - CONTAINERID="${TAB}🆔${TAB}${CL}" - HOSTNAME="${TAB}🏠${TAB}${CL}" - BRIDGE="${TAB}🌉${TAB}${CL}" - NETWORK="${TAB}📡${TAB}${CL}" - GATEWAY="${TAB}🌐${TAB}${CL}" - 
DISABLEIPV6="${TAB}🚫${TAB}${CL}" - DEFAULT="${TAB}⚙️${TAB}${CL}" - MACADDRESS="${TAB}🔗${TAB}${CL}" - VLANTAG="${TAB}🏷️${TAB}${CL}" - ROOTSSH="${TAB}🔑${TAB}${CL}" - CREATING="${TAB}🚀${TAB}${CL}" - ADVANCED="${TAB}🧩${TAB}${CL}" -} - -catch_errors() { - set -Eeuo pipefail - trap 'error_handler $LINENO "$BASH_COMMAND"' ERR -} - -# This function handles errors -error_handler() { - local line_number="$1" - local command="$2" - SCRIPT_NAME=$(basename "$0") - local error_message="$SCRIPT_NAME: Failure in line $line_number while executing command $command" - echo -e "\n$error_message" - exit 100 -} - -msg_info() { - local msg="$1" - echo -ne "${msg}\n" -} - -msg_ok() { - local msg="$1" - echo -e "${msg}\n" -} - -msg_error() { - - local msg="$1" - echo -e "${msg}\n" -} -start() { - base_settings - return -} - -build_container() { - # if [ "$VERB" == "yes" ]; then set -x; fi - - if [ "$CT_TYPE" == "1" ]; then - FEATURES="keyctl=1,nesting=1" - else - FEATURES="nesting=1" - fi - TEMP_DIR=$(mktemp -d) - pushd $TEMP_DIR >/dev/null - if [ "$var_os" == "alpine" ]; then - export FUNCTIONS_FILE_PATH="$(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/.github/workflows/scripts/app-test/pr-alpine-install.func)" - else - export FUNCTIONS_FILE_PATH="$(curl -s https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/.github/workflows/scripts/app-test/pr-install.func)" - fi - - export CACHER="$APT_CACHER" - export CACHER_IP="$APT_CACHER_IP" - export tz="" - export DISABLEIPV6="$DISABLEIP6" - export APPLICATION="$APP" - export app="$NSAPP" - export PASSWORD="$PW" - export VERBOSE="$VERB" - export SSH_ROOT="${SSH}" - export SSH_AUTHORIZED_KEY - export CTID="$CT_ID" - export CTTYPE="$CT_TYPE" - export PCT_OSTYPE="$var_os" - export PCT_OSVERSION="$var_version" - export PCT_DISK_SIZE="$DISK_SIZE" - export tz="$timezone" - export PCT_OPTIONS=" - -features $FEATURES - -hostname $HN - -tags $TAGS - $SD - $NS - -net0 
name=eth0,bridge=$BRG$MAC,ip=$NET$GATE$VLAN$MTU - -onboot 1 - -cores $CORE_COUNT - -memory $RAM_SIZE - -unprivileged $CT_TYPE - $PW - " - echo "Container ID: $CTID" - - # This executes create_lxc.sh and creates the container and .conf file - bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/.github/workflows/scripts/app-test/pr-create-lxc.sh)" - - LXC_CONFIG=/etc/pve/lxc/${CTID}.conf - if [ "$CT_TYPE" == "0" ]; then - cat <>$LXC_CONFIG -# USB passthrough -lxc.cgroup2.devices.allow: a -lxc.cap.drop: -lxc.cgroup2.devices.allow: c 188:* rwm -lxc.cgroup2.devices.allow: c 189:* rwm -lxc.mount.entry: /dev/serial/by-id dev/serial/by-id none bind,optional,create=dir -lxc.mount.entry: /dev/ttyUSB0 dev/ttyUSB0 none bind,optional,create=file -lxc.mount.entry: /dev/ttyUSB1 dev/ttyUSB1 none bind,optional,create=file -lxc.mount.entry: /dev/ttyACM0 dev/ttyACM0 none bind,optional,create=file -lxc.mount.entry: /dev/ttyACM1 dev/ttyACM1 none bind,optional,create=file -EOF - fi - - if [ "$CT_TYPE" == "0" ]; then - if [[ "$APP" == "Channels" || "$APP" == "Emby" || "$APP" == "ErsatzTV" || "$APP" == "Frigate" || "$APP" == "Jellyfin" || "$APP" == "Plex" || "$APP" == "Scrypted" || "$APP" == "Tdarr" || "$APP" == "Unmanic" || "$APP" == "Ollama" ]]; then - cat <>$LXC_CONFIG -# VAAPI hardware transcoding -lxc.cgroup2.devices.allow: c 226:0 rwm -lxc.cgroup2.devices.allow: c 226:128 rwm -lxc.cgroup2.devices.allow: c 29:0 rwm -lxc.mount.entry: /dev/fb0 dev/fb0 none bind,optional,create=file -lxc.mount.entry: /dev/dri dev/dri none bind,optional,create=dir -lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file -EOF - fi - else - if [[ "$APP" == "Channels" || "$APP" == "Emby" || "$APP" == "ErsatzTV" || "$APP" == "Frigate" || "$APP" == "Jellyfin" || "$APP" == "Plex" || "$APP" == "Scrypted" || "$APP" == "Tdarr" || "$APP" == "Unmanic" || "$APP" == "Ollama" ]]; then - if [[ -e "/dev/dri/renderD128" ]]; then - if [[ -e 
"/dev/dri/card0" ]]; then - cat <>$LXC_CONFIG -# VAAPI hardware transcoding -dev0: /dev/dri/card0,gid=44 -dev1: /dev/dri/renderD128,gid=104 -EOF - else - cat <>$LXC_CONFIG -# VAAPI hardware transcoding -dev0: /dev/dri/card1,gid=44 -dev1: /dev/dri/renderD128,gid=104 -EOF - fi - fi - fi - fi - - # This starts the container and executes -install.sh - msg_info "Starting LXC Container" - pct start "$CTID" - msg_ok "Started LXC Container" - - if [[ ! -f "/root/actions-runner/_work/ProxmoxVE/ProxmoxVE/install/$var_install.sh" ]]; then - msg_error "No install script found for $APP" - exit 1 - fi - if [ "$var_os" == "alpine" ]; then - sleep 3 - pct exec "$CTID" -- /bin/sh -c 'cat </etc/apk/repositories -http://dl-cdn.alpinelinux.org/alpine/latest-stable/main -http://dl-cdn.alpinelinux.org/alpine/latest-stable/community -EOF' - pct exec "$CTID" -- ash -c "apk add bash >/dev/null" - fi - lxc-attach -n "$CTID" -- bash -c "$(cat /root/actions-runner/_work/ProxmoxVE/ProxmoxVE/install/$var_install.sh)" - -} - -description() { - IP=$(pct exec "$CTID" ip a s dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1) -} diff --git a/.github/workflows/scripts/app-test/pr-create-lxc.sh b/.github/workflows/scripts/app-test/pr-create-lxc.sh deleted file mode 100644 index 4012599c9..000000000 --- a/.github/workflows/scripts/app-test/pr-create-lxc.sh +++ /dev/null @@ -1,163 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) 2021-2026 community-scripts ORG -# Author: Michel Roegl-Brunner (michelroegl-brunner) -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE - -color() { - return -} -catch_errors() { - set -Eeuo pipefail - trap 'error_handler $LINENO "$BASH_COMMAND"' ERR -} - -# This function handles errors -error_handler() { - local exit_code="$?" 
- local line_number="$1" - local command="$2" - local error_message="Failure in line $line_number: exit code $exit_code: while executing command $command" - echo -e "\n$error_message" - exit 100 -} -verb_ip6() { - return -} - -msg_info() { - local msg="$1" - echo -ne "${msg}\n" -} - -msg_ok() { - local msg="$1" - echo -e "${msg}\n" -} - -msg_error() { - - local msg="$1" - echo -e "${msg}\n" -} - -VALIDCT=$(pvesm status -content rootdir | awk 'NR>1') -if [ -z "$VALIDCT" ]; then - msg_error "Unable to detect a valid Container Storage location." - exit 1 -fi -VALIDTMP=$(pvesm status -content vztmpl | awk 'NR>1') -if [ -z "$VALIDTMP" ]; then - msg_error "Unable to detect a valid Template Storage location." - exit 1 -fi - -function select_storage() { - local CLASS=$1 - local CONTENT - local CONTENT_LABEL - case $CLASS in - container) - CONTENT='rootdir' - CONTENT_LABEL='Container' - ;; - template) - CONTENT='vztmpl' - CONTENT_LABEL='Container template' - ;; - *) false || { - msg_error "Invalid storage class." - exit 201 - } ;; - esac - - # This Queries all storage locations - local -a MENU - while read -r line; do - local TAG=$(echo $line | awk '{print $1}') - local TYPE=$(echo $line | awk '{printf "%-10s", $2}') - local FREE=$(echo $line | numfmt --field 4-6 --from-unit=K --to=iec --format %.2f | awk '{printf( "%9sB", $6)}') - local ITEM="Type: $TYPE Free: $FREE " - local OFFSET=2 - if [[ $((${#ITEM} + $OFFSET)) -gt ${MSG_MAX_LENGTH:-} ]]; then - local MSG_MAX_LENGTH=$((${#ITEM} + $OFFSET)) - fi - MENU+=("$TAG" "$ITEM" "OFF") - done < <(pvesm status -content $CONTENT | awk 'NR>1') - - # Select storage location - if [ $((${#MENU[@]} / 3)) -eq 1 ]; then - printf ${MENU[0]} - else - msg_error "STORAGE ISSUES!" - exit 202 - fi -} - -[[ "${CTID:-}" ]] || { - msg_error "You need to set 'CTID' variable." - exit 203 -} -[[ "${PCT_OSTYPE:-}" ]] || { - msg_error "You need to set 'PCT_OSTYPE' variable." 
- exit 204 -} - -# Test if ID is valid -[ "$CTID" -ge "100" ] || { - msg_error "ID cannot be less than 100." - exit 205 -} - -# Test if ID is in use -if pct status $CTID &>/dev/null; then - echo -e "ID '$CTID' is already in use." - unset CTID - msg_error "Cannot use ID that is already in use." - exit 206 -fi - -TEMPLATE_STORAGE=$(select_storage template) || exit - -CONTAINER_STORAGE=$(select_storage container) || exit - -pveam update >/dev/null - -TEMPLATE_SEARCH=${PCT_OSTYPE}-${PCT_OSVERSION:-} -mapfile -t TEMPLATES < <(pveam available -section system | sed -n "s/.*\($TEMPLATE_SEARCH.*\)/\1/p" | sort -t - -k 2 -V) -[ ${#TEMPLATES[@]} -gt 0 ] || { - msg_error "Unable to find a template when searching for '$TEMPLATE_SEARCH'." - exit 207 -} -TEMPLATE="${TEMPLATES[-1]}" - -TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE" - -if ! pveam list "$TEMPLATE_STORAGE" | grep -q "$TEMPLATE"; then - [[ -f "$TEMPLATE_PATH" ]] && rm -f "$TEMPLATE_PATH" - pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null || - { - msg_error "A problem occurred while downloading the LXC template." - exit 208 - } -fi - -grep -q "root:100000:65536" /etc/subuid || echo "root:100000:65536" >>/etc/subuid -grep -q "root:100000:65536" /etc/subgid || echo "root:100000:65536" >>/etc/subgid - -PCT_OPTIONS=(${PCT_OPTIONS[@]:-${DEFAULT_PCT_OPTIONS[@]}}) -[[ " ${PCT_OPTIONS[@]} " =~ " -rootfs " ]] || PCT_OPTIONS+=(-rootfs "$CONTAINER_STORAGE:${PCT_DISK_SIZE:-8}") - -if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" &>/dev/null; then - [[ -f "$TEMPLATE_PATH" ]] && rm -f "$TEMPLATE_PATH" - - pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null || - { - msg_error "A problem occurred while re-downloading the LXC template." - exit 208 - } - - if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" &>/dev/null; then - msg_error "A problem occurred while trying to create container after re-downloading template." 
- exit 200 - fi -fi diff --git a/.github/workflows/scripts/app-test/pr-install.func b/.github/workflows/scripts/app-test/pr-install.func deleted file mode 100644 index 1709a1c16..000000000 --- a/.github/workflows/scripts/app-test/pr-install.func +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) 2021-2026 community-scripts ORG -# Author: Michel Roegl-Brunner (michelroegl-brunner) -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE - -color() { - return -} - -catch_errors() { - set -Euo pipefail - trap 'error_handler $LINENO "$BASH_COMMAND"' ERR -} - -error_handler() { - local line_number="$1" - local command="$2" - local error_message="Failure in line $line_number while executing command '$command'" - echo -e "\n$error_message\n" >&2 - exit 1 -} - -verb_ip6() { - STD="silent" - silent() { - "$@" >/dev/null 2>&1 || error_handler "${BASH_LINENO[0]}" "$*" - } - return -} - -msg_info() { - local msg="$1" - echo -ne "${msg}\n" -} - -msg_ok() { - local msg="$1" - echo -e "${msg}\n" -} - -msg_error() { - - local msg="$1" - echo -e "${msg}\n" -} - -RETRY_NUM=10 -RETRY_EVERY=3 -setting_up_container() { - - sed -i "/$LANG/ s/\(^# \)//" /etc/locale.gen - locale_line=$(grep -v '^#' /etc/locale.gen | grep -E '^[a-zA-Z]' | awk '{print $1}' | head -n 1) - echo "LANG=${locale_line}" >/etc/default/locale - locale-gen >/dev/null - export LANG=${locale_line} - echo $tz >/etc/timezone - ln -sf /usr/share/zoneinfo/$tz /etc/localtime - - for ((i = RETRY_NUM; i > 0; i--)); do - if [ "$(hostname -I)" != "" ]; then - break - fi - sleep $RETRY_EVERY - done - if [ "$(hostname -I)" = "" ]; then - echo 1>&2 -e "\nNo Network After $RETRY_NUM Tries" - echo -e "Check Network Settings" - exit 101 - fi - rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED - systemctl disable -q --now systemd-networkd-wait-online.service -} - -network_check() { - RESOLVEDIP=$(getent hosts github.com | awk '{ print $1 }') - if [[ -z "$RESOLVEDIP" ]]; then msg_error "DNS Lookup 
Failure"; else msg_ok "DNS Resolved github.com to $RESOLVEDIP"; fi - set -e -} - -update_os() { - export DEBIAN_FRONTEND=noninteractive - apt-get update >/dev/null 2>&1 - apt-get -o Dpkg::Options::="--force-confold" -y dist-upgrade >/dev/null - rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED -} - -motd_ssh() { - return -} - -customize() { - return -} diff --git a/.github/workflows/scripts/update-json.sh b/.github/workflows/scripts/update-json.sh deleted file mode 100644 index 1711f7550..000000000 --- a/.github/workflows/scripts/update-json.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -FILE=$1 -TODAY=$(date -u +"%Y-%m-%d") - -if [[ -z "$FILE" ]]; then - echo "No file specified. Exiting." - exit 1 -fi - -if [[ ! -f "$FILE" ]]; then - echo "File $FILE not found. Exiting." - exit 1 -fi - -DATE_IN_JSON=$(jq -r '.date_created' "$FILE" 2>/dev/null || echo "") - -if [[ "$DATE_IN_JSON" != "$TODAY" ]]; then - jq --arg date "$TODAY" '.date_created = $date' "$FILE" >tmp.json && mv tmp.json "$FILE" -fi diff --git a/.github/workflows/scripts/update_json_date.sh b/.github/workflows/scripts/update_json_date.sh deleted file mode 100644 index 13305de83..000000000 --- a/.github/workflows/scripts/update_json_date.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash - -# Verzeichnis, das die JSON-Dateien enthält -json_dir="./json/*.json" - -current_date=$(date +"%Y-%m-%d") - -for json_file in $json_dir; do - if [[ -f "$json_file" ]]; then - current_json_date=$(jq -r '.date_created' "$json_file") - - if [[ "$current_json_date" != "$current_date" ]]; then - echo "Updating $json_file with date $current_date" - jq --arg date "$current_date" '.date_created = $date' "$json_file" >temp.json && mv temp.json "$json_file" - - git add "$json_file" - git commit -m "Update date_created to $current_date in $json_file" - else - echo "Date in $json_file is already up to date." 
- fi - fi -done -git push origin HEAD diff --git a/.github/workflows/update-json-date.yml b/.github/workflows/update-json-date.yml deleted file mode 100644 index 9757d4d5d..000000000 --- a/.github/workflows/update-json-date.yml +++ /dev/null @@ -1,152 +0,0 @@ -name: Update JSON Date - -on: - push: - branches: - - main - paths: - - "frontend/public/json/**.json" - workflow_dispatch: - -jobs: - update-app-files: - if: github.repository == 'community-scripts/ProxmoxVE' - runs-on: ubuntu-latest - - permissions: - contents: write - pull-requests: write - - steps: - - name: Generate a token - id: generate-token - uses: actions/create-github-app-token@v1 - with: - app-id: ${{ vars.APP_ID }} - private-key: ${{ secrets.APP_PRIVATE_KEY }} - - - name: Generate a token for PR approval and merge - id: generate-token-merge - uses: actions/create-github-app-token@v1 - with: - app-id: ${{ secrets.APP_ID_APPROVE_AND_MERGE }} - private-key: ${{ secrets.APP_KEY_APPROVE_AND_MERGE }} - - - name: Generate dynamic branch name - id: timestamp - run: echo "BRANCH_NAME=pr-update-json-$(date +'%Y%m%d%H%M%S')" >> $GITHUB_ENV - - - name: Set up GH_TOKEN - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - echo "GH_TOKEN=${GH_TOKEN}" >> $GITHUB_ENV - - - name: Checkout Repository - uses: actions/checkout@v4 - with: - fetch-depth: 2 # Ensure we have the last two commits - - - name: Get Previous Commit - id: prev_commit - run: | - PREV_COMMIT=$(git rev-parse HEAD^) - echo "Previous commit: $PREV_COMMIT" - echo "prev_commit=$PREV_COMMIT" >> $GITHUB_ENV - - - name: Get Newly Added JSON Files - id: new_json_files - run: | - git diff --name-only --diff-filter=A ${{ env.prev_commit }} HEAD | grep '^frontend/public/json/.*\.json$' > new_files.txt || true - echo "New files detected:" - cat new_files.txt || echo "No new files." 
- - - name: Disable file mode changes - run: git config core.fileMode false - - - name: Set up Git - run: | - git config --global user.name "GitHub Actions" - git config --global user.email "github-actions[bot]@users.noreply.github.com" - - - name: Change JSON Date - id: change-json-date - run: | - current_date=$(date +"%Y-%m-%d") - while IFS= read -r file; do - # Skip empty lines - [[ -z "$file" ]] && continue - - if [[ -f "$file" ]]; then - echo "Processing $file..." - current_json_date=$(jq -r '.date_created // empty' "$file") - if [[ -z "$current_json_date" || "$current_json_date" != "$current_date" ]]; then - echo "Updating $file with date $current_date" - jq --arg date "$current_date" '.date_created = $date' "$file" > temp.json && mv temp.json "$file" - else - echo "Date in $file is already up to date." - fi - else - echo "Warning: File $file not found!" - fi - done < new_files.txt - rm new_files.txt - - - name: Check if there are any changes - run: | - echo "Checking for changes..." - git add -A - git status - if git diff --cached --quiet; then - echo "No changes detected." - echo "changed=false" >> "$GITHUB_ENV" - else - echo "Changes detected:" - git diff --stat --cached - echo "changed=true" >> "$GITHUB_ENV" - fi - - # Step 7: Commit and create PR if changes exist - - name: Commit and create PR if changes exist - if: env.changed == 'true' - run: | - - - git commit -m "Update date in json" - git checkout -b ${{ env.BRANCH_NAME }} - git push origin ${{ env.BRANCH_NAME }} - - gh pr create --title "[core] update date in json" \ - --body "This PR is auto-generated by a GitHub Action to update the date in json." 
\ - --head ${{ env.BRANCH_NAME }} \ - --base main \ - --label "automated pr" - env: - GH_TOKEN: ${{ steps.generate-token.outputs.token }} - - - name: Approve pull request - if: env.changed == 'true' - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - PR_NUMBER=$(gh pr list --head "${{ env.BRANCH_NAME }}" --json number --jq '.[].number') - if [ -n "$PR_NUMBER" ]; then - gh pr review $PR_NUMBER --approve - fi - - - name: Approve pull request and merge - if: env.changed == 'true' - env: - GH_TOKEN: ${{ steps.generate-token-merge.outputs.token }} - run: | - git config --global user.name "github-actions-automege[bot]" - git config --global user.email "github-actions-automege[bot]@users.noreply.github.com" - PR_NUMBER=$(gh pr list --head "${BRANCH_NAME}" --json number --jq '.[].number') - if [ -n "$PR_NUMBER" ]; then - gh pr review $PR_NUMBER --approve - gh pr merge $PR_NUMBER --squash --admin - fi - - - name: No changes detected - if: env.changed == 'false' - run: echo "No changes to commit. Workflow completed successfully." diff --git a/.github/workflows/update-script-timestamp-on-sh-change.yml b/.github/workflows/update-script-timestamp-on-sh-change.yml index 5716e6b60..916df6a58 100644 --- a/.github/workflows/update-script-timestamp-on-sh-change.yml +++ b/.github/workflows/update-script-timestamp-on-sh-change.yml @@ -83,7 +83,8 @@ jobs: const http = require('http'); const url = require('url'); - function request(fullUrl, opts) { + function request(fullUrl, opts, redirectCount) { + redirectCount = redirectCount || 0; return new Promise(function(resolve, reject) { const u = url.parse(fullUrl); const isHttps = u.protocol === 'https:'; @@ -98,6 +99,13 @@ jobs: if (body) options.headers['Content-Length'] = Buffer.byteLength(body); const lib = isHttps ? 
https : http; const req = lib.request(options, function(res) { + if (res.statusCode >= 300 && res.statusCode < 400 && res.headers.location) { + if (redirectCount >= 5) return reject(new Error('Too many redirects from ' + fullUrl)); + const redirectUrl = url.resolve(fullUrl, res.headers.location); + res.resume(); + resolve(request(redirectUrl, opts, redirectCount + 1)); + return; + } let data = ''; res.on('data', function(chunk) { data += chunk; }); res.on('end', function() { @@ -147,13 +155,21 @@ jobs: console.log('Slug not in DB, skipping: ' + slug); continue; } + const today = new Date().toISOString().split('T')[0]; + const patchBody = { + script_updated: today, + last_update_commit: process.env.PR_URL || process.env.COMMIT_URL || '' + }; + // When a dev script is merged into main, promote it to production + if (record.is_dev === true) { + patchBody.is_dev = false; + patchBody.script_created = today; + console.log('Promoting dev script to production: ' + slug); + } const patchRes = await request(recordsUrl + '/' + record.id, { method: 'PATCH', headers: { 'Authorization': token, 'Content-Type': 'application/json' }, - body: JSON.stringify({ - name: record.name || record.slug, - last_update_commit: process.env.PR_URL || process.env.COMMIT_URL || '' - }) + body: JSON.stringify(patchBody) }); if (!patchRes.ok) { console.warn('PATCH failed for slug ' + slug + ': ' + patchRes.body); diff --git a/.github/workflows/update-versions-github.yml b/.github/workflows/update-versions-github.yml deleted file mode 100644 index fc6d2e135..000000000 --- a/.github/workflows/update-versions-github.yml +++ /dev/null @@ -1,236 +0,0 @@ -name: Update GitHub Versions (New) - -on: - workflow_dispatch: - schedule: - # Runs 4x daily: 00:00, 06:00, 12:00, 18:00 UTC - - cron: "0 0,6,12,18 * * *" - -permissions: - contents: write - pull-requests: write - -env: - VERSIONS_FILE: frontend/public/json/github-versions.json - BRANCH_NAME: automated/update-github-versions - AUTOMATED_PR_LABEL: 
"automated pr" - -jobs: - update-github-versions: - if: github.repository == 'community-scripts/ProxmoxVE' - runs-on: ubuntu-latest - - steps: - - name: Generate a token - id: generate-token - uses: actions/create-github-app-token@v1 - with: - app-id: ${{ vars.APP_ID }} - private-key: ${{ secrets.APP_PRIVATE_KEY }} - - - name: Generate a token for PR approval and merge - id: generate-token-merge - uses: actions/create-github-app-token@v1 - with: - app-id: ${{ secrets.APP_ID_APPROVE_AND_MERGE }} - private-key: ${{ secrets.APP_KEY_APPROVE_AND_MERGE }} - - - name: Checkout Repository - uses: actions/checkout@v4 - with: - ref: main - - - name: Extract GitHub versions from install scripts - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - set -euo pipefail - - echo "=========================================" - echo " Extracting GitHub versions from scripts" - echo "=========================================" - - # Initialize versions array - versions_json="[]" - - # Function to add a version entry - add_version() { - local slug="$1" - local repo="$2" - local version="$3" - local pinned="$4" - local date="$5" - - versions_json=$(echo "$versions_json" | jq \ - --arg slug "$slug" \ - --arg repo "$repo" \ - --arg version "$version" \ - --argjson pinned "$pinned" \ - --arg date "$date" \ - '. += [{"slug": $slug, "repo": $repo, "version": $version, "pinned": $pinned, "date": $date}]') - } - - # Get list of slugs from JSON files - echo "" - echo "=== Scanning JSON files for slugs ===" - - for json_file in frontend/public/json/*.json; do - [[ ! 
-f "$json_file" ]] && continue - - # Skip non-app JSON files - basename_file=$(basename "$json_file") - case "$basename_file" in - metadata.json|versions.json|github-versions.json|dependency-check.json|update-apps.json) - continue - ;; - esac - - # Extract slug from JSON - slug=$(jq -r '.slug // empty' "$json_file" 2>/dev/null) - [[ -z "$slug" ]] && continue - - # Find corresponding script (install script or addon script) - install_script="" - if [[ -f "install/${slug}-install.sh" ]]; then - install_script="install/${slug}-install.sh" - elif [[ -f "tools/addon/${slug}.sh" ]]; then - install_script="tools/addon/${slug}.sh" - else - continue - fi - - # Look for fetch_and_deploy_gh_release calls - # Pattern: fetch_and_deploy_gh_release "app" "owner/repo" ["mode"] ["version"] - while IFS= read -r line; do - # Skip commented lines - [[ "$line" =~ ^[[:space:]]*# ]] && continue - - # Extract repo and version from fetch_and_deploy_gh_release - if [[ "$line" =~ fetch_and_deploy_gh_release[[:space:]]+\"[^\"]*\"[[:space:]]+\"([^\"]+)\"([[:space:]]+\"([^\"]+)\")?([[:space:]]+\"([^\"]+)\")? 
]]; then - repo="${BASH_REMATCH[1]}" - mode="${BASH_REMATCH[3]:-tarball}" - pinned_version="${BASH_REMATCH[5]:-latest}" - - # Check if version is pinned (not "latest" and not empty) - is_pinned=false - target_version="" - - if [[ -n "$pinned_version" && "$pinned_version" != "latest" ]]; then - is_pinned=true - target_version="$pinned_version" - fi - - # Fetch version from GitHub - if [[ "$is_pinned" == "true" ]]; then - # For pinned versions, verify it exists and get date - response=$(gh api "repos/${repo}/releases/tags/${target_version}" 2>/dev/null || echo '{}') - if echo "$response" | jq -e '.tag_name' > /dev/null 2>&1; then - version=$(echo "$response" | jq -r '.tag_name') - date=$(echo "$response" | jq -r '.published_at // empty') - add_version "$slug" "$repo" "$version" "true" "$date" - echo "[$slug] ✓ $version (pinned)" - else - echo "[$slug] ⚠ pinned version $target_version not found" - fi - else - # Fetch latest release - response=$(gh api "repos/${repo}/releases/latest" 2>/dev/null || echo '{}') - if echo "$response" | jq -e '.tag_name' > /dev/null 2>&1; then - version=$(echo "$response" | jq -r '.tag_name') - date=$(echo "$response" | jq -r '.published_at // empty') - add_version "$slug" "$repo" "$version" "false" "$date" - echo "[$slug] ✓ $version" - else - # Try tags as fallback - version=$(gh api "repos/${repo}/tags" --jq '.[0].name // empty' 2>/dev/null || echo "") - if [[ -n "$version" ]]; then - add_version "$slug" "$repo" "$version" "false" "" - echo "[$slug] ✓ $version (from tags)" - else - echo "[$slug] ⚠ no version found" - fi - fi - fi - - break # Only first match per script - fi - done < <(grep 'fetch_and_deploy_gh_release' "$install_script" 2>/dev/null || true) - - done - - # Save versions file - echo "$versions_json" | jq --arg date "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \ - '{generated: $date, versions: (. 
| sort_by(.slug))}' > "$VERSIONS_FILE" - - total=$(echo "$versions_json" | jq 'length') - echo "" - echo "=========================================" - echo " Total versions extracted: $total" - echo "=========================================" - - - name: Check for changes - id: check-changes - run: | - # Check if file is new (untracked) or has changes - if [[ ! -f "$VERSIONS_FILE" ]]; then - echo "changed=false" >> "$GITHUB_OUTPUT" - echo "Versions file was not created" - elif ! git ls-files --error-unmatch "$VERSIONS_FILE" &>/dev/null; then - # File exists but is not tracked - it's new - echo "changed=true" >> "$GITHUB_OUTPUT" - echo "New file created: $VERSIONS_FILE" - elif git diff --quiet "$VERSIONS_FILE" 2>/dev/null; then - echo "changed=false" >> "$GITHUB_OUTPUT" - echo "No changes detected" - else - echo "changed=true" >> "$GITHUB_OUTPUT" - echo "Changes detected:" - git diff --stat "$VERSIONS_FILE" 2>/dev/null || true - fi - - - name: Commit and push changes - if: steps.check-changes.outputs.changed == 'true' - run: | - git config --global user.name "github-actions[bot]" - git config --global user.email "github-actions[bot]@users.noreply.github.com" - git add "$VERSIONS_FILE" - git commit -m "chore: update github-versions.json" - git checkout -b $BRANCH_NAME || git checkout $BRANCH_NAME - git push origin $BRANCH_NAME --force - - - name: Create pull request if not exists - if: steps.check-changes.outputs.changed == 'true' - env: - GH_TOKEN: ${{ steps.generate-token.outputs.token }} - run: | - PR_EXISTS=$(gh pr list --head "${BRANCH_NAME}" --json number --jq '.[].number') - if [ -z "$PR_EXISTS" ]; then - gh pr create --title "[Github Action] Update github-versions.json" \ - --body "This PR is auto-generated by a Github Action to update the github-versions.json file." 
\ - --head $BRANCH_NAME \ - --base main \ - --label "$AUTOMATED_PR_LABEL" - fi - - - name: Approve pull request - if: steps.check-changes.outputs.changed == 'true' - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - PR_NUMBER=$(gh pr list --head "${BRANCH_NAME}" --json number --jq '.[].number') - if [ -n "$PR_NUMBER" ]; then - gh pr review $PR_NUMBER --approve - fi - - - name: Approve pull request and merge - if: steps.check-changes.outputs.changed == 'true' - env: - GH_TOKEN: ${{ steps.generate-token-merge.outputs.token }} - run: | - git config --global user.name "github-actions-automege[bot]" - git config --global user.email "github-actions-automege[bot]@users.noreply.github.com" - PR_NUMBER=$(gh pr list --head "${BRANCH_NAME}" --json number --jq '.[].number') - if [ -n "$PR_NUMBER" ]; then - gh pr review $PR_NUMBER --approve - gh pr merge $PR_NUMBER --squash --admin - fi diff --git a/.gitignore b/.gitignore index 9701f0ea1..f762e9ee1 100644 --- a/.gitignore +++ b/.gitignore @@ -2,39 +2,14 @@ .DS_Store Thumbs.db -# Editor & IDE files (keeping .vscode settings but ignoring unnecessary metadata) +# Editor & IDE files !.vscode/ .vscode/*.workspace .vscode/*.tmp -# Log and Cache files +# Log files logs/ *.log -npm-debug.log* -yarn-debug.log* -yarn-error.log* - -# Python-specific exclusions -__pycache__/ -*.pyc -*.pyo -*.pyd -*.venv/ -venv/ -env/ -*.env - -# Node.js dependencies (frontend folder was excluded, but keeping this rule for reference) -frontend/node_modules/ -frontend/.svelte-kit/ -frontend/.turbo/ -frontend/.vite/ -frontend/build/ - -# API and Backend specific exclusions -api/.env -api/__pycache__/ -api/*.sqlite3 # Install scripts and temporary files install/tmp/ @@ -48,7 +23,7 @@ vm/*.vmdk vm/*.iso vm/*.bak -# Miscellaneous temporary or unnecessary files +# Miscellaneous temporary files *.bak *.swp *.swo @@ -56,22 +31,7 @@ vm/*.bak *.tmp *.backup -# JSON configuration backups +# JSON temporary files +json/ json/*.bak json/*.tmp -json/.vscode/ - -# 
Ignore compiled binaries or packaged artifacts -*.exe -*.dll -*.bin -*.deb -*.rpm -*.tar.gz -*.zip -*.tgz - -# Ignore repository metadata or Git itself -.git/ -.gitignore -.vscode/settings.json diff --git a/.vscode/.shellcheckrc b/.shellcheckrc similarity index 100% rename from .vscode/.shellcheckrc rename to .shellcheckrc diff --git a/.vscode/.editorconfig b/.vscode/.editorconfig deleted file mode 100644 index f79a823d7..000000000 --- a/.vscode/.editorconfig +++ /dev/null @@ -1,16 +0,0 @@ -; editorconfig.org -root = true - -[*] -charset = utf-8 -continuation_indent_size = 2 -end_of_line = lf -indent_size = 2 -indent_style = space -insert_final_newline = true -max_line_length = 120 -tab_width = 2 -; trim_trailing_whitespace = true ; disabled until files are cleaned up - -[*.md] -trim_trailing_whitespace = false diff --git a/CHANGELOG.md b/CHANGELOG.md index 6cb6b2aa9..b862c4972 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,33 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit + + + + + + + + + + + + + + + + + + + + + + + + + + +

📜 History

@@ -36,7 +63,21 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit
-

March (7 entries)

+

May (9 entries)

+ +[View May 2026 Changelog](.github/changelogs/2026/05.md) + +
+ +
+

April (30 entries)

+ +[View April 2026 Changelog](.github/changelogs/2026/04.md) + +
+ +
+

March (31 entries)

[View March 2026 Changelog](.github/changelogs/2026/03.md) @@ -420,989 +461,657 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit
-## 2026-03-11 +## 2026-05-11 + +### 🆕 New Scripts + + - Lychee ([#14424](https://github.com/community-scripts/ProxmoxVE/pull/14424)) ### 🚀 Updated Scripts - #### 🐞 Bug Fixes - - Tracearr: Increase default disk variable from 5 to 10 [@michelroegl-brunner](https://github.com/michelroegl-brunner) ([#12762](https://github.com/community-scripts/ProxmoxVE/pull/12762)) - - Fix Wireguard Dashboard update [@odin568](https://github.com/odin568) ([#12767](https://github.com/community-scripts/ProxmoxVE/pull/12767)) + - Termix: fix nginx pid path and log paths on update (#) [@MickLesk](https://github.com/MickLesk) ([#14419](https://github.com/community-scripts/ProxmoxVE/pull/14419)) + - Nginxproxymanager: restore NPM nginx.conf after OpenResty rebuid [@MickLesk](https://github.com/MickLesk) ([#14421](https://github.com/community-scripts/ProxmoxVE/pull/14421)) + + - #### 🔧 Refactor + + - InvestBrain: add commented reverse proxy config hints to .env [@MickLesk](https://github.com/MickLesk) ([#14422](https://github.com/community-scripts/ProxmoxVE/pull/14422)) ### 🧰 Tools - - #### ✨ New Features + - #### 🐞 Bug Fixes - - Coder-Code-Server: Check if config file exists [@michelroegl-brunner](https://github.com/michelroegl-brunner) ([#12758](https://github.com/community-scripts/ProxmoxVE/pull/12758)) + - Cronmaster: fix unexpected EOF in update_cronmaster script [@MickLesk](https://github.com/MickLesk) ([#14420](https://github.com/community-scripts/ProxmoxVE/pull/14420)) -## 2026-03-10 +## 2026-05-10 ### 🚀 Updated Scripts - #### 🐞 Bug Fixes - - [Fix] Immich: Pin libvips to 8.17.3 [@vhsdream](https://github.com/vhsdream) ([#12744](https://github.com/community-scripts/ProxmoxVE/pull/12744)) + - Save Beszel version [@lucacome](https://github.com/lucacome) ([#14389](https://github.com/community-scripts/ProxmoxVE/pull/14389)) + - karakeep: Fix SERVER_VERSION update [@MickLesk](https://github.com/MickLesk) ([#14378](https://github.com/community-scripts/ProxmoxVE/pull/14378)) + - inspIRCd: 
Fix service not autostarting [@tremor021](https://github.com/tremor021) ([#14368](https://github.com/community-scripts/ProxmoxVE/pull/14368)) -## 2026-03-09 + - #### 🔧 Refactor -### 🚀 Updated Scripts - - - Pin Opencloud to 5.2.0 [@vhsdream](https://github.com/vhsdream) ([#12721](https://github.com/community-scripts/ProxmoxVE/pull/12721)) - - - #### 🐞 Bug Fixes - - - [Hotfix] qBittorrent: Disable UPnP port forwarding by default [@vhsdream](https://github.com/vhsdream) ([#12728](https://github.com/community-scripts/ProxmoxVE/pull/12728)) - - [Quickfix] Opencloud: ensure correct case for binary [@vhsdream](https://github.com/vhsdream) ([#12729](https://github.com/community-scripts/ProxmoxVE/pull/12729)) - - Omada: Bump libssl [@MickLesk](https://github.com/MickLesk) ([#12724](https://github.com/community-scripts/ProxmoxVE/pull/12724)) - - openwebui: Ensure required dependencies [@MickLesk](https://github.com/MickLesk) ([#12717](https://github.com/community-scripts/ProxmoxVE/pull/12717)) - - Frigate: try an OpenVino model build fallback [@MickLesk](https://github.com/MickLesk) ([#12704](https://github.com/community-scripts/ProxmoxVE/pull/12704)) - - Change cronjob setup to use www-data user [@opastorello](https://github.com/opastorello) ([#12695](https://github.com/community-scripts/ProxmoxVE/pull/12695)) - - RustDesk Server: Fix check_for_gh_release function call [@tremor021](https://github.com/tremor021) ([#12694](https://github.com/community-scripts/ProxmoxVE/pull/12694)) - - - #### ✨ New Features - - - feat: improve zigbee2mqtt backup handler [@MickLesk](https://github.com/MickLesk) ([#12714](https://github.com/community-scripts/ProxmoxVE/pull/12714)) - - - #### 💥 Breaking Changes - - - Reactive Resume: rewrite for v5 using original repo amruthpilla/reactive-resume [@MickLesk](https://github.com/MickLesk) ([#12705](https://github.com/community-scripts/ProxmoxVE/pull/12705)) + - refactor: webcheck [@CrazyWolf13](https://github.com/CrazyWolf13) 
([#14391](https://github.com/community-scripts/ProxmoxVE/pull/14391)) ### 💾 Core - - #### ✨ New Features + - #### 🐞 Bug Fixes - - tools: add Alpine (apk) support to ensure_dependencies and is_package_installed [@MickLesk](https://github.com/MickLesk) ([#12703](https://github.com/community-scripts/ProxmoxVE/pull/12703)) - - tools.func: extend hwaccel with ROCm [@MickLesk](https://github.com/MickLesk) ([#12707](https://github.com/community-scripts/ProxmoxVE/pull/12707)) + - [tools.func]: Pin `pnpm` version [@tremor021](https://github.com/tremor021) ([#14386](https://github.com/community-scripts/ProxmoxVE/pull/14386)) -### 🌐 Website - - - #### ✨ New Features - - - feat: add CopycatWarningToast component for user warnings [@BramSuurdje](https://github.com/BramSuurdje) ([#12733](https://github.com/community-scripts/ProxmoxVE/pull/12733)) - -## 2026-03-08 +## 2026-05-09 ### 🚀 Updated Scripts - #### 🐞 Bug Fixes - - [Fix] Immich: chown install dir before machine-learning update [@vhsdream](https://github.com/vhsdream) ([#12684](https://github.com/community-scripts/ProxmoxVE/pull/12684)) - - [Fix] Scanopy: Build generate-fixtures [@vhsdream](https://github.com/vhsdream) ([#12686](https://github.com/community-scripts/ProxmoxVE/pull/12686)) - - fix: rustdeskserver: use correct repo string [@CrazyWolf13](https://github.com/CrazyWolf13) ([#12682](https://github.com/community-scripts/ProxmoxVE/pull/12682)) - - NZBGet: Fixes for RAR5 handling [@tremor021](https://github.com/tremor021) ([#12675](https://github.com/community-scripts/ProxmoxVE/pull/12675)) + - FlowiseAI: Migrate to pnpm [@MickLesk](https://github.com/MickLesk) ([#14344](https://github.com/community-scripts/ProxmoxVE/pull/14344)) + - Purge openresty [@lucacome](https://github.com/lucacome) ([#14353](https://github.com/community-scripts/ProxmoxVE/pull/14353)) + - Check for release for Sonarr [@lucacome](https://github.com/lucacome) ([#14354](https://github.com/community-scripts/ProxmoxVE/pull/14354)) + - 
fix(termix-install.sh): add tmpfiles.d persistence and systemd PIDFile path [@runnylogan](https://github.com/runnylogan) ([#14350](https://github.com/community-scripts/ProxmoxVE/pull/14350)) + - ERPNext: start bench Redis services before bench new-site [@MickLesk](https://github.com/MickLesk) ([#14343](https://github.com/community-scripts/ProxmoxVE/pull/14343)) + - [Hotfix]Jotty: use absolute path when creating data dir [@vhsdream](https://github.com/vhsdream) ([#14355](https://github.com/community-scripts/ProxmoxVE/pull/14355)) -### 🌐 Website - - - #### 🐞 Bug Fixes - - - LXC-Execute: Fix slug [@tremor021](https://github.com/tremor021) ([#12681](https://github.com/community-scripts/ProxmoxVE/pull/12681)) - -## 2026-03-07 - -### 🆕 New Scripts - - - ImmichFrame ([#12653](https://github.com/community-scripts/ProxmoxVE/pull/12653)) +## 2026-05-08 ### 🚀 Updated Scripts - #### 🐞 Bug Fixes - - Grocy: bump PHP version from 8.3 to 8.5 [@MickLesk](https://github.com/MickLesk) ([#12651](https://github.com/community-scripts/ProxmoxVE/pull/12651)) - - Check for influxdb3 installation in update_script [@odin568](https://github.com/odin568) ([#12648](https://github.com/community-scripts/ProxmoxVE/pull/12648)) - - Update Rdtclient to dotnet 10.0 [@asylumexp](https://github.com/asylumexp) ([#12638](https://github.com/community-scripts/ProxmoxVE/pull/12638)) - - fix(immich): fix update script failing to add Debian testing repo when preferences file already exists [@Copilot](https://github.com/Copilot) ([#12631](https://github.com/community-scripts/ProxmoxVE/pull/12631)) + - wishlist: pin pnpm to v10 to match engine requirements [@MickLesk](https://github.com/MickLesk) ([#14342](https://github.com/community-scripts/ProxmoxVE/pull/14342)) + - [pelican] fix env copy regression [@LetterN](https://github.com/LetterN) ([#14328](https://github.com/community-scripts/ProxmoxVE/pull/14328)) + - fix(homepage): fix ERR_PNPM_IGNORED_BUILDS error [@Sergih28](https://github.com/Sergih28) 
([#14315](https://github.com/community-scripts/ProxmoxVE/pull/14315)) + + - #### ✨ New Features + + - tools.func: add setup_nltk as new function [@MickLesk](https://github.com/MickLesk) ([#14314](https://github.com/community-scripts/ProxmoxVE/pull/14314)) ### 💾 Core - - #### ✨ New Features + - #### 🐞 Bug Fixes - - tools: add interactive GitHub PAT prompt on rate limit / auth failure [@MickLesk](https://github.com/MickLesk) ([#12652](https://github.com/community-scripts/ProxmoxVE/pull/12652)) + - tools.func: fix meilisearch import-dump background process handling [@MickLesk](https://github.com/MickLesk) ([#14341](https://github.com/community-scripts/ProxmoxVE/pull/14341)) -### 🌐 Website - - - #### 📝 Script Information - - - Papra: update repository URL to papra-hq/papra [@MickLesk](https://github.com/MickLesk) ([#12650](https://github.com/community-scripts/ProxmoxVE/pull/12650)) - -## 2026-03-06 +## 2026-05-07 ### 🚀 Updated Scripts - #### 🐞 Bug Fixes - - RustDesk Server: Fix update script [@tremor021](https://github.com/tremor021) ([#12625](https://github.com/community-scripts/ProxmoxVE/pull/12625)) - - [Node-RED] Restart service after update [@Aurelien30000](https://github.com/Aurelien30000) ([#12621](https://github.com/community-scripts/ProxmoxVE/pull/12621)) - - wealthfolio: update cors [@CrazyWolf13](https://github.com/CrazyWolf13) ([#12617](https://github.com/community-scripts/ProxmoxVE/pull/12617)) - - CryptPad: Better update handling [@tremor021](https://github.com/tremor021) ([#12611](https://github.com/community-scripts/ProxmoxVE/pull/12611)) + - termix: create /tmp/nginx before nginx -t [@MickLesk](https://github.com/MickLesk) ([#14312](https://github.com/community-scripts/ProxmoxVE/pull/14312)) + - The Lounge: Fix service not starting automatically [@tremor021](https://github.com/tremor021) ([#14311](https://github.com/community-scripts/ProxmoxVE/pull/14311)) + - netbird-lxc: fix installation check [@MickLesk](https://github.com/MickLesk)
([#14309](https://github.com/community-scripts/ProxmoxVE/pull/14309)) + - databasus: Backup and secure configuration file [@MickLesk](https://github.com/MickLesk) ([#14308](https://github.com/community-scripts/ProxmoxVE/pull/14308)) + - vm: update disk image URL for Ubuntu 25.04 [@MickLesk](https://github.com/MickLesk) ([#14290](https://github.com/community-scripts/ProxmoxVE/pull/14290)) - #### ✨ New Features - - RustDesk Server: Switch to updated repository [@tremor021](https://github.com/tremor021) ([#12083](https://github.com/community-scripts/ProxmoxVE/pull/12083)) - - - #### 💥 Breaking Changes - - - Semaphore: Move from BoltDB to SQLite [@tremor021](https://github.com/tremor021) ([#12624](https://github.com/community-scripts/ProxmoxVE/pull/12624)) - -## 2026-03-05 - -### 🆕 New Scripts - - - ddclient ([#12587](https://github.com/community-scripts/ProxmoxVE/pull/12587)) -- Netbird ([#12585](https://github.com/community-scripts/ProxmoxVE/pull/12585)) -- Papra ([#12577](https://github.com/community-scripts/ProxmoxVE/pull/12577)) - -### 🚀 Updated Scripts - - - #### 🐞 Bug Fixes - - - fluid-calendar: add build-essential to install and update dependencies [@Copilot](https://github.com/Copilot) ([#12602](https://github.com/community-scripts/ProxmoxVE/pull/12602)) - - Refactor: BentoPDF [@vhsdream](https://github.com/vhsdream) ([#12597](https://github.com/community-scripts/ProxmoxVE/pull/12597)) - - Tianji: Fix the bug introduced by the refactor [@tremor021](https://github.com/tremor021) ([#12564](https://github.com/community-scripts/ProxmoxVE/pull/12564)) - - PowerDNS: use 'launch=' instead of 'launch+=' for gsqlite3 backend [@MickLesk](https://github.com/MickLesk) ([#12579](https://github.com/community-scripts/ProxmoxVE/pull/12579)) + - pangolin: bump version to 1.18.3 [@MickLesk](https://github.com/MickLesk) ([#14297](https://github.com/community-scripts/ProxmoxVE/pull/14297)) ### 🗑️ Deleted Scripts - - Suwayomi-Server: remove due to inactivity and very low usage 
[@MickLesk](https://github.com/MickLesk) ([#12596](https://github.com/community-scripts/ProxmoxVE/pull/12596)) - -### 💾 Core - - - #### 🔧 Refactor - - - core: add var_os / var_version to whitelist for app.vars [@MickLesk](https://github.com/MickLesk) ([#12576](https://github.com/community-scripts/ProxmoxVE/pull/12576)) - -## 2026-03-04 - -### 🚀 Updated Scripts - - - #### 🐞 Bug Fixes - - - fix: gitea-mirror [@CrazyWolf13](https://github.com/CrazyWolf13) ([#12549](https://github.com/community-scripts/ProxmoxVE/pull/12549)) - - fix(immich): correct LibRaw clone URL to official upstream [@DenislavDenev](https://github.com/DenislavDenev) ([#12526](https://github.com/community-scripts/ProxmoxVE/pull/12526)) - - update: stirling-pdf: java 25 [@CrazyWolf13](https://github.com/CrazyWolf13) ([#12552](https://github.com/community-scripts/ProxmoxVE/pull/12552)) - - Docmost: register NoopAuditService globally when EE submodule is missing [@MickLesk](https://github.com/MickLesk) ([#12551](https://github.com/community-scripts/ProxmoxVE/pull/12551)) - - jellyseer/overseer migration corrupting /usr/bin/update [@MickLesk](https://github.com/MickLesk) ([#12539](https://github.com/community-scripts/ProxmoxVE/pull/12539)) - - PowerDNS: use gsqlite3 backend instead of BIND [@MickLesk](https://github.com/MickLesk) ([#12538](https://github.com/community-scripts/ProxmoxVE/pull/12538)) - - addon migrations: /usr/bin/update replacement to prevent syntax error [@MickLesk](https://github.com/MickLesk) ([#12540](https://github.com/community-scripts/ProxmoxVE/pull/12540)) - - - #### 🔧 Refactor - - - Fluid-Calendar: NodeJS bump [@tremor021](https://github.com/tremor021) ([#12558](https://github.com/community-scripts/ProxmoxVE/pull/12558)) - - Refactor: LiteLLM [@tremor021](https://github.com/tremor021) ([#12550](https://github.com/community-scripts/ProxmoxVE/pull/12550)) - -### 💾 Core - - - #### 🐞 Bug Fixes - - - tools: fall back to distro packages for psql 
[@MickLesk](https://github.com/MickLesk) ([#12542](https://github.com/community-scripts/ProxmoxVE/pull/12542)) - - fix: whitelist var_searchdomain and fix the handling of var_ns and va… [@tommoyer](https://github.com/tommoyer) ([#12521](https://github.com/community-scripts/ProxmoxVE/pull/12521)) - -## 2026-03-03 - -### 🆕 New Scripts - - - Tinyauth: v5 Support & add Debian Version [@MickLesk](https://github.com/MickLesk) ([#12501](https://github.com/community-scripts/ProxmoxVE/pull/12501)) - -### 🚀 Updated Scripts - - - #### 🐞 Bug Fixes - - - cross-seed: install build-essential to resolve missing `make` error [@Copilot](https://github.com/Copilot) ([#12522](https://github.com/community-scripts/ProxmoxVE/pull/12522)) - - meshcentral: increased disk space to 4GB [@MickLesk](https://github.com/MickLesk) ([#12509](https://github.com/community-scripts/ProxmoxVE/pull/12509)) - - - #### 🔧 Refactor - - - opnsense-vm: harden temp dir, bridge detection and network selection [@MickLesk](https://github.com/MickLesk) ([#12513](https://github.com/community-scripts/ProxmoxVE/pull/12513)) - -### 🗑️ Deleted Scripts - - - Remove Unifi Network Server scripts (dead APT repo) [@Copilot](https://github.com/Copilot) ([#12500](https://github.com/community-scripts/ProxmoxVE/pull/12500)) + - Remove: LiteLLM [@michelroegl-brunner](https://github.com/michelroegl-brunner) ([#14294](https://github.com/community-scripts/ProxmoxVE/pull/14294)) ### 💾 Core - #### ✨ New Features - - core: recovery - add ENOSPC disk-full detection with auto-retry using * 2 hdd [@MickLesk](https://github.com/MickLesk) ([#12511](https://github.com/community-scripts/ProxmoxVE/pull/12511)) + - update-apps: some improvements [@MickLesk](https://github.com/MickLesk) ([#14275](https://github.com/community-scripts/ProxmoxVE/pull/14275)) -### 📚 Documentation - - - Fix config_path casing in reactive-resume.json [@ScubyG](https://github.com/ScubyG) ([#12525](https://github.com/community-scripts/ProxmoxVE/pull/12525)) - -### 🌐 
Website - - - #### 🐞 Bug Fixes - - - Revert #11534 PR that messed up search [@BramSuurdje](https://github.com/BramSuurdje) ([#12492](https://github.com/community-scripts/ProxmoxVE/pull/12492)) - -## 2026-03-02 +## 2026-05-06 ### 🆕 New Scripts - - PowerDNS ([#12481](https://github.com/community-scripts/ProxmoxVE/pull/12481)) -- Profilarr ([#12441](https://github.com/community-scripts/ProxmoxVE/pull/12441)) + - Hoodik ([#14279](https://github.com/community-scripts/ProxmoxVE/pull/14279)) ### 🚀 Updated Scripts - #### 🐞 Bug Fixes - - Tracearr: prepare for imminent v1.4.19 release [@durzo](https://github.com/durzo) ([#12413](https://github.com/community-scripts/ProxmoxVE/pull/12413)) + - Pelican-Panel: create backup subdirectory before copying storage [@MickLesk](https://github.com/MickLesk) ([#14274](https://github.com/community-scripts/ProxmoxVE/pull/14274)) + - Rustdeskserver: remove redundant else with undefined RELEASE var [@MickLesk](https://github.com/MickLesk) ([#14272](https://github.com/community-scripts/ProxmoxVE/pull/14272)) + +### 🧰 Tools + + - #### 🔧 Refactor + + - AdguardHome-Sync replace ifconfig with hostname -I for IP detection [@MickLesk](https://github.com/MickLesk) ([#14273](https://github.com/community-scripts/ProxmoxVE/pull/14273)) + +## 2026-05-05 + +### 🆕 New Scripts + + - LibreChat ([#14247](https://github.com/community-scripts/ProxmoxVE/pull/14247)) +- Matomo ([#14248](https://github.com/community-scripts/ProxmoxVE/pull/14248)) +- Storyteller ([#14122](https://github.com/community-scripts/ProxmoxVE/pull/14122)) + +### 🧰 Tools + + - Fix container count message in update-apps.sh [@Quotacious](https://github.com/Quotacious) ([#14265](https://github.com/community-scripts/ProxmoxVE/pull/14265)) + +## 2026-05-04 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Databasus: move .env to filesystem root so service starts correctly [@Copilot](https://github.com/Copilot) ([#14252](https://github.com/community-scripts/ProxmoxVE/pull/14252)) + - 
Databasus: update mongo-tools fallback to 100.16.1 and use now pnpm instead of npm ci [@MickLesk](https://github.com/MickLesk) ([#14240](https://github.com/community-scripts/ProxmoxVE/pull/14240)) + +### 💾 Core - #### ✨ New Features - - Frigate: Bump to v0.17 [@MickLesk](https://github.com/MickLesk) ([#12474](https://github.com/community-scripts/ProxmoxVE/pull/12474)) + - tools.func get_latest_gh_tag - add pagination to find prefixed tags beyond first 50 [@MickLesk](https://github.com/MickLesk) ([#14241](https://github.com/community-scripts/ProxmoxVE/pull/14241)) + - tools.func: add GitLab release check/fetch/deploy helpers [@MickLesk](https://github.com/MickLesk) ([#14242](https://github.com/community-scripts/ProxmoxVE/pull/14242)) + +## 2026-05-03 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Hortusfox: fix update issues [@tomfrenzel](https://github.com/tomfrenzel) ([#14214](https://github.com/community-scripts/ProxmoxVE/pull/14214)) + + - #### ✨ New Features + + - Refactor: PeaNUT for v6 [@MickLesk](https://github.com/MickLesk) ([#14224](https://github.com/community-scripts/ProxmoxVE/pull/14224)) + - pangolin: pin version, drop manual SQL, use upstream migrator [@MickLesk](https://github.com/MickLesk) ([#14223](https://github.com/community-scripts/ProxmoxVE/pull/14223)) + +### 💾 Core + + - #### 🐞 Bug Fixes + + - core: fix validate_bridge function [@MichaelOultram](https://github.com/MichaelOultram) ([#14206](https://github.com/community-scripts/ProxmoxVE/pull/14206)) + +### 🧰 Tools + + - #### 🐞 Bug Fixes + + - pve/pbs scripts: guard sed against missing /etc/apt/sources.list [@MickLesk](https://github.com/MickLesk) ([#14222](https://github.com/community-scripts/ProxmoxVE/pull/14222)) + +## 2026-05-02 + +### 🆕 New Scripts + + - protonmail-bridge ([#14136](https://github.com/community-scripts/ProxmoxVE/pull/14136)) +- Tube Archivist ([#14123](https://github.com/community-scripts/ProxmoxVE/pull/14123)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - 
Nagios: Ping fix [@tremor021](https://github.com/tremor021) ([#14186](https://github.com/community-scripts/ProxmoxVE/pull/14186)) + - opnsense-vm: retry pvesm alloc on transient zfs 'got timeout' errors [@MickLesk](https://github.com/MickLesk) ([#14157](https://github.com/community-scripts/ProxmoxVE/pull/14157)) + - ImmichFrame: fix update by reinstalling dotnet-sdk before publish [@MickLesk](https://github.com/MickLesk) ([#14158](https://github.com/community-scripts/ProxmoxVE/pull/14158)) + - [FIX]ShelfMark: Use UV sync for shelfmark backend build; update to Python 3.14 [@vhsdream](https://github.com/vhsdream) ([#14170](https://github.com/community-scripts/ProxmoxVE/pull/14170)) + - alpine: remove deb/ubuntu-only resource & storage checks from update-script [@MickLesk](https://github.com/MickLesk) ([#14166](https://github.com/community-scripts/ProxmoxVE/pull/14166)) + - Threadfin: use 'threadfin-app' as app name to avoid version-file clash [@MickLesk](https://github.com/MickLesk) ([#14159](https://github.com/community-scripts/ProxmoxVE/pull/14159)) + +### 💾 Core + + - #### ✨ New Features + + - core: prompt to also run installed addon update scripts (…/bin/update_*) after update_script [@MickLesk](https://github.com/MickLesk) ([#14162](https://github.com/community-scripts/ProxmoxVE/pull/14162)) + +## 2026-05-01 + +### 🆕 New Scripts + + - SoulSync ([#14124](https://github.com/community-scripts/ProxmoxVE/pull/14124)) +- Teable ([#14125](https://github.com/community-scripts/ProxmoxVE/pull/14125)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Step ca update [@heinemannj](https://github.com/heinemannj) ([#14058](https://github.com/community-scripts/ProxmoxVE/pull/14058)) + - paperless-ngx: refresh NLTK data on update [@kurtislanderson](https://github.com/kurtislanderson) ([#14144](https://github.com/community-scripts/ProxmoxVE/pull/14144)) + - [Pelican Panel] stop deleting the public storage [@LetterN](https://github.com/LetterN) 
([#14145](https://github.com/community-scripts/ProxmoxVE/pull/14145)) + + - #### 🔧 Refactor + + - Mail-Archiver: update dependencies [@tremor021](https://github.com/tremor021) ([#14152](https://github.com/community-scripts/ProxmoxVE/pull/14152)) + +## 2026-04-30 + +### 🆕 New Scripts + + - Nagios ([#14126](https://github.com/community-scripts/ProxmoxVE/pull/14126)) +- Neko ([#14121](https://github.com/community-scripts/ProxmoxVE/pull/14121)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - alpine-docker: install openssl as core dependency | alpine-komodo: check & install openssl if missing [@MickLesk](https://github.com/MickLesk) ([#14134](https://github.com/community-scripts/ProxmoxVE/pull/14134)) + - endurain: update source references to Codeberg [@MickLesk](https://github.com/MickLesk) ([#14128](https://github.com/community-scripts/ProxmoxVE/pull/14128)) + +### 💾 Core + + - #### 🔧 Refactor + + - tools.func: Manage minor versions for MongoDB 8.x [@tremor021](https://github.com/tremor021) ([#14131](https://github.com/community-scripts/ProxmoxVE/pull/14131)) + +## 2026-04-29 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - GrayLog: MongoDB update to 8.2.x [@tremor021](https://github.com/tremor021) ([#14114](https://github.com/community-scripts/ProxmoxVE/pull/14114)) + - Graylog: Better information in the log file [@tremor021](https://github.com/tremor021) ([#14110](https://github.com/community-scripts/ProxmoxVE/pull/14110)) + + - #### 🔧 Refactor + + - Refactor: checkMK [@MickLesk](https://github.com/MickLesk) ([#14105](https://github.com/community-scripts/ProxmoxVE/pull/14105)) + - PatchMon: Unpin release [@tremor021](https://github.com/tremor021) ([#14097](https://github.com/community-scripts/ProxmoxVE/pull/14097)) + +### 💾 Core + + - #### 🔧 Refactor + + - core: add guidance when storage lacks rootdir support [@MickLesk](https://github.com/MickLesk) ([#14108](https://github.com/community-scripts/ProxmoxVE/pull/14108)) + +## 2026-04-28 + +### 🆕 New Scripts 
+ + - StoryBook ([#14081](https://github.com/community-scripts/ProxmoxVE/pull/14081)) +- CoreDNS ([#14082](https://github.com/community-scripts/ProxmoxVE/pull/14082)) + +### 🚀 Updated Scripts + + - Fix Dawarich Install/Update [@Jerry1098](https://github.com/Jerry1098) ([#14078](https://github.com/community-scripts/ProxmoxVE/pull/14078)) + + - #### ✨ New Features + + - PatchMon Version 2.0.2 Script update [@9technologygroup](https://github.com/9technologygroup) ([#14095](https://github.com/community-scripts/ProxmoxVE/pull/14095)) + +## 2026-04-27 + +### 🚀 Updated Scripts + + - Add pamUsername column to userOrgs table [@JVKeller](https://github.com/JVKeller) ([#14075](https://github.com/community-scripts/ProxmoxVE/pull/14075)) + + - #### 🐞 Bug Fixes + + - Dawarich: run db:migrate before assets:precompile [@MickLesk](https://github.com/MickLesk) ([#14051](https://github.com/community-scripts/ProxmoxVE/pull/14051)) + - TechnitiumDNS: always install .NET 10 if not already present [@MickLesk](https://github.com/MickLesk) ([#14049](https://github.com/community-scripts/ProxmoxVE/pull/14049)) - #### 💥 Breaking Changes - - Migrate: DokPloy, Komodo, Coolify, Dockge, Runtipi to Addons [@MickLesk](https://github.com/MickLesk) ([#12275](https://github.com/community-scripts/ProxmoxVE/pull/12275)) + - PatchMon: v2.0.0 migration [@vhsdream](https://github.com/vhsdream) ([#14015](https://github.com/community-scripts/ProxmoxVE/pull/14015)) + +### 💾 Core - #### 🔧 Refactor - - ref: replace generic exit 1 with specific exit codes in ct & install [@MickLesk](https://github.com/MickLesk) ([#12475](https://github.com/community-scripts/ProxmoxVE/pull/12475)) - -### 💾 Core - - - #### ✨ New Features - - - tools.func: Improve stability with retry logic, caching, and debug mode [@MickLesk](https://github.com/MickLesk) ([#10351](https://github.com/community-scripts/ProxmoxVE/pull/10351)) - - - #### 🔧 Refactor - - - core: standardize exit codes and add mappings 
[@MickLesk](https://github.com/MickLesk) ([#12467](https://github.com/community-scripts/ProxmoxVE/pull/12467)) - -### 🌐 Website - - - frontend: improve detail view badges, addon texts, and HTML title [@MickLesk](https://github.com/MickLesk) ([#12461](https://github.com/community-scripts/ProxmoxVE/pull/12461)) - -## 2026-03-01 - -### 🚀 Updated Scripts - - - #### 🐞 Bug Fixes - - - Sparkyfitness: use pnpm [@tomfrenzel](https://github.com/tomfrenzel) ([#12445](https://github.com/community-scripts/ProxmoxVE/pull/12445)) - - OpenArchiver: Fix installation [@tremor021](https://github.com/tremor021) ([#12447](https://github.com/community-scripts/ProxmoxVE/pull/12447)) - -## 2026-02-28 - -### 🚀 Updated Scripts - - - Update Reactive Resume install script with useful .env information for reverse proxy setup [@Mazianni](https://github.com/Mazianni) ([#12401](https://github.com/community-scripts/ProxmoxVE/pull/12401)) - - - #### 🐞 Bug Fixes - - - gramps-web: install addons (FilterRules) for relationship diagram [@MickLesk](https://github.com/MickLesk) ([#12387](https://github.com/community-scripts/ProxmoxVE/pull/12387)) - - [Fix] Immich: Change `sed` command to fully replace line in postgresql.conf [@vhsdream](https://github.com/vhsdream) ([#12429](https://github.com/community-scripts/ProxmoxVE/pull/12429)) - - [FIX] Immich: fix Openvino memory leak during OCR; improve HW-accelerated ML performance [@vhsdream](https://github.com/vhsdream) ([#12426](https://github.com/community-scripts/ProxmoxVE/pull/12426)) - - Fix default tag for ioBroker LXC install [@josefglatz](https://github.com/josefglatz) ([#12423](https://github.com/community-scripts/ProxmoxVE/pull/12423)) - - Ombi: Add database.json [@hraphael](https://github.com/hraphael) ([#12412](https://github.com/community-scripts/ProxmoxVE/pull/12412)) - - Dawarich: add missing build deps and handle seed failure [@MickLesk](https://github.com/MickLesk) ([#12410](https://github.com/community-scripts/ProxmoxVE/pull/12410)) - - 
pangolin: increase hdd to 10G [@MickLesk](https://github.com/MickLesk) ([#12409](https://github.com/community-scripts/ProxmoxVE/pull/12409)) - - - #### ✨ New Features - - - BookLore: add additional JVM flags [@vhsdream](https://github.com/vhsdream) ([#12421](https://github.com/community-scripts/ProxmoxVE/pull/12421)) - -### 🗑️ Deleted Scripts - - - Delete Palmr [@vhsdream](https://github.com/vhsdream) ([#12399](https://github.com/community-scripts/ProxmoxVE/pull/12399)) - -### 💾 Core - - - #### 🐞 Bug Fixes - - - core: read from /dev/tty in all interactive prompts | fix empty or cropped logs due build process [@MickLesk](https://github.com/MickLesk) ([#12406](https://github.com/community-scripts/ProxmoxVE/pull/12406)) - -## 2026-02-27 - -### 🆕 New Scripts - - - Strapi ([#12320](https://github.com/community-scripts/ProxmoxVE/pull/12320)) - -### 🚀 Updated Scripts - - - #### 🐞 Bug Fixes - - - TrueNAS VM: filter out new nightlies with MASTER [@juronja](https://github.com/juronja) ([#12355](https://github.com/community-scripts/ProxmoxVE/pull/12355)) - -### 💾 Core - - - #### ✨ New Features - - - core: graceful fallback for apt-get update failures [@MickLesk](https://github.com/MickLesk) ([#12386](https://github.com/community-scripts/ProxmoxVE/pull/12386)) - - core: Improve error outputs across core functions [@MickLesk](https://github.com/MickLesk) ([#12378](https://github.com/community-scripts/ProxmoxVE/pull/12378)) - -## 2026-02-26 - -### 🆕 New Scripts - - - Kima-Hub ([#12319](https://github.com/community-scripts/ProxmoxVE/pull/12319)) - -### 🚀 Updated Scripts - - - #### 🐞 Bug Fixes - - - tools.func: update glx alternatives / nvidia alternative if nvidia glx are missing [@MickLesk](https://github.com/MickLesk) ([#12372](https://github.com/community-scripts/ProxmoxVE/pull/12372)) - - hotfix: overseer version [@CrazyWolf13](https://github.com/CrazyWolf13) ([#12366](https://github.com/community-scripts/ProxmoxVE/pull/12366)) - - - #### ✨ New Features - - - Add ffmpeg for 
booklore (ffprobe) [@MickLesk](https://github.com/MickLesk) ([#12371](https://github.com/community-scripts/ProxmoxVE/pull/12371)) - - [QOL] Immich: add warning regarding library compilation time [@vhsdream](https://github.com/vhsdream) ([#12345](https://github.com/community-scripts/ProxmoxVE/pull/12345)) + - Update build.func - fixed spelling mistake [@m1ckywill](https://github.com/m1ckywill) ([#14047](https://github.com/community-scripts/ProxmoxVE/pull/14047)) ### 🧰 Tools - #### 🐞 Bug Fixes - - Improves adguardhome-sync addon when running on alpine LXCs [@Darkangeel-hd](https://github.com/Darkangeel-hd) ([#12362](https://github.com/community-scripts/ProxmoxVE/pull/12362)) + - update-lxcs/apps: avoid pct exec on containers mid-shutdown [@MickLesk](https://github.com/MickLesk) ([#14050](https://github.com/community-scripts/ProxmoxVE/pull/14050)) - #### ✨ New Features - - Add Alpine support and improve Tailscale install [@MickLesk](https://github.com/MickLesk) ([#12370](https://github.com/community-scripts/ProxmoxVE/pull/12370)) + - Add patchmon-agent report execution in update script [@heinemannj](https://github.com/heinemannj) ([#14054](https://github.com/community-scripts/ProxmoxVE/pull/14054)) -### 📚 Documentation - - - fix wrong link on contributions README.md [@Darkangeel-hd](https://github.com/Darkangeel-hd) ([#12363](https://github.com/community-scripts/ProxmoxVE/pull/12363)) - -### 📂 Github - - - github: add workflow to autom. 
close unauthorized new-script PRs [@MickLesk](https://github.com/MickLesk) ([#12356](https://github.com/community-scripts/ProxmoxVE/pull/12356)) - -## 2026-02-25 +## 2026-04-26 ### 🆕 New Scripts - - Zerobyte ([#12321](https://github.com/community-scripts/ProxmoxVE/pull/12321)) + - TREK ([#14017](https://github.com/community-scripts/ProxmoxVE/pull/14017)) ### 🚀 Updated Scripts + - fix(2fauth): handle stale backup directory on update [@omertahaoztop](https://github.com/omertahaoztop) ([#14018](https://github.com/community-scripts/ProxmoxVE/pull/14018)) + - #### 🐞 Bug Fixes - - fix: overseer migration [@CrazyWolf13](https://github.com/CrazyWolf13) ([#12340](https://github.com/community-scripts/ProxmoxVE/pull/12340)) - - add: vikunja: daemon reload [@CrazyWolf13](https://github.com/CrazyWolf13) ([#12323](https://github.com/community-scripts/ProxmoxVE/pull/12323)) - - opnsense-VM: Use ip link to verify bridge existence [@MickLesk](https://github.com/MickLesk) ([#12329](https://github.com/community-scripts/ProxmoxVE/pull/12329)) - - wger: Use $http_host for proxy Host header [@MickLesk](https://github.com/MickLesk) ([#12327](https://github.com/community-scripts/ProxmoxVE/pull/12327)) - - Passbolt: Update Nginx config `client_max_body_size` [@tremor021](https://github.com/tremor021) ([#12313](https://github.com/community-scripts/ProxmoxVE/pull/12313)) - - Zammad: configure Elasticsearch before zammad start [@MickLesk](https://github.com/MickLesk) ([#12308](https://github.com/community-scripts/ProxmoxVE/pull/12308)) - - - #### 🔧 Refactor - - - OpenProject: Various fixes [@tremor021](https://github.com/tremor021) ([#12246](https://github.com/community-scripts/ProxmoxVE/pull/12246)) + - Increase Frigate default CPU cores from 4 to 8 [@MickLesk](https://github.com/MickLesk) ([#14039](https://github.com/community-scripts/ProxmoxVE/pull/14039)) + - Technitium DNS: Ensure directories exist before running service [@tremor021](https://github.com/tremor021) 
([#14030](https://github.com/community-scripts/ProxmoxVE/pull/14030)) ### 💾 Core - #### 🐞 Bug Fixes - - Fix detection of ssh keys [@1-tempest](https://github.com/1-tempest) ([#12230](https://github.com/community-scripts/ProxmoxVE/pull/12230)) + - core: Correct deb822 repository flat path detection [@MickLesk](https://github.com/MickLesk) ([#14037](https://github.com/community-scripts/ProxmoxVE/pull/14037)) - - #### ✨ New Features - - - tools.func: Improve GitHub/Codeberg API error handling and error output [@MickLesk](https://github.com/MickLesk) ([#12330](https://github.com/community-scripts/ProxmoxVE/pull/12330)) - - - #### 🔧 Refactor - - - core: remove duplicate traps, consolidate error handling and harden signal traps [@MickLesk](https://github.com/MickLesk) ([#12316](https://github.com/community-scripts/ProxmoxVE/pull/12316)) - -### 📂 Github - - - github: improvements for node drift wf [@MickLesk](https://github.com/MickLesk) ([#12309](https://github.com/community-scripts/ProxmoxVE/pull/12309)) - -## 2026-02-24 +## 2026-04-25 ### 🚀 Updated Scripts - - several scripts: add additional github link in source [@MickLesk](https://github.com/MickLesk) ([#12282](https://github.com/community-scripts/ProxmoxVE/pull/12282)) -- adds further documentation during the installation script. 
[@d12rio](https://github.com/d12rio) ([#12248](https://github.com/community-scripts/ProxmoxVE/pull/12248)) + - #### 🐞 Bug Fixes + + - VictoriaMetrics: Stop vmagent/vmalert before update [@irishpadres](https://github.com/irishpadres) ([#14016](https://github.com/community-scripts/ProxmoxVE/pull/14016)) + - Domain-Monitor: start apache2 after stop instead of reload [@omertahaoztop](https://github.com/omertahaoztop) ([#14019](https://github.com/community-scripts/ProxmoxVE/pull/14019)) + - Transmute: Fix ffmpeg detection [@tremor021](https://github.com/tremor021) ([#14008](https://github.com/community-scripts/ProxmoxVE/pull/14008)) + + - #### 🔧 Refactor + + - Refactor: Technitium DNS [@tremor021](https://github.com/tremor021) ([#14013](https://github.com/community-scripts/ProxmoxVE/pull/14013)) + +## 2026-04-24 + +### 🆕 New Scripts + + - Apprise-API ([#13934](https://github.com/community-scripts/ProxmoxVE/pull/13934)) +- fireshare ([#13995](https://github.com/community-scripts/ProxmoxVE/pull/13995)) +- Transmute ([#13935](https://github.com/community-scripts/ProxmoxVE/pull/13935)) +- Jitsi-Meet ([#13897](https://github.com/community-scripts/ProxmoxVE/pull/13897)) + +### 🚀 Updated Scripts + + - Update wger.sh [@Soppster1029](https://github.com/Soppster1029) ([#13977](https://github.com/community-scripts/ProxmoxVE/pull/13977)) + + - #### 🔧 Refactor + + - Refactor: Ghostfolio [@MickLesk](https://github.com/MickLesk) ([#13990](https://github.com/community-scripts/ProxmoxVE/pull/13990)) + +## 2026-04-23 + +### 🚀 Updated Scripts - #### 🐞 Bug Fixes - - [Fix] PatchMon: remove VITE_API_URL from frontend env [@vhsdream](https://github.com/vhsdream) ([#12294](https://github.com/community-scripts/ProxmoxVE/pull/12294)) - - fix(searxng): remove orphaned fi causing syntax error [@mark-jeffrey](https://github.com/mark-jeffrey) ([#12283](https://github.com/community-scripts/ProxmoxVE/pull/12283)) - - Refactor n8n [@MickLesk](https://github.com/MickLesk) 
([#12264](https://github.com/community-scripts/ProxmoxVE/pull/12264)) - - Firefly: PHP bump [@tremor021](https://github.com/tremor021) ([#12247](https://github.com/community-scripts/ProxmoxVE/pull/12247)) + - mealie: start.sh missing after failed update [@MickLesk](https://github.com/MickLesk) ([#13958](https://github.com/community-scripts/ProxmoxVE/pull/13958)) + - twingate-connector: perform real apt upgrade during update flow [@MickLesk](https://github.com/MickLesk) ([#13959](https://github.com/community-scripts/ProxmoxVE/pull/13959)) - #### ✨ New Features - - Databasus: add mariadb path for mysql/mariadb backups | add mongodb database tools [@MickLesk](https://github.com/MickLesk) ([#12259](https://github.com/community-scripts/ProxmoxVE/pull/12259)) - - make searxng updateable [@shtefko](https://github.com/shtefko) ([#12207](https://github.com/community-scripts/ProxmoxVE/pull/12207)) + - core: auto-size NODE_OPTIONS heap [@MickLesk](https://github.com/MickLesk) ([#13960](https://github.com/community-scripts/ProxmoxVE/pull/13960)) + + - #### 🔧 Refactor + + - Update scripts to match standard [@tremor021](https://github.com/tremor021) ([#13956](https://github.com/community-scripts/ProxmoxVE/pull/13956)) + +### 💾 Core + + - #### 🐞 Bug Fixes + + - tools.func: upgrade Node.js minor/patch on same major version [@MickLesk](https://github.com/MickLesk) ([#13957](https://github.com/community-scripts/ProxmoxVE/pull/13957)) + - core: hotfix - prefer silent mode on PHS env conflict [@MickLesk](https://github.com/MickLesk) ([#13951](https://github.com/community-scripts/ProxmoxVE/pull/13951)) + + - #### 🔧 Refactor + + - core: improve system update information / lxc stack upgrade [@MickLesk](https://github.com/MickLesk) ([#13970](https://github.com/community-scripts/ProxmoxVE/pull/13970)) + +## 2026-04-22 + +### 🆕 New Scripts + + - Dashy ([#13817](https://github.com/community-scripts/ProxmoxVE/pull/13817)) +- Mini-QR 
([#13902](https://github.com/community-scripts/ProxmoxVE/pull/13902)) +- ownfoil ([#13904](https://github.com/community-scripts/ProxmoxVE/pull/13904)) +- ERPNext ([#13921](https://github.com/community-scripts/ProxmoxVE/pull/13921)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - add --clear to uv venv in update_script() to prevent interactive prompt [@MickLesk](https://github.com/MickLesk) ([#13926](https://github.com/community-scripts/ProxmoxVE/pull/13926)) + +### 💾 Core + + - #### ✨ New Features + + - core: Add PHS_VERBOSE env var to skip verbose mode prompts [@gormanity](https://github.com/gormanity) ([#13797](https://github.com/community-scripts/ProxmoxVE/pull/13797)) + +## 2026-04-21 + +### 🆕 New Scripts + + - gogs ([#13896](https://github.com/community-scripts/ProxmoxVE/pull/13896)) +- anchor ([#13895](https://github.com/community-scripts/ProxmoxVE/pull/13895)) +- minthcm ([#13903](https://github.com/community-scripts/ProxmoxVE/pull/13903)) +- foldergram ([#13900](https://github.com/community-scripts/ProxmoxVE/pull/13900)) + +### 🚀 Updated Scripts + + - OpenCloud: Pin version to 6.1.0 [@vhsdream](https://github.com/vhsdream) ([#13890](https://github.com/community-scripts/ProxmoxVE/pull/13890)) + + - #### 🐞 Bug Fixes + + - Domain-Locker: Update dependencies [@tremor021](https://github.com/tremor021) ([#13901](https://github.com/community-scripts/ProxmoxVE/pull/13901)) + - homelable: fix install failure by correcting password-reset chmod target [@Copilot](https://github.com/Copilot) ([#13894](https://github.com/community-scripts/ProxmoxVE/pull/13894)) + + - #### ✨ New Features + + - FileFlows: Update dependencies [@tremor021](https://github.com/tremor021) ([#13917](https://github.com/community-scripts/ProxmoxVE/pull/13917)) + +## 2026-04-20 + +### 🆕 New Scripts + + - WhoDB ([#13880](https://github.com/community-scripts/ProxmoxVE/pull/13880)) + +### 🚀 Updated Scripts + + - pangolin: create migration tables before data transfer to prevent role loss 
[@MickLesk](https://github.com/MickLesk) ([#13874](https://github.com/community-scripts/ProxmoxVE/pull/13874)) + + - #### 🐞 Bug Fixes + + - Pangolin: pre-apply schema migrations to prevent data loss [@MickLesk](https://github.com/MickLesk) ([#13861](https://github.com/community-scripts/ProxmoxVE/pull/13861)) + - ActualBudget: change migration messages to warnings [@MickLesk](https://github.com/MickLesk) ([#13860](https://github.com/community-scripts/ProxmoxVE/pull/13860)) + - slskd: migrate config keys for 0.25.0 breaking change [@MickLesk](https://github.com/MickLesk) ([#13862](https://github.com/community-scripts/ProxmoxVE/pull/13862)) + + - #### ✨ New Features + + - Wanderer: add pocketbase CLI wrapper with env [@MickLesk](https://github.com/MickLesk) ([#13863](https://github.com/community-scripts/ProxmoxVE/pull/13863)) + - feat(homelable): add password reset utility script [@davidsoncabista](https://github.com/davidsoncabista) ([#13798](https://github.com/community-scripts/ProxmoxVE/pull/13798)) + + - #### 🔧 Refactor + + - Several Scripts: Bump NodeJS to align Node.js versions with upstream for 5 scripts [@MickLesk](https://github.com/MickLesk) ([#13875](https://github.com/community-scripts/ProxmoxVE/pull/13875)) + - Refactor: PMG Post Install [@MickLesk](https://github.com/MickLesk) ([#13693](https://github.com/community-scripts/ProxmoxVE/pull/13693)) + +### 💾 Core + + - #### 🐞 Bug Fixes + + - core: detect Perl breakage after LXC stack upgrade and improve storage validation [@MickLesk](https://github.com/MickLesk) ([#13879](https://github.com/community-scripts/ProxmoxVE/pull/13879)) + +## 2026-04-19 + +### 🆕 New Scripts + + - nametag ([#13849](https://github.com/community-scripts/ProxmoxVE/pull/13849)) + +## 2026-04-18 + +### 🆕 New Scripts + + - Dagu ([#13830](https://github.com/community-scripts/ProxmoxVE/pull/13830)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - BabyBuddy: set DJANGO_SETTINGS_MODULE before migrate in update 
[@MickLesk](https://github.com/MickLesk) ([#13836](https://github.com/community-scripts/ProxmoxVE/pull/13836)) + - litellm: add prisma generate and use venv binary directly [@MickLesk](https://github.com/MickLesk) ([#13835](https://github.com/community-scripts/ProxmoxVE/pull/13835)) + - yamtrack: add missing nginx.conf sed edits to update script [@MickLesk](https://github.com/MickLesk) ([#13834](https://github.com/community-scripts/ProxmoxVE/pull/13834)) + +### 🧰 Tools + + - #### 🐞 Bug Fixes + + - SparkyFitness Garmin Microservice: fix update function [@tomfrenzel](https://github.com/tomfrenzel) ([#13824](https://github.com/community-scripts/ProxmoxVE/pull/13824)) + + - #### 🔧 Refactor + + - Clean-Orphan-LVM: check all cluster nodes for VM/CT configs [@MickLesk](https://github.com/MickLesk) ([#13837](https://github.com/community-scripts/ProxmoxVE/pull/13837)) + +## 2026-04-17 + +### 🆕 New Scripts + + - step-ca ([#13775](https://github.com/community-scripts/ProxmoxVE/pull/13775)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - core: pin IGC version to compute-runtime compatible tag (Intel GPU) [@MickLesk](https://github.com/MickLesk) ([#13814](https://github.com/community-scripts/ProxmoxVE/pull/13814)) + - Fix for bambuddy community script update [@abbasegbeyemi](https://github.com/abbasegbeyemi) ([#13816](https://github.com/community-scripts/ProxmoxVE/pull/13816)) + - Umami: Fix update procedure [@tremor021](https://github.com/tremor021) ([#13807](https://github.com/community-scripts/ProxmoxVE/pull/13807)) + +### 💾 Core + + - #### 🐞 Bug Fixes + + - core: sanitize mount_fs input — strip spaces and trailing commas [@MickLesk](https://github.com/MickLesk) ([#13806](https://github.com/community-scripts/ProxmoxVE/pull/13806)) + + - #### 🔧 Refactor + + - core: fix some pct create issues (telemetry) + cleanup [@MickLesk](https://github.com/MickLesk) ([#13810](https://github.com/community-scripts/ProxmoxVE/pull/13810)) + +## 2026-04-16 + +### 🚀 Updated Scripts + + - 
#### 🐞 Bug Fixes + + - Add pnpm as a dependency to ghost-cli install [@YourFavoriteKyle](https://github.com/YourFavoriteKyle) ([#13789](https://github.com/community-scripts/ProxmoxVE/pull/13789)) + +### 💾 Core + + - #### ✨ New Features + + - core: wire ENABLE_MKNOD and ALLOW_MOUNT_FS into LXC features [@MickLesk](https://github.com/MickLesk) ([#13796](https://github.com/community-scripts/ProxmoxVE/pull/13796)) + +## 2026-04-15 + +### 🆕 New Scripts + + - iGotify ([#13773](https://github.com/community-scripts/ProxmoxVE/pull/13773)) +- GitHub-Runner ([#13709](https://github.com/community-scripts/ProxmoxVE/pull/13709)) +- Revert "Remove low-install-count CT scripts and installers (#13570)" [@CrazyWolf13](https://github.com/CrazyWolf13) ([#13752](https://github.com/community-scripts/ProxmoxVE/pull/13752)) + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - [alpine-nextcloud] Update Nginx MIME types to support .mjs files [@GuiltyFox](https://github.com/GuiltyFox) ([#13771](https://github.com/community-scripts/ProxmoxVE/pull/13771)) + - Domain Monitor: Fix file ownership after update [@tremor021](https://github.com/tremor021) ([#13759](https://github.com/community-scripts/ProxmoxVE/pull/13759)) - #### 💥 Breaking Changes - - fix: wealthfolio for v3 [@CrazyWolf13](https://github.com/CrazyWolf13) ([#11765](https://github.com/community-scripts/ProxmoxVE/pull/11765)) + - Reitti: refactor scripts for v4 - remove RabbitMQ and Photon [@MickLesk](https://github.com/MickLesk) ([#13728](https://github.com/community-scripts/ProxmoxVE/pull/13728)) - #### 🔧 Refactor - - bump various scripts from Node 22 to 24 [@MickLesk](https://github.com/MickLesk) ([#12265](https://github.com/community-scripts/ProxmoxVE/pull/12265)) - -### 💾 Core - - - #### 🐞 Bug Fixes - - - core: fix broken "command not found" after err_trap [@MickLesk](https://github.com/MickLesk) ([#12280](https://github.com/community-scripts/ProxmoxVE/pull/12280)) - - - #### ✨ New Features - - - tools.func: add 
get_latest_gh_tag helper function [@MickLesk](https://github.com/MickLesk) ([#12261](https://github.com/community-scripts/ProxmoxVE/pull/12261)) - -### 🧰 Tools - - - Arcane ([#12263](https://github.com/community-scripts/ProxmoxVE/pull/12263)) - -### 📂 Github - - - github: add weekly Node.js version drift check workflow [@MickLesk](https://github.com/MickLesk) ([#12267](https://github.com/community-scripts/ProxmoxVE/pull/12267)) -- add: workflow to close stale PRs [@CrazyWolf13](https://github.com/CrazyWolf13) ([#12243](https://github.com/community-scripts/ProxmoxVE/pull/12243)) - -## 2026-02-23 - -### 🆕 New Scripts - - - SeaweedFS ([#12220](https://github.com/community-scripts/ProxmoxVE/pull/12220)) -- Sonobarr ([#12221](https://github.com/community-scripts/ProxmoxVE/pull/12221)) -- SparkyFitness ([#12185](https://github.com/community-scripts/ProxmoxVE/pull/12185)) -- Frigate v16.4 [@MickLesk](https://github.com/MickLesk) ([#11887](https://github.com/community-scripts/ProxmoxVE/pull/11887)) - -### 🚀 Updated Scripts - - - #### ✨ New Features - - - memos: unpin version due new release artifacts [@MickLesk](https://github.com/MickLesk) ([#12224](https://github.com/community-scripts/ProxmoxVE/pull/12224)) - - core: Enhance signal handling, reported "status" and logs [@MickLesk](https://github.com/MickLesk) ([#12216](https://github.com/community-scripts/ProxmoxVE/pull/12216)) - - - #### 🔧 Refactor - - - booklore v2: embed frontend, bump Java to 25, remove nginx [@MickLesk](https://github.com/MickLesk) ([#12223](https://github.com/community-scripts/ProxmoxVE/pull/12223)) - -### 🗑️ Deleted Scripts - - - Remove: Huntarr (deprecated & Security) [@michelroegl-brunner](https://github.com/michelroegl-brunner) ([#12226](https://github.com/community-scripts/ProxmoxVE/pull/12226)) - -### 💾 Core - - - #### 🔧 Refactor - - - core: Improve error handling and logging for LXC builds [@MickLesk](https://github.com/MickLesk) 
([#12208](https://github.com/community-scripts/ProxmoxVE/pull/12208)) - -### 🌐 Website - - - #### 🐞 Bug Fixes - - - calibre-web: update default credentials [@LaevaertK](https://github.com/LaevaertK) ([#12201](https://github.com/community-scripts/ProxmoxVE/pull/12201)) - - - #### 📝 Script Information - - - chore: update Frigate documentation and website URLs [@JohnICB](https://github.com/JohnICB) ([#12218](https://github.com/community-scripts/ProxmoxVE/pull/12218)) - -## 2026-02-22 - -### 🆕 New Scripts - - - Gramps-Web ([#12157](https://github.com/community-scripts/ProxmoxVE/pull/12157)) - -### 🚀 Updated Scripts - - - #### 🐞 Bug Fixes - - - fix: Apache Guacamole - bump to Temurin JDK 17 to resolve Debian 13 (Trixie) install failure [@Copilot](https://github.com/Copilot) ([#12161](https://github.com/community-scripts/ProxmoxVE/pull/12161)) - - Docker-VM: add error handling for virt-customize finalization [@MickLesk](https://github.com/MickLesk) ([#12127](https://github.com/community-scripts/ProxmoxVE/pull/12127)) - - [Fix] Sure: add Sidekiq service [@vhsdream](https://github.com/vhsdream) ([#12186](https://github.com/community-scripts/ProxmoxVE/pull/12186)) - - - #### ✨ New Features - - - Refactor & Bump to v2: Plex [@MickLesk](https://github.com/MickLesk) ([#12179](https://github.com/community-scripts/ProxmoxVE/pull/12179)) - - - #### 🔧 Refactor - - - karakeep: bump to node 24 [@CrazyWolf13](https://github.com/CrazyWolf13) ([#12183](https://github.com/community-scripts/ProxmoxVE/pull/12183)) - -### 💾 Core - - - #### ✨ New Features - - - tools.func: add GitHub API rate-limit detection and GITHUB_TOKEN support [@MickLesk](https://github.com/MickLesk) ([#12176](https://github.com/community-scripts/ProxmoxVE/pull/12176)) - -### 🧰 Tools - - - CR*NMASTER ([#12065](https://github.com/community-scripts/ProxmoxVE/pull/12065)) - - - #### 🔧 Refactor - - - Update package management commands in clean-lxcs.sh [@heinemannj](https://github.com/heinemannj) 
([#12166](https://github.com/community-scripts/ProxmoxVE/pull/12166)) - -### ❔ Uncategorized - - - calibre-web: Update logo URL [@MickLesk](https://github.com/MickLesk) ([#12178](https://github.com/community-scripts/ProxmoxVE/pull/12178)) - -## 2026-02-21 - -### 🚀 Updated Scripts - - - #### 🐞 Bug Fixes - - - Pangolin: restore config before db migration, use drizzle-kit push [@MickLesk](https://github.com/MickLesk) ([#12130](https://github.com/community-scripts/ProxmoxVE/pull/12130)) - - PLANKA: fix msg's [@danielalanbates](https://github.com/danielalanbates) ([#12143](https://github.com/community-scripts/ProxmoxVE/pull/12143)) - -### 🌐 Website - - - #### 📝 Script Information - - - MediaManager: Update documentation URL [@tremor021](https://github.com/tremor021) ([#12154](https://github.com/community-scripts/ProxmoxVE/pull/12154)) - -## 2026-02-20 - -### 🆕 New Scripts - - - Sure ([#12114](https://github.com/community-scripts/ProxmoxVE/pull/12114)) -- Calibre-Web ([#12115](https://github.com/community-scripts/ProxmoxVE/pull/12115)) - -### 🚀 Updated Scripts - - - #### 🐞 Bug Fixes - - - Zammad: fix Elasticsearch JVM config and add daemon-reload [@MickLesk](https://github.com/MickLesk) ([#12125](https://github.com/community-scripts/ProxmoxVE/pull/12125)) - - Huntarr: add build-essential for native pip dependencies [@MickLesk](https://github.com/MickLesk) ([#12126](https://github.com/community-scripts/ProxmoxVE/pull/12126)) - - Dokploy: fix update function [@vhsdream](https://github.com/vhsdream) ([#12116](https://github.com/community-scripts/ProxmoxVE/pull/12116)) - - - #### 💥 Breaking Changes - - - recyclarr: adjust paths for v8.0 breaking changes [@MickLesk](https://github.com/MickLesk) ([#12129](https://github.com/community-scripts/ProxmoxVE/pull/12129)) - - - #### 🔧 Refactor - - - Planka: migrate data paths to new v2 directory structure [@MickLesk](https://github.com/MickLesk) ([#12128](https://github.com/community-scripts/ProxmoxVE/pull/12128)) - -### 🌐 Website - - 
- #### 📝 Script Information - - - fixen broken link to dawarich documentation [@RiX012](https://github.com/RiX012) ([#12103](https://github.com/community-scripts/ProxmoxVE/pull/12103)) - -## 2026-02-19 - -### 🆕 New Scripts - - - TrueNAS-VM ([#12059](https://github.com/community-scripts/ProxmoxVE/pull/12059)) - -### 🚀 Updated Scripts - - - #### 🐞 Bug Fixes - - - add: patchmon breaking change msg [@CrazyWolf13](https://github.com/CrazyWolf13) ([#12075](https://github.com/community-scripts/ProxmoxVE/pull/12075)) - - LibreNMS: Various fixes [@tremor021](https://github.com/tremor021) ([#12089](https://github.com/community-scripts/ProxmoxVE/pull/12089)) - -### 🌐 Website - - - #### 📝 Script Information - - - truenas-vm: slug fix for source code link [@juronja](https://github.com/juronja) ([#12088](https://github.com/community-scripts/ProxmoxVE/pull/12088)) - -## 2026-02-18 - -### 🚀 Updated Scripts - - - #### 💥 Breaking Changes - - - [Fix] PatchMon: use `SERVER_PORT` in Nginx config if set in env [@vhsdream](https://github.com/vhsdream) ([#12053](https://github.com/community-scripts/ProxmoxVE/pull/12053)) - -### 💾 Core - - - #### ✨ New Features - - - core: Execution ID & Telemetry Improvements [@MickLesk](https://github.com/MickLesk) ([#12041](https://github.com/community-scripts/ProxmoxVE/pull/12041)) - -## 2026-02-17 - -### 🆕 New Scripts - - - Databasus ([#12018](https://github.com/community-scripts/ProxmoxVE/pull/12018)) - -### 🚀 Updated Scripts - - - #### 🐞 Bug Fixes - - - [Hotfix] Cleanuparr: backup config before update [@vhsdream](https://github.com/vhsdream) ([#12039](https://github.com/community-scripts/ProxmoxVE/pull/12039)) - - fix: pterodactyl-panel add symlink [@CrazyWolf13](https://github.com/CrazyWolf13) ([#11997](https://github.com/community-scripts/ProxmoxVE/pull/11997)) - -### 💾 Core - - - #### 🐞 Bug Fixes - - - core: call get_lxc_ip in start() before updates [@MickLesk](https://github.com/MickLesk) 
([#12015](https://github.com/community-scripts/ProxmoxVE/pull/12015)) - - - #### ✨ New Features - - - tools/pve: add data analytics / formatting / linting [@MickLesk](https://github.com/MickLesk) ([#12034](https://github.com/community-scripts/ProxmoxVE/pull/12034)) - - core: smart recovery for failed installs | extend exit_codes [@MickLesk](https://github.com/MickLesk) ([#11221](https://github.com/community-scripts/ProxmoxVE/pull/11221)) - - - #### 🔧 Refactor - - - core: error-handler improvements | better exit_code handling | better tools.func source check [@MickLesk](https://github.com/MickLesk) ([#12019](https://github.com/community-scripts/ProxmoxVE/pull/12019)) - -### 🧰 Tools - - - #### 🔧 Refactor - - - Immich Public Proxy: centralize and fix systemd service creation [@MickLesk](https://github.com/MickLesk) ([#12025](https://github.com/community-scripts/ProxmoxVE/pull/12025)) + - Semaphore: add BoltDB to SQLite migration [@tremor021](https://github.com/tremor021) ([#13779](https://github.com/community-scripts/ProxmoxVE/pull/13779)) ### 📚 Documentation - - fix contribution/setup-fork [@andreasabeck](https://github.com/andreasabeck) ([#12047](https://github.com/community-scripts/ProxmoxVE/pull/12047)) + - cleanup: remove docs/, update README & CONTRIBUTING, fix repo config [@MickLesk](https://github.com/MickLesk) ([#13770](https://github.com/community-scripts/ProxmoxVE/pull/13770)) -## 2026-02-16 - -### 🆕 New Scripts - - - RomM ([#11987](https://github.com/community-scripts/ProxmoxVE/pull/11987)) -- LinkDing ([#11976](https://github.com/community-scripts/ProxmoxVE/pull/11976)) +## 2026-04-14 ### 🚀 Updated Scripts - - Opencloud: Pin version to 5.1.0 [@vhsdream](https://github.com/vhsdream) ([#12004](https://github.com/community-scripts/ProxmoxVE/pull/12004)) + - Immich: Pin photo-processing library revisions [@vhsdream](https://github.com/vhsdream) ([#13748](https://github.com/community-scripts/ProxmoxVE/pull/13748)) - #### 🐞 Bug Fixes - - Tududi: Fix sed command 
for DB_FILE configuration [@tremor021](https://github.com/tremor021) ([#11988](https://github.com/community-scripts/ProxmoxVE/pull/11988)) - - slskd: fix exit position [@MickLesk](https://github.com/MickLesk) ([#11963](https://github.com/community-scripts/ProxmoxVE/pull/11963)) - - cryptpad: restore config earlier and run onlyoffice upgrade [@MickLesk](https://github.com/MickLesk) ([#11964](https://github.com/community-scripts/ProxmoxVE/pull/11964)) - - jellyseerr/overseerr: Migrate update script to Seerr; prompt rerun [@MickLesk](https://github.com/MickLesk) ([#11965](https://github.com/community-scripts/ProxmoxVE/pull/11965)) - - - #### 🔧 Refactor - - - core/vm's: ensure script state is sent on script exit [@MickLesk](https://github.com/MickLesk) ([#11991](https://github.com/community-scripts/ProxmoxVE/pull/11991)) - - Vaultwarden: export VW_VERSION as version number [@MickLesk](https://github.com/MickLesk) ([#11966](https://github.com/community-scripts/ProxmoxVE/pull/11966)) - - Zabbix: Improve zabbix-agent service detection [@MickLesk](https://github.com/MickLesk) ([#11968](https://github.com/community-scripts/ProxmoxVE/pull/11968)) - -### 💾 Core + - BentoPDF: Nginx fixes [@tremor021](https://github.com/tremor021) ([#13741](https://github.com/community-scripts/ProxmoxVE/pull/13741)) + - Zerobyte: add git to dependencies to fix bun install failure [@Copilot](https://github.com/Copilot) ([#13721](https://github.com/community-scripts/ProxmoxVE/pull/13721)) + - alpine-nextcloud-install: do not use deprecated nginx config [@AlexanderStein](https://github.com/AlexanderStein) ([#13726](https://github.com/community-scripts/ProxmoxVE/pull/13726)) - #### ✨ New Features - - tools.func: ensure /usr/local/bin PATH persists for pct enter sessions [@MickLesk](https://github.com/MickLesk) ([#11970](https://github.com/community-scripts/ProxmoxVE/pull/11970)) + - Mealie: support v3.15+ Nuxt 4 migration [@MickLesk](https://github.com/MickLesk) 
([#13731](https://github.com/community-scripts/ProxmoxVE/pull/13731)) - #### 🔧 Refactor - - core: remove duplicate error handler from alpine-install.func [@MickLesk](https://github.com/MickLesk) ([#11971](https://github.com/community-scripts/ProxmoxVE/pull/11971)) + - Lyrion: correct service name and version file in update script [@MickLesk](https://github.com/MickLesk) ([#13734](https://github.com/community-scripts/ProxmoxVE/pull/13734)) + - Changedetection: move env vars from service file to .env [@tremor021](https://github.com/tremor021) ([#13732](https://github.com/community-scripts/ProxmoxVE/pull/13732)) -### 📂 Github - - - github: add "website" label if "json" changed [@MickLesk](https://github.com/MickLesk) ([#11975](https://github.com/community-scripts/ProxmoxVE/pull/11975)) - -### 🌐 Website - - - #### 📝 Script Information - - - Update Wishlist LXC webpage to include reverse proxy info [@summoningpixels](https://github.com/summoningpixels) ([#11973](https://github.com/community-scripts/ProxmoxVE/pull/11973)) - - Update OpenCloud LXC webpage to include services ports [@summoningpixels](https://github.com/summoningpixels) ([#11969](https://github.com/community-scripts/ProxmoxVE/pull/11969)) - -## 2026-02-15 - -### 🆕 New Scripts - - - ebusd ([#11942](https://github.com/community-scripts/ProxmoxVE/pull/11942)) -- add: seer script and migrations [@CrazyWolf13](https://github.com/CrazyWolf13) ([#11930](https://github.com/community-scripts/ProxmoxVE/pull/11930)) +## 2026-04-13 ### 🚀 Updated Scripts - #### 🐞 Bug Fixes - - Fix seerr URL in jellyseerr script [@lucacome](https://github.com/lucacome) ([#11951](https://github.com/community-scripts/ProxmoxVE/pull/11951)) - - Fix jellyseer and overseer script replacement [@lucacome](https://github.com/lucacome) ([#11949](https://github.com/community-scripts/ProxmoxVE/pull/11949)) - - Tautulli: Add setuptools < 81 [@tremor021](https://github.com/tremor021) 
([#11943](https://github.com/community-scripts/ProxmoxVE/pull/11943)) + - Slskd: Remove stale Soularr lock file on startup and redirect logs to stderr [@MickLesk](https://github.com/MickLesk) ([#13669](https://github.com/community-scripts/ProxmoxVE/pull/13669)) + - Bambuddy: preserve database and archive on update [@Copilot](https://github.com/Copilot) ([#13706](https://github.com/community-scripts/ProxmoxVE/pull/13706)) + + - #### ✨ New Features + + - Immich: Pin version to 2.7.5 [@vhsdream](https://github.com/vhsdream) ([#13715](https://github.com/community-scripts/ProxmoxVE/pull/13715)) + - Bytestash: auto backup/restore data on update [@MickLesk](https://github.com/MickLesk) ([#13707](https://github.com/community-scripts/ProxmoxVE/pull/13707)) + - OpenCloud: pin version to 6.0.0 [@vhsdream](https://github.com/vhsdream) ([#13691](https://github.com/community-scripts/ProxmoxVE/pull/13691)) - #### 💥 Breaking Changes - - Refactor: Patchmon [@vhsdream](https://github.com/vhsdream) ([#11888](https://github.com/community-scripts/ProxmoxVE/pull/11888)) + - Mealie: pin version to v3.14.0 in install and update scripts [@Copilot](https://github.com/Copilot) ([#13724](https://github.com/community-scripts/ProxmoxVE/pull/13724)) -## 2026-02-14 + - #### 🔧 Refactor + + - core: remove unused TEMP_DIR mktemp leak in build_container / clean sonarqube [@MickLesk](https://github.com/MickLesk) ([#13708](https://github.com/community-scripts/ProxmoxVE/pull/13708)) + +## 2026-04-12 ### 🚀 Updated Scripts - #### 🐞 Bug Fixes - - Increase disk allocation for OpenWebUI and Ollama to prevent installation failures [@Copilot](https://github.com/Copilot) ([#11920](https://github.com/community-scripts/ProxmoxVE/pull/11920)) + - Alpine-Wakapi: Remove container checks in update_script function [@MickLesk](https://github.com/MickLesk) ([#13694](https://github.com/community-scripts/ProxmoxVE/pull/13694)) + + - #### 🔧 Refactor + + - IronClaw: Install keychain dependencies and launch in a DBus session 
[@MickLesk](https://github.com/MickLesk) ([#13692](https://github.com/community-scripts/ProxmoxVE/pull/13692)) + - MeTube: Allow pnpm build scripts to fix ERR_PNPM_IGNORED_BUILDS [@MickLesk](https://github.com/MickLesk) ([#13668](https://github.com/community-scripts/ProxmoxVE/pull/13668)) + +## 2026-04-11 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - Immich: Ensure newline before appending IMMICH_HELMET_FILE to .env [@MickLesk](https://github.com/MickLesk) ([#13667](https://github.com/community-scripts/ProxmoxVE/pull/13667)) + + - #### ✨ New Features + + - BentoPDF: replace http-server with nginx to fix WASM initialization timeout [@MickLesk](https://github.com/MickLesk) ([#13625](https://github.com/community-scripts/ProxmoxVE/pull/13625)) + - Element Synapse: Add MatrixRTC configuration for Element Call support [@MickLesk](https://github.com/MickLesk) ([#13665](https://github.com/community-scripts/ProxmoxVE/pull/13665)) + - RomM: Use ROMM_BASE_PATH from .env for symlinks and nginx config [@MickLesk](https://github.com/MickLesk) ([#13666](https://github.com/community-scripts/ProxmoxVE/pull/13666)) + - Immich: Pin version to 2.7.4 [@vhsdream](https://github.com/vhsdream) ([#13661](https://github.com/community-scripts/ProxmoxVE/pull/13661)) + + - #### 🔧 Refactor + + - Crafty Controller: Wait for credentials file instead of fixed sleep [@MickLesk](https://github.com/MickLesk) ([#13670](https://github.com/community-scripts/ProxmoxVE/pull/13670)) + - Refactor: Alpine-Wakapi [@tremor021](https://github.com/tremor021) ([#13656](https://github.com/community-scripts/ProxmoxVE/pull/13656)) + +## 2026-04-10 + +### 🚀 Updated Scripts + + - #### 🐞 Bug Fixes + + - fix: ensure trailing newline in redis.conf before appending bind directive [@Copilot](https://github.com/Copilot) ([#13647](https://github.com/community-scripts/ProxmoxVE/pull/13647)) + + - #### ✨ New Features + + - Immich: Pin version to 2.7.3 [@vhsdream](https://github.com/vhsdream) 
([#13631](https://github.com/community-scripts/ProxmoxVE/pull/13631)) + - Homarr: bind Redis to localhost only [@MickLesk](https://github.com/MickLesk) ([#13552](https://github.com/community-scripts/ProxmoxVE/pull/13552)) ### 💾 Core - #### 🐞 Bug Fixes - - core: handle missing RAM speed in nested VMs [@MickLesk](https://github.com/MickLesk) ([#11913](https://github.com/community-scripts/ProxmoxVE/pull/11913)) - - - #### ✨ New Features - - - core: overwriteable app version [@CrazyWolf13](https://github.com/CrazyWolf13) ([#11753](https://github.com/community-scripts/ProxmoxVE/pull/11753)) - - core: validate container IDs cluster-wide across all nodes [@MickLesk](https://github.com/MickLesk) ([#11906](https://github.com/community-scripts/ProxmoxVE/pull/11906)) - - core: improve error reporting with structured error strings and better categorization + output formatting [@MickLesk](https://github.com/MickLesk) ([#11907](https://github.com/community-scripts/ProxmoxVE/pull/11907)) - - core: unified logging system with combined logs [@MickLesk](https://github.com/MickLesk) ([#11761](https://github.com/community-scripts/ProxmoxVE/pull/11761)) + - tools.func: prevent script crash when entering GitHub token after rate limit [@MickLesk](https://github.com/MickLesk) ([#13638](https://github.com/community-scripts/ProxmoxVE/pull/13638)) ### 🧰 Tools - - lxc-updater: add patchmon aware [@failure101](https://github.com/failure101) ([#11905](https://github.com/community-scripts/ProxmoxVE/pull/11905)) - -### 🌐 Website - - - #### 📝 Script Information - - - Disable UniFi script - APT packages no longer available [@Copilot](https://github.com/Copilot) ([#11898](https://github.com/community-scripts/ProxmoxVE/pull/11898)) - -## 2026-02-13 - -### 🚀 Updated Scripts - - - #### 🐞 Bug Fixes - - - OpenWebUI: pin numba constraint [@MickLesk](https://github.com/MickLesk) ([#11874](https://github.com/community-scripts/ProxmoxVE/pull/11874)) - - Planka: add migrate step to update function 
[@ZimmermannLeon](https://github.com/ZimmermannLeon) ([#11877](https://github.com/community-scripts/ProxmoxVE/pull/11877)) - - Pangolin: switch sqlite-specific back to generic [@MickLesk](https://github.com/MickLesk) ([#11868](https://github.com/community-scripts/ProxmoxVE/pull/11868)) - - [Hotfix] Jotty: Copy contents of config backup into /opt/jotty/config [@vhsdream](https://github.com/vhsdream) ([#11864](https://github.com/community-scripts/ProxmoxVE/pull/11864)) - - #### 🔧 Refactor - - Refactor: Radicale [@vhsdream](https://github.com/vhsdream) ([#11850](https://github.com/community-scripts/ProxmoxVE/pull/11850)) - - chore(donetick): add config entry for v0.1.73 [@tomfrenzel](https://github.com/tomfrenzel) ([#11872](https://github.com/community-scripts/ProxmoxVE/pull/11872)) - -### 💾 Core - - - #### 🔧 Refactor - - - core: retry reporting with fallback payloads [@MickLesk](https://github.com/MickLesk) ([#11885](https://github.com/community-scripts/ProxmoxVE/pull/11885)) - -### 📡 API - - - #### ✨ New Features - - - error-handler: Implement json_escape and enhance error handling [@MickLesk](https://github.com/MickLesk) ([#11875](https://github.com/community-scripts/ProxmoxVE/pull/11875)) - -### 🌐 Website - - - #### 📝 Script Information - - - SQLServer-2025: add PVE9/Kernel 6.x incompatibility warning [@MickLesk](https://github.com/MickLesk) ([#11829](https://github.com/community-scripts/ProxmoxVE/pull/11829)) - -## 2026-02-12 - -### 🚀 Updated Scripts - - - #### 🐞 Bug Fixes - - - EMQX: increase disk to 6GB and add optional MQ disable prompt [@MickLesk](https://github.com/MickLesk) ([#11844](https://github.com/community-scripts/ProxmoxVE/pull/11844)) - - Increased the Grafana container default disk size. 
[@shtefko](https://github.com/shtefko) ([#11840](https://github.com/community-scripts/ProxmoxVE/pull/11840)) - - Pangolin: Update database generation command in install script [@tremor021](https://github.com/tremor021) ([#11825](https://github.com/community-scripts/ProxmoxVE/pull/11825)) - - Deluge: add python3-setuptools as dep [@MickLesk](https://github.com/MickLesk) ([#11833](https://github.com/community-scripts/ProxmoxVE/pull/11833)) - - Dispatcharr: migrate to uv sync [@MickLesk](https://github.com/MickLesk) ([#11831](https://github.com/community-scripts/ProxmoxVE/pull/11831)) - - - #### ✨ New Features - - - Archlinux-VM: fix LVM/LVM-thin storage and improve error reporting | VM's add correct exit_code for analytics [@MickLesk](https://github.com/MickLesk) ([#11842](https://github.com/community-scripts/ProxmoxVE/pull/11842)) - - Debian13-VM: Optimize First Boot & add noCloud/Cloud Selection [@MickLesk](https://github.com/MickLesk) ([#11810](https://github.com/community-scripts/ProxmoxVE/pull/11810)) - -### 💾 Core - - - #### ✨ New Features - - - tools.func: auto-detect binary vs armored GPG keys in setup_deb822_repo [@MickLesk](https://github.com/MickLesk) ([#11841](https://github.com/community-scripts/ProxmoxVE/pull/11841)) - - core: remove old Go API and extend misc/api.func with new backend [@MickLesk](https://github.com/MickLesk) ([#11822](https://github.com/community-scripts/ProxmoxVE/pull/11822)) - - - #### 🔧 Refactor - - - error_handler: prevent stuck 'installing' status [@MickLesk](https://github.com/MickLesk) ([#11845](https://github.com/community-scripts/ProxmoxVE/pull/11845)) - -### 🧰 Tools - - - #### 🐞 Bug Fixes - - - Tailscale: fix DNS check and keyrings directory issues [@MickLesk](https://github.com/MickLesk) ([#11837](https://github.com/community-scripts/ProxmoxVE/pull/11837)) - -## 2026-02-11 - -### 🆕 New Scripts - - - Draw.io ([#11788](https://github.com/community-scripts/ProxmoxVE/pull/11788)) - -### 🚀 Updated Scripts - - - #### 🐞 Bug Fixes - 
- - dispatcharr: include port 9191 in success-message [@MickLesk](https://github.com/MickLesk) ([#11808](https://github.com/community-scripts/ProxmoxVE/pull/11808)) - - fix: make donetick 0.1.71 compatible [@tomfrenzel](https://github.com/tomfrenzel) ([#11804](https://github.com/community-scripts/ProxmoxVE/pull/11804)) - - Kasm: Support new version URL format without hash suffix [@MickLesk](https://github.com/MickLesk) ([#11787](https://github.com/community-scripts/ProxmoxVE/pull/11787)) - - LibreTranslate: Remove Torch [@tremor021](https://github.com/tremor021) ([#11783](https://github.com/community-scripts/ProxmoxVE/pull/11783)) - - Snowshare: fix update script [@TuroYT](https://github.com/TuroYT) ([#11726](https://github.com/community-scripts/ProxmoxVE/pull/11726)) - - - #### ✨ New Features - - - [Feature] OpenCloud: support PosixFS Collaborative Mode [@vhsdream](https://github.com/vhsdream) ([#11806](https://github.com/community-scripts/ProxmoxVE/pull/11806)) - -### 💾 Core - - - #### 🔧 Refactor - - - core: respect EDITOR variable for config editing [@ls-root](https://github.com/ls-root) ([#11693](https://github.com/community-scripts/ProxmoxVE/pull/11693)) - -### 📚 Documentation - - - Fix formatting in kutt.json notes section [@tiagodenoronha](https://github.com/tiagodenoronha) ([#11774](https://github.com/community-scripts/ProxmoxVE/pull/11774)) - -## 2026-02-10 - -### 🚀 Updated Scripts - - - #### 🐞 Bug Fixes - - - Immich: Pin version to 2.5.6 [@vhsdream](https://github.com/vhsdream) ([#11775](https://github.com/community-scripts/ProxmoxVE/pull/11775)) - - Libretranslate: Fix setuptools [@tremor021](https://github.com/tremor021) ([#11772](https://github.com/community-scripts/ProxmoxVE/pull/11772)) - - Element Synapse: prevent systemd invoke failure during apt install [@MickLesk](https://github.com/MickLesk) ([#11758](https://github.com/community-scripts/ProxmoxVE/pull/11758)) - - - #### ✨ New Features - - - Refactor: Slskd & Soularr 
[@vhsdream](https://github.com/vhsdream) ([#11674](https://github.com/community-scripts/ProxmoxVE/pull/11674)) - -### 🗑️ Deleted Scripts - - - move paperless-exporter from LXC to addon ([#11737](https://github.com/community-scripts/ProxmoxVE/pull/11737)) - -### 🧰 Tools - - - #### 🐞 Bug Fixes - - - feat: improve storage parsing & add guestname [@carlosmaroot](https://github.com/carlosmaroot) ([#11752](https://github.com/community-scripts/ProxmoxVE/pull/11752)) - -### 📂 Github - - - Github-Version Workflow: include addon scripts in extraction [@MickLesk](https://github.com/MickLesk) ([#11757](https://github.com/community-scripts/ProxmoxVE/pull/11757)) - -### 🌐 Website - - - #### 📝 Script Information - - - Snowshare: fix typo in config file path on website [@BirdMakingStuff](https://github.com/BirdMakingStuff) ([#11754](https://github.com/community-scripts/ProxmoxVE/pull/11754)) - -## 2026-02-09 - -### 🚀 Updated Scripts - - - #### 🐞 Bug Fixes - - - several scripts: add --clear to uv venv calls for uv 0.10 compatibility [@MickLesk](https://github.com/MickLesk) ([#11723](https://github.com/community-scripts/ProxmoxVE/pull/11723)) - - Koillection: ensure setup_composer is in update script [@MickLesk](https://github.com/MickLesk) ([#11734](https://github.com/community-scripts/ProxmoxVE/pull/11734)) - - PeaNUT: symlink server.js after update [@vhsdream](https://github.com/vhsdream) ([#11696](https://github.com/community-scripts/ProxmoxVE/pull/11696)) - - Umlautadaptarr: use release appsettings.json instead of hardcoded copy [@MickLesk](https://github.com/MickLesk) ([#11725](https://github.com/community-scripts/ProxmoxVE/pull/11725)) - - tracearr: prepare for next stable release [@durzo](https://github.com/durzo) ([#11673](https://github.com/community-scripts/ProxmoxVE/pull/11673)) - - - #### ✨ New Features - - - remove whiptail from update scripts for unattended update support [@MickLesk](https://github.com/MickLesk) 
([#11712](https://github.com/community-scripts/ProxmoxVE/pull/11712)) - - - #### 🔧 Refactor - - - Refactor: FileFlows [@tremor021](https://github.com/tremor021) ([#11108](https://github.com/community-scripts/ProxmoxVE/pull/11108)) - - Refactor: wger [@MickLesk](https://github.com/MickLesk) ([#11722](https://github.com/community-scripts/ProxmoxVE/pull/11722)) - - Nginx-UI: better User Handling | ACME [@MickLesk](https://github.com/MickLesk) ([#11715](https://github.com/community-scripts/ProxmoxVE/pull/11715)) - - NginxProxymanager: use better-sqlite3 [@MickLesk](https://github.com/MickLesk) ([#11708](https://github.com/community-scripts/ProxmoxVE/pull/11708)) - -### 💾 Core - - - #### 🔧 Refactor - - - hwaccel: add libmfx-gen1.2 to Intel Arc setup for QSV support [@MickLesk](https://github.com/MickLesk) ([#11707](https://github.com/community-scripts/ProxmoxVE/pull/11707)) - -### 🧰 Tools - - - #### 🐞 Bug Fixes - - - addons: ensure curl is installed before use [@MickLesk](https://github.com/MickLesk) ([#11718](https://github.com/community-scripts/ProxmoxVE/pull/11718)) - - Netbird (addon): add systemd ordering to start after Docker [@MickLesk](https://github.com/MickLesk) ([#11716](https://github.com/community-scripts/ProxmoxVE/pull/11716)) - -### ❔ Uncategorized - - - Bichon: Update website [@tremor021](https://github.com/tremor021) ([#11711](https://github.com/community-scripts/ProxmoxVE/pull/11711)) - -## 2026-02-08 - -### 🚀 Updated Scripts - - - #### 🐞 Bug Fixes - - - feat(healthchecks): add sendalerts service [@Mika56](https://github.com/Mika56) ([#11694](https://github.com/community-scripts/ProxmoxVE/pull/11694)) - - ComfyUI: Dynamic Fetch PyTorch Versions [@MickLesk](https://github.com/MickLesk) ([#11657](https://github.com/community-scripts/ProxmoxVE/pull/11657)) - - - #### 💥 Breaking Changes - - - Semaphore: switch from Debian to Ubuntu 24.04 [@MickLesk](https://github.com/MickLesk) ([#11670](https://github.com/community-scripts/ProxmoxVE/pull/11670)) - -## 
2026-02-07 - -### 🆕 New Scripts - - - Checkmate ([#11672](https://github.com/community-scripts/ProxmoxVE/pull/11672)) -- Bichon ([#11671](https://github.com/community-scripts/ProxmoxVE/pull/11671)) - -### 🚀 Updated Scripts - - - #### 🐞 Bug Fixes - - - NocoDB: pin to v0.301.1 [@MickLesk](https://github.com/MickLesk) ([#11655](https://github.com/community-scripts/ProxmoxVE/pull/11655)) - - Pin Memos to v0.25.3 - last version with release binaries [@MickLesk](https://github.com/MickLesk) ([#11658](https://github.com/community-scripts/ProxmoxVE/pull/11658)) - - Downgrade: OpenProject | NginxProxyManager | Semaphore to Debian 12 due to persistent SHA1 issues [@MickLesk](https://github.com/MickLesk) ([#11654](https://github.com/community-scripts/ProxmoxVE/pull/11654)) - -### 💾 Core - - - #### ✨ New Features - - - tools: fallback to previous release when asset is missing [@MickLesk](https://github.com/MickLesk) ([#11660](https://github.com/community-scripts/ProxmoxVE/pull/11660)) - -### 📚 Documentation - - - fix(setup): correctly auto-detect username when using --full [@ls-root](https://github.com/ls-root) ([#11650](https://github.com/community-scripts/ProxmoxVE/pull/11650)) - -### 🌐 Website - - - #### ✨ New Features - - - feat(frontend): add JSON script import functionality [@ls-root](https://github.com/ls-root) ([#11563](https://github.com/community-scripts/ProxmoxVE/pull/11563)) - -## 2026-02-06 - -### 🆕 New Scripts - - - Nightscout ([#11621](https://github.com/community-scripts/ProxmoxVE/pull/11621)) -- PVE LXC Apps Updater [@MickLesk](https://github.com/MickLesk) ([#11533](https://github.com/community-scripts/ProxmoxVE/pull/11533)) - -### 🚀 Updated Scripts - - - #### 🐞 Bug Fixes - - - Immich: supress startup messages for immich-admin [@vhsdream](https://github.com/vhsdream) ([#11635](https://github.com/community-scripts/ProxmoxVE/pull/11635)) - - Semaphore: Change Ubuntu release from 'jammy' to 'noble' [@MickLesk](https://github.com/MickLesk) 
([#11625](https://github.com/community-scripts/ProxmoxVE/pull/11625)) - - Pangolin: replace build:sqlite with db:generate + build [@MickLesk](https://github.com/MickLesk) ([#11616](https://github.com/community-scripts/ProxmoxVE/pull/11616)) - - [FIX] OpenCloud: path issues [@vhsdream](https://github.com/vhsdream) ([#11593](https://github.com/community-scripts/ProxmoxVE/pull/11593)) - - [FIX] Homepage: preserve public/images & public/icons if they exist [@vhsdream](https://github.com/vhsdream) ([#11594](https://github.com/community-scripts/ProxmoxVE/pull/11594)) - - - #### ✨ New Features - - - Shelfmark: remove Chromedriver dep, add URL_BASE env [@vhsdream](https://github.com/vhsdream) ([#11619](https://github.com/community-scripts/ProxmoxVE/pull/11619)) - - Immich: pin to v2.5.5 [@vhsdream](https://github.com/vhsdream) ([#11598](https://github.com/community-scripts/ProxmoxVE/pull/11598)) - - - #### 🔧 Refactor - - - refactor: homepage [@CrazyWolf13](https://github.com/CrazyWolf13) ([#11605](https://github.com/community-scripts/ProxmoxVE/pull/11605)) - -### 💾 Core - - - #### 🐞 Bug Fixes - - - fix(core): spinner misalignment [@ls-root](https://github.com/ls-root) ([#11627](https://github.com/community-scripts/ProxmoxVE/pull/11627)) - - - #### 🔧 Refactor - - - [Fix] build.func: QOL grammar adjustment for Creating LXC message [@vhsdream](https://github.com/vhsdream) ([#11633](https://github.com/community-scripts/ProxmoxVE/pull/11633)) - -### 📚 Documentation - - - [gh] Update to the New Script request template [@tremor021](https://github.com/tremor021) ([#11612](https://github.com/community-scripts/ProxmoxVE/pull/11612)) - -### 🌐 Website - - - #### 📝 Script Information - - - Update LXC App Updater JSON to reflect tag override option [@vhsdream](https://github.com/vhsdream) ([#11626](https://github.com/community-scripts/ProxmoxVE/pull/11626)) - -### ❔ Uncategorized - - - Opencloud: fix JSON [@vhsdream](https://github.com/vhsdream) 
([#11617](https://github.com/community-scripts/ProxmoxVE/pull/11617)) \ No newline at end of file + - addons: Filebrowser & Filebrowser-Quantum get warning if host install [@MickLesk](https://github.com/MickLesk) ([#13639](https://github.com/community-scripts/ProxmoxVE/pull/13639)) \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..6493ff4f6 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,137 @@ +# Contributing to Proxmox VE Helper-Scripts + +Welcome! We're glad you want to contribute. This guide covers everything you need to add new scripts, improve existing ones, or help in other ways. + +For detailed coding standards and full documentation, visit **[community-scripts.org/docs](https://community-scripts.org/docs)**. + +--- + +## How Can I Help? + +> [!IMPORTANT] +> **New scripts** must always be submitted to [ProxmoxVED](https://github.com/community-scripts/ProxmoxVED) first — not to this repository. +> PRs with new scripts opened directly against ProxmoxVE **will be closed without review**. +> **Bug fixes, improvements, and features for existing scripts** go here (ProxmoxVE). 
+ +| I want to… | Where to go | +| :------------------------------------------ | :------------------------------------------------------------------------------------------- | +| **Add a brand-new script** | [ProxmoxVED](https://github.com/community-scripts/ProxmoxVED) — testing repo for new scripts | +| **Fix a bug or improve an existing script** | This repo (ProxmoxVE) — open a PR here | +| **Add a feature to an existing script** | This repo (ProxmoxVE) — open a PR here | +| Report a bug or broken script | [Open an Issue](https://github.com/community-scripts/ProxmoxVE/issues) | +| Request a new script or feature | [Start a Discussion](https://github.com/community-scripts/ProxmoxVE/discussions) | +| Report a security vulnerability | [Security Policy](SECURITY.md) | +| Chat with contributors | [Discord](https://discord.gg/3AnUqsXnmK) | + +--- + +## Prerequisites + +Before writing scripts, we recommend setting up: + +- **Visual Studio Code** with these extensions: + - [Shell Syntax](https://marketplace.visualstudio.com/items?itemName=bmalehorn.shell-syntax) + - [ShellCheck](https://marketplace.visualstudio.com/items?itemName=timonwong.shellcheck) + - [Shell Format](https://marketplace.visualstudio.com/items?itemName=foxundermoon.shell-format) + +--- + +## Script Structure + +Every script consists of two files: + +| File | Purpose | +| :--------------------------- | :------------------------------------------------------ | +| `ct/AppName.sh` | Container creation, variable setup, and update handling | +| `install/AppName-install.sh` | Application installation logic | + +Use existing scripts in [`ct/`](ct/) and [`install/`](install/) as reference. Full coding standards and annotated templates are at **[community-scripts.org/docs/contribution](https://community-scripts.org/docs/contribution)**. + +--- + +## Contribution Process + +### Adding a new script + +New scripts are **not accepted directly in this repository**. The workflow is: + +1. 
Fork [ProxmoxVED](https://github.com/community-scripts/ProxmoxVED) and clone it +2. Create a branch: `git switch -c feat/myapp` +3. Write your two script files: + - `ct/myapp.sh` + - `install/myapp-install.sh` +4. Test thoroughly in ProxmoxVED — run the script against a real Proxmox instance +5. Open a PR in **ProxmoxVED** for review and testing +6. Once accepted and verified there, the script will be promoted to ProxmoxVE by maintainers + +Follow the coding standards at [community-scripts.org/docs/contribution](https://community-scripts.org/docs/contribution). + +--- + +### Fixing a bug or improving an existing script + +Changes to scripts that already exist in ProxmoxVE go directly here: + +1. Fork **this repository** (ProxmoxVE) and clone it: + + ```bash + git clone https://github.com/YOUR_USERNAME/ProxmoxVE + cd ProxmoxVE + ``` + +2. Create a branch: + + ```bash + git switch -c fix/myapp-description + ``` + +3. Make your changes to the relevant files in `ct/` and/or `install/` + +4. Open a PR from your fork to `community-scripts/ProxmoxVE/main` + +Your PR should only contain the files you changed. Do not include unrelated modifications. + +--- + +## Code Standards + +Key rules at a glance: + +- One script per service — keep them focused +- Naming convention: lowercase, hyphen-separated (`my-app.sh`) +- Shebang: `#!/usr/bin/env bash` +- Quote all variables: `"$VAR"` not `$VAR` +- Use lowercase variable names +- Do not hardcode credentials or sensitive values + +Full standards and examples: **[community-scripts.org/docs/contribution](https://community-scripts.org/docs/contribution)** + +--- + +## Developer Mode & Debugging + +Set the `dev_mode` variable to enable debugging features when testing. 
Flags can be combined (comma-separated): + +```bash +dev_mode="trace,keep" bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/myapp.sh)" +``` + +| Flag | Description | +| :----------- | :----------------------------------------------------------- | +| `trace` | Enables `set -x` for maximum verbosity during execution | +| `keep` | Prevents the container from being deleted if the build fails | +| `pause` | Pauses execution at key points before customization | +| `breakpoint` | Drops to a shell at hardcoded `breakpoint` calls in scripts | +| `logs` | Saves detailed build logs to `/var/log/community-scripts/` | +| `dryrun` | Bypasses actual container creation (limited support) | +| `motd` | Forces an update of the Message of the Day | + +--- + +## Notes + +- **Website metadata** (name, description, logo, tags) is managed via the website — use the "Report Issue" link on any script page to request changes. Do not submit metadata changes via repo files. +- **JSON files** in `json/` define script properties used by the website. See existing files for structure reference. +- Keep PRs small and focused. One fix or feature per PR is ideal. +- PRs with **new scripts** opened against ProxmoxVE will be closed — submit them to [ProxmoxVED](https://github.com/community-scripts/ProxmoxVED) instead. +- PRs that fail CI checks will not be merged. diff --git a/README.md b/README.md index 6f5f7dcb1..de874750e 100644 --- a/README.md +++ b/README.md @@ -1,283 +1,209 @@
- Proxmox VE Helper-Scripts Logo - + Proxmox VE Helper-Scripts Logo +

Proxmox VE Helper-Scripts

-

A Community Legacy in Memory of @tteck

+

One-command installations for services, containers, and VMs on Proxmox VE
+ A community project — built on the foundation of @tteck's original work

- - Website - - - Discord - - - Donate - + + + + +

- -

- - Contribute - - - Guides - - - Changelog - -

- -
- - **Simplify your Proxmox VE setup with community-driven automation scripts** - Originally created by tteck, now maintained and expanded by the community - -
- -
- -
- 🙌 Shoutout to -
-
- - selfh.st Icons - -
- View on GitHub • Consistent, beautiful icons for 5000+ self-hosted apps
--- -## 🎯 Key Features +## What is this? -
+**Simplify your Proxmox VE setup with community-driven automation scripts.** - - - - - - - - - - - - - -
-

⚡ Quick Setup

-

One-command installations for popular services and containers

-
-

⚙️ Flexible Config

-

Simple mode for beginners, advanced options for power users

-
-

🔄 Auto Updates

-

Keep your installations current with built-in update mechanisms

-
-

🛠️ Easy Management

-

Post-install scripts for configuration and troubleshooting

-
-

👥 Community Driven

-

Actively maintained with contributions from users worldwide

-
-

📖 Well Documented

-

Comprehensive guides and community support

-
-

🔒 Secure

-

Regular security updates and best practices

-
-

⚡ Performance

-

Optimized configurations for best performance

-
+Install and configure popular self-hosted services with a single command — no manual package hunting, no config file archaeology. Paste a command into your Proxmox shell, answer a few prompts, and your container or VM is up and running. -
+The collection covers hundreds of services across categories like home automation, media servers, networking tools, databases, monitoring stacks, and more. --- -## 📋 Requirements +## Requirements -
- - - - - - - -
-

🖥️ Proxmox VE

-

Version: 8.4.x | 9.0.x | 9.1.x

-
-

🐧 Operating System

-

Debian-based with Proxmox Tools

-
-

🌐 Network

-

Internet connection required

-
- -
+| Component | Details | +| -------------- | ------------------------------------------------ | +| **Proxmox VE** | Version 8.4, 9.0, or 9.1 | +| **Host OS** | Proxmox VE (Debian-based) | +| **Access** | Root shell access on the Proxmox host | +| **Network** | Internet connection required during installation | --- -## 📥 Getting Started +## Getting Started -Choose your preferred installation method: +The fastest way to find and run scripts: -### Method 1: One-Click Web Installer +1. Go to **[community-scripts.org](https://community-scripts.org)** +2. Search for the service you want (e.g. "Home Assistant", "Nginx Proxy Manager", "Jellyfin") +3. Copy the one-line install command from the script page +4. Open your **Proxmox Shell** and paste it +5. Choose between **Default** or **Advanced** setup and follow the prompts -The fastest way to get started: - -1. Visit **[helper-scripts.com](https://helper-scripts.com/)** 🌐 -2. Search for your desired script (e.g., "Home Assistant", "Docker") -3. Copy the bash command displayed on the script page -4. Open your **Proxmox Shell** and paste the command -5. Press Enter and follow the interactive prompts - -### Method 2: PVEScripts-Local - -Install a convenient script manager directly in your Proxmox UI: - -```bash -bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/pve-scripts-local.sh)" -``` - -This adds a menu to your Proxmox interface for easy script access without visiting the website. - -📖 **Learn more:** [ProxmoxVE-Local Repository](https://github.com/community-scripts/ProxmoxVE-Local) +Each script page documents what the container includes, default resource allocation, and post-install notes. --- -## 💬 Join the Community +## How Scripts Work -
+Every script follows the same pattern: - +**Default mode** — Picks sensible resource defaults (CPU, RAM, storage) and asks only the minimum required questions. Most installs finish in under five minutes. + +**Advanced mode** — Gives you full control over container settings, networking, storage backends, and application-level configuration before anything is installed. + +After installation, each container ships with a **post-install helper** accessible from the Proxmox shell. It handles common tasks like: + +- Applying updates to the installed service +- Changing application settings without manually editing config files +- Basic troubleshooting and log access + +--- + +## What's Included + +The repository covers a wide range of categories. A few examples: + +| Category | Examples | +| --------------- | --------------------------------------------------- | +| Home Automation | Home Assistant, Zigbee2MQTT, ESPHome, Node-RED | +| Media | Jellyfin, Plex, Radarr, Sonarr, Immich | +| Networking | AdGuard Home, Nginx Proxy Manager, Pi-hole, Traefik | +| Monitoring | Grafana, Prometheus, Uptime Kuma, Netdata | +| Databases | PostgreSQL, MariaDB, Redis, InfluxDB | +| Security | Vaultwarden, CrowdSec, Authentik | +| Dev & Tools | Gitea, Portainer, VS Code Server, n8n | + +> Browse the full list at **[community-scripts.org/categories](https://community-scripts.org/categories)** — new scripts are added regularly. + +--- + +## Contributing + +This project runs on community contributions. Whether you want to write new scripts, improve existing ones, or just report a bug — every bit helps. 
+ +### Where to start + +| I want to… | Go here | +| ------------------------------------- | ------------------------------------------------------------------------------------------------- | +| Add a **new** script | [ProxmoxVED](https://github.com/community-scripts/ProxmoxVED) — new scripts are tested here first | +| Fix or improve an **existing** script | [Contributing Guidelines](CONTRIBUTING.md) — open a PR in this repo | +| Report a bug or broken script | [Issues](https://github.com/community-scripts/ProxmoxVE/issues) | +| Request a new script or feature | [Discussions](https://github.com/community-scripts/ProxmoxVE/discussions) | +| Report a security vulnerability | [Security Policy](SECURITY.md) | +| Get help or chat with other users | [Discord](https://discord.gg/3AnUqsXnmK) | + +### Before you open a PR + +- **New scripts go to [ProxmoxVED](https://github.com/community-scripts/ProxmoxVED), not here.** PRs with new scripts opened directly against this repo will be closed. +- Bug fixes and improvements to existing scripts belong in this repo — read the [Contributing Guidelines](CONTRIBUTING.md) first. +- Keep PRs focused. One fix or feature per PR. +- Document what your script installs and any non-obvious decisions in the corresponding JSON metadata file. + +--- + +## Core Team + +
- - - + + +
-

💬 Discord

-

Real-time chat, support, and discussions

- - Discord +
+ + MickLesk
+ MickLesk
-

💭 Discussions

-

Feature requests, Q&A, and ideas

- - Discussions +
+ + michelroegl-brunner
+ michelroegl-brunner
-

🐛 Issues

-

Bug reports and issue tracking

- - Issues +
+ + BramSuurdje
+ BramSuurdje +
+
+ + CrazyWolf13
+ CrazyWolf13 +
+
+ + tremor021
+ tremor021 +
+
+ + vhsdream
+ vhsdream
-
- --- -## 🛠️ Contribute +## Project Activity -
- - - - - - - - -
-

💻 Code

-

Add new scripts or improve existing ones

-
-

📝 Documentation

-

Write guides, improve READMEs, translate content

-
-

🧪 Testing

-

Test scripts and report compatibility issues

-
-

💡 Ideas

-

Suggest features or workflow improvements

-
- -
- -
-
- - 👉 Check our **[Contributing Guidelines](https://github.com/community-scripts/ProxmoxVE/blob/main/docs/contribution/README.md)** to get started - -
- ---- - -## ❤️ Support the Project - -This project is maintained by volunteers in memory of tteck. Your support helps us maintain infrastructure, improve documentation, and give back to important causes. - -**🎗️ 30% of all donations go directly to cancer research and hospice care** - -
- - - Support on Ko-fi - - -
-Every contribution helps keep this project alive and supports meaningful causes - -
- ---- - -## 📈 Project Statistics

Repobeats analytics

- - - Star History Chart + + + Star History Chart

--- -## 📜 License +## Support the Project -This project is licensed under the **[MIT License](LICENSE)** - feel free to use, modify, and distribute. +This project is maintained by volunteers. All infrastructure costs come out of pocket, and the work is done in people's spare time. + +**30% of all donations are forwarded directly to cancer research and hospice care** — a cause that was important to tteck. + +
+ + Support on Ko-fi + +   + + Donate via community-scripts.org + +
+ +--- + +## License + +This project is licensed under the [MIT License](LICENSE) — free to use, modify, and redistribute for personal and commercial purposes. + +See the full license text in [LICENSE](LICENSE). ---
- Made with ❤️ by the Proxmox community in memory of tteck -
+ Built on the foundation of tteck's original work · Original Repository
+ Maintained and expanded by the community · In memory of tteck
Proxmox® is a registered trademark of Proxmox Server Solutions GmbH
diff --git a/ct/2fauth.sh b/ct/2fauth.sh index deee15371..2d984da37 100644 --- a/ct/2fauth.sh +++ b/ct/2fauth.sh @@ -24,7 +24,7 @@ function update_script() { check_container_storage check_container_resources - if [[ ! -d "/opt/2fauth" ]]; then + if [[ ! -d /opt/2fauth ]]; then msg_error "No ${APP} Installation Found!" exit fi @@ -34,7 +34,8 @@ function update_script() { $STD apt -y upgrade msg_info "Creating Backup" - mv "/opt/2fauth" "/opt/2fauth-backup" + rm -rf /opt/2fauth-backup + mv /opt/2fauth /opt/2fauth-backup if ! dpkg -l | grep -q 'php8.4'; then cp /etc/nginx/conf.d/2fauth.conf /etc/nginx/conf.d/2fauth.conf.bak fi @@ -46,15 +47,17 @@ function update_script() { fi fetch_and_deploy_gh_release "2fauth" "Bubka/2FAuth" "tarball" setup_composer - mv "/opt/2fauth-backup/.env" "/opt/2fauth/.env" - mv "/opt/2fauth-backup/storage" "/opt/2fauth/storage" - cd "/opt/2fauth" || return - chown -R www-data: "/opt/2fauth" - chmod -R 755 "/opt/2fauth" + cp /opt/2fauth-backup/.env /opt/2fauth/.env + cp -r /opt/2fauth-backup/storage /opt/2fauth/storage + cd /opt/2fauth || return export COMPOSER_ALLOW_SUPERUSER=1 $STD composer install --no-dev --prefer-dist php artisan 2fauth:install + chown -R www-data: /opt/2fauth + chmod -R 755 /opt/2fauth + $STD systemctl restart php8.4-fpm $STD systemctl restart nginx + rm -rf /opt/2fauth-backup msg_ok "Updated successfully!" fi exit diff --git a/ct/actualbudget.sh b/ct/actualbudget.sh index f9003cbd6..7a75e4d7e 100644 --- a/ct/actualbudget.sh +++ b/ct/actualbudget.sh @@ -48,9 +48,9 @@ function update_script() { msg_ok "Updated successfully!" 
fi else - msg_info "Old Installation Found, you need to migrate your data and recreate to a new container" - msg_info "Please follow the instructions on the Actual Budget website to migrate your data" - msg_info "https://actualbudget.org/docs/backup-restore/backup" + msg_warn "Old Installation Found, you need to migrate your data and recreate to a new container" + msg_warn "Please follow the instructions on the Actual Budget website to migrate your data" + msg_warn "https://actualbudget.org/docs/backup-restore/backup" exit fi exit diff --git a/ct/adventurelog.sh b/ct/adventurelog.sh index 1859bdac2..a2997a471 100644 --- a/ct/adventurelog.sh +++ b/ct/adventurelog.sh @@ -56,6 +56,7 @@ function update_script() { fi $STD .venv/bin/python -m pip install --upgrade pip $STD .venv/bin/python -m pip install -r requirements.txt + $STD .venv/bin/python -m pip install 'djangorestframework<3.15' $STD .venv/bin/python -m manage collectstatic --noinput $STD .venv/bin/python -m manage migrate diff --git a/ct/alpine-borgbackup-server.sh b/ct/alpine-borgbackup-server.sh new file mode 100644 index 000000000..2c2c0f51f --- /dev/null +++ b/ct/alpine-borgbackup-server.sh @@ -0,0 +1,107 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: Sander Koenders (sanderkoenders) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://www.borgbackup.org/ + +APP="Alpine-BorgBackup-Server" +var_tags="${var_tags:-alpine;backup}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-1024}" +var_disk="${var_disk:-20}" +var_os="${var_os:-alpine}" +var_version="${var_version:-3.23}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + + if [[ ! -f /usr/bin/borg ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + CHOICE=$(msg_menu "BorgBackup Server Update Options" \ + "1" "Update BorgBackup Server" \ + "2" "Reset SSH Access" \ + "3" "Enable password authentication for backup user (not recommended, use SSH key instead)" \ + "4" "Disable password authentication for backup user (recommended for security, use SSH key)") + + case $CHOICE in + 1) + msg_info "Updating $APP LXC" + $STD apk -U upgrade + msg_ok "Updated $APP LXC successfully!" + ;; + 2) + if [[ "${PHS_SILENT:-0}" == "1" ]]; then + msg_warn "Reset SSH Public key requires interactive mode, skipping." + exit + fi + + msg_info "Setting up SSH Public Key for backup user" + + msg_info "Please paste your SSH public key (e.g., ssh-rsa AAAAB3... user@host): \n" + read -p "Key: " SSH_PUBLIC_KEY + echo + + if [[ -z "$SSH_PUBLIC_KEY" ]]; then + msg_error "No SSH public key provided!" + exit 1 + fi + + if [[ ! "$SSH_PUBLIC_KEY" =~ ^(ssh-rsa|ssh-dss|ssh-ed25519|ecdsa-sha2-) ]]; then + msg_error "Invalid SSH public key format!" + exit 1 + fi + + msg_info "Setting up SSH access" + mkdir -p /home/backup/.ssh + echo "$SSH_PUBLIC_KEY" >/home/backup/.ssh/authorized_keys + + chown -R backup:backup /home/backup/.ssh + chmod 700 /home/backup/.ssh + chmod 600 /home/backup/.ssh/authorized_keys + + msg_ok "SSH access configured for backup user" + ;; + 3) + if [[ "${PHS_SILENT:-0}" == "1" ]]; then + msg_warn "Enabling password authentication requires interactive mode, skipping." + exit + fi + + msg_info "Enabling password authentication for backup user" + msg_warn "Password authentication is less secure than using SSH keys. Consider using SSH keys instead." 
+ passwd backup + sed -i 's/^#*\s*PasswordAuthentication\s\+\(yes\|no\)/PasswordAuthentication yes/' /etc/ssh/sshd_config + rc-service sshd restart + msg_ok "Password authentication enabled for backup user" + ;; + 4) + msg_info "Disabling password authentication for backup user" + sed -i 's/^#*\s*PasswordAuthentication\s\+\(yes\|no\)/PasswordAuthentication no/' /etc/ssh/sshd_config + rc-service sshd restart + msg_ok "Password authentication disabled for backup user" + ;; + esac + + exit 0 +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW}Connection information:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}ssh backup@${IP}${CL}" +echo -e "${TAB}${VERIFYPW}${YW}To set SSH key, run this script with the 'update' option and select option 2${CL}" diff --git a/ct/alpine-ironclaw.sh b/ct/alpine-ironclaw.sh new file mode 100644 index 000000000..de8a8558a --- /dev/null +++ b/ct/alpine-ironclaw.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/nearai/ironclaw + +APP="Alpine-IronClaw" +var_tags="${var_tags:-ai;agent;alpine}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-1024}" +var_disk="${var_disk:-8}" +var_os="${var_os:-alpine}" +var_version="${var_version:-3.23}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + + if [[ ! -f /usr/local/bin/ironclaw ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + if check_for_gh_release "ironclaw-bin" "nearai/ironclaw"; then + msg_info "Stopping Service" + rc-service ironclaw stop 2>/dev/null || true + msg_ok "Stopped Service" + + msg_info "Backing up Configuration" + cp /root/.ironclaw/.env /root/ironclaw.env.bak + msg_ok "Backed up Configuration" + + fetch_and_deploy_gh_release "ironclaw-bin" "nearai/ironclaw" "prebuild" "latest" "/usr/local/bin" \ + "ironclaw-$(uname -m)-unknown-linux-musl.tar.gz" + chmod +x /usr/local/bin/ironclaw + + msg_info "Restoring Configuration" + cp /root/ironclaw.env.bak /root/.ironclaw/.env + rm -f /root/ironclaw.env.bak + msg_ok "Restored Configuration" + + msg_info "Starting Service" + rc-service ironclaw start + msg_ok "Started Service" + msg_ok "Updated successfully!" + fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Complete setup by running:${CL}" +echo -e "${TAB}${BGN}ironclaw onboard${CL}" +echo -e "${INFO}${YW} Then start the service:${CL}" +echo -e "${TAB}${BGN}rc-service ironclaw start${CL}" +echo -e "${INFO}${YW} Access the Web UI at:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}" +echo -e "${INFO}${YW} Auth token and database credentials:${CL}" +echo -e "${TAB}${BGN}cat /root/.ironclaw/.env${CL}" diff --git a/ct/alpine-komodo.sh b/ct/alpine-komodo.sh index e18de110e..e856e7b1d 100644 --- a/ct/alpine-komodo.sh +++ b/ct/alpine-komodo.sh @@ -35,6 +35,8 @@ function update_script() { read -r -p "${TAB}Migrate update function now? [y/N]: " CONFIRM if [[ ! "${CONFIRM,,}" =~ ^(y|yes)$ ]]; then msg_warn "Migration skipped. The old update will continue to work for now." + msg_warn "⚠️ Komodo v2 uses :2 image tags. The :latest tag is deprecated and will not receive v2 updates." + msg_warn "Please migrate to the addon script to receive Komodo v2." 
msg_info "Updating ${APP} (legacy)" COMPOSE_FILE=$(find /opt/komodo -maxdepth 1 -type f -name '*.compose.yaml' ! -name 'compose.env' | head -n1) if [[ -z "$COMPOSE_FILE" ]]; then diff --git a/ct/alpine-ntfy.sh b/ct/alpine-ntfy.sh new file mode 100644 index 000000000..a3bc21f31 --- /dev/null +++ b/ct/alpine-ntfy.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: cobalt (cobaltgit) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://ntfy.sh/ + +APP="Alpine-ntfy" +var_tags="${var_tags:-notification}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-256}" +var_disk="${var_disk:-2}" +var_os="${var_os:-alpine}" +var_version="${var_version:-3.23}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + if [[ ! -d /etc/ntfy ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + msg_info "Updating ntfy LXC" + $STD apk -U upgrade + setcap 'cap_net_bind_service=+ep' /usr/bin/ntfy + msg_ok "Updated ntfy LXC" + + msg_info "Restarting ntfy" + rc-service ntfy restart + msg_ok "Restarted ntfy" + msg_ok "Updated successfully!" + exit +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}" diff --git a/ct/alpine-redlib.sh b/ct/alpine-redlib.sh index b9b4d49ff..71d470a1f 100644 --- a/ct/alpine-redlib.sh +++ b/ct/alpine-redlib.sh @@ -21,7 +21,6 @@ catch_errors function update_script() { header_info - check_container_resources if [[ ! -d /opt/redlib ]]; then msg_error "No ${APP} Installation Found!"
diff --git a/ct/alpine-rustypaste.sh b/ct/alpine-rustypaste.sh index 4bf9c633c..bdb4cb9df 100644 --- a/ct/alpine-rustypaste.sh +++ b/ct/alpine-rustypaste.sh @@ -21,8 +21,6 @@ catch_errors function update_script() { header_info - check_container_storage - check_container_resources if ! apk info -e rustypaste >/dev/null 2>&1; then msg_error "No ${APP} Installation Found!" diff --git a/ct/alpine-wakapi.sh b/ct/alpine-wakapi.sh new file mode 100644 index 000000000..042ffa522 --- /dev/null +++ b/ct/alpine-wakapi.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: Slaviša Arežina (tremor021) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://wakapi.dev/ | https://github.com/muety/wakapi + +APP="Alpine-Wakapi" +var_tags="${var_tags:-code;time-tracking}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-512}" +var_disk="${var_disk:-4}" +var_os="${var_os:-alpine}" +var_version="${var_version:-3.23}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + if [[ ! -d /opt/wakapi ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + RELEASE=$(curl -s https://api.github.com/repos/muety/wakapi/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }') + if [ "${RELEASE}" != "$(cat ~/.wakapi 2>/dev/null)" ] || [ ! 
-f ~/.wakapi ]; then + msg_info "Stopping Wakapi Service" + $STD rc-service wakapi stop + msg_ok "Stopped Wakapi Service" + + msg_info "Updating Wakapi LXC" + $STD apk -U upgrade + msg_ok "Updated Wakapi LXC" + + msg_info "Creating backup" + mkdir -p /opt/wakapi-backup + cp /opt/wakapi/config.yml /opt/wakapi/wakapi_db.db /opt/wakapi-backup/ + msg_ok "Created backup" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "wakapi" "muety/wakapi" "prebuild" "latest" "/opt/wakapi" "wakapi_linux_amd64.zip" + + msg_info "Configuring Wakapi" + cd /opt/wakapi + cp /opt/wakapi-backup/config.yml /opt/wakapi/ + cp /opt/wakapi-backup/wakapi_db.db /opt/wakapi/ + rm -rf /opt/wakapi-backup + msg_ok "Configured Wakapi" + + msg_info "Starting Service" + $STD rc-service wakapi start + msg_ok "Started Service" + msg_ok "Updated successfully" + else + msg_ok "No update required. ${APP} is already at ${RELEASE}" + fi + exit 0 +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}" diff --git a/ct/anchor.sh b/ct/anchor.sh new file mode 100644 index 000000000..6d8ea36cb --- /dev/null +++ b/ct/anchor.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/ZhFahim/anchor + +APP="Anchor" +var_tags="${var_tags:-notes;productivity;sync}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-10}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + 
header_info + check_container_storage + check_container_resources + + if [[ ! -f ~/.anchor ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_release "anchor" "ZhFahim/anchor"; then + msg_info "Stopping Services" + systemctl stop anchor-web anchor-server + msg_ok "Stopped Services" + + msg_info "Backing up Configuration" + cp /opt/anchor/.env /opt/anchor.env.bak + msg_ok "Backed up Configuration" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "anchor" "ZhFahim/anchor" "tarball" + + msg_info "Building Server" + cd /opt/anchor/server + $STD pnpm install --frozen-lockfile + $STD pnpm prisma generate + $STD pnpm build + [[ -d src/generated ]] && mkdir -p dist/src && cp -R src/generated dist/src/ + msg_ok "Built Server" + + msg_info "Building Web Interface" + cd /opt/anchor/web + $STD pnpm install --frozen-lockfile + SERVER_URL=http://127.0.0.1:3001 $STD pnpm build + cp -r .next/static .next/standalone/.next/static + cp -r public .next/standalone/public + msg_ok "Built Web Interface" + + cp /opt/anchor.env.bak /opt/anchor/.env + rm -f /opt/anchor.env.bak + + msg_info "Running Database Migrations" + cd /opt/anchor/server + set -a && source /opt/anchor/.env && set +a + $STD pnpm prisma migrate deploy + msg_ok "Ran Database Migrations" + + msg_info "Starting Services" + systemctl start anchor-server anchor-web + msg_ok "Started Services" + msg_ok "Updated ${APP}" + fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}" diff --git a/ct/anytype-server.sh b/ct/anytype-server.sh new file mode 100644 index 000000000..a390a3c95 --- /dev/null +++ b/ct/anytype-server.sh @@ -0,0 +1,67 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) + +# 
Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://anytype.io + +APP="Anytype-Server" +var_tags="${var_tags:-notes;productivity;sync}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-4096}" +var_disk="${var_disk:-16}" +var_os="${var_os:-ubuntu}" +var_version="${var_version:-24.04}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -f /opt/anytype/any-sync-bundle ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_release "anytype" "grishy/any-sync-bundle"; then + msg_info "Stopping Service" + systemctl stop anytype + msg_ok "Stopped Service" + + msg_info "Backing up Data" + cp -r /opt/anytype/data /opt/anytype_data_backup + msg_ok "Backed up Data" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "anytype" "grishy/any-sync-bundle" "prebuild" "latest" "/opt/anytype" "any-sync-bundle_*_linux_amd64.tar.gz" + chmod +x /opt/anytype/any-sync-bundle + + msg_info "Restoring Data" + cp -r /opt/anytype_data_backup/. /opt/anytype/data + rm -rf /opt/anytype_data_backup + msg_ok "Restored Data" + + msg_info "Starting Service" + systemctl start anytype + msg_ok "Started Service" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:33010${CL}" +echo -e "${INFO}${YW} Client config file:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}/opt/anytype/data/client-config.yml${CL}" diff --git a/ct/apprise-api.sh b/ct/apprise-api.sh new file mode 100644 index 000000000..f3e6f7f0b --- /dev/null +++ b/ct/apprise-api.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: SystemIdleProcess +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/caronc/apprise-api + +APP="Apprise-API" +var_tags="${var_tags:-notification}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-512}" +var_disk="${var_disk:-2}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d "/opt/apprise" ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + if check_for_gh_release "apprise" "caronc/apprise-api"; then + msg_info "Stopping Service" + systemctl stop apprise-api + msg_ok "Stopped Service" + + PYTHON_VERSION="3.12" setup_uv + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "apprise" "caronc/apprise-api" "tarball" + + msg_info "Updating Apprise-API" + cd /opt/apprise + cp ./requirements.txt /etc/requirements.txt + $STD apt install -y nginx git + $STD uv pip install -r requirements.txt gunicorn supervisor --system + cp -fr apprise_api/static /usr/share/nginx/html/s/ + mv apprise_api/ webapp + touch /etc/nginx/server-override.conf + touch /etc/nginx/location-override.conf + mkdir -p /config/store /attach /plugin /tmp/apprise /opt/apprise/logs + chmod 1777 /tmp/apprise && chmod 777 /config /config/store /attach /plugin /opt/apprise/logs + sed -i \ + -e '/[[]program:nginx]/,/^[[]/ s|stdout_logfile=/dev/stdout|stdout_logfile=/opt/apprise/logs/nginx.log|' \ + -e '/[[]program:nginx]/,/^[[]/ s|stderr_logfile=/dev/stderr|stderr_logfile=/opt/apprise/logs/nginx_error.log|' \ + -e '/[[]program:gunicorn]/,/^[[]/ s|stdout_logfile=/dev/stdout|stdout_logfile=/opt/apprise/logs/gunicorn.log|' \ + -e '/[[]program:gunicorn]/,/^[[]/ s|stderr_logfile=/dev/stderr|stderr_logfile=/opt/apprise/logs/gunicorn_error.log|' \ + -e '/[[]supervisord]/,/^[[]/ s|logfile=/dev/null|logfile=/opt/apprise/logs/supervisor.log|' \ + -e 's|_maxbytes=0|_maxbytes=10485760|g' \ + /opt/apprise/webapp/etc/supervisord.conf + msg_ok "Updated Apprise-API" + + msg_info "Starting Service" + systemctl start apprise-api + msg_ok "Started Service" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8000${CL}" diff --git a/ct/babybuddy.sh b/ct/babybuddy.sh index e920cc4fb..cf7a20e14 100644 --- a/ct/babybuddy.sh +++ b/ct/babybuddy.sh @@ -48,6 +48,7 @@ function update_script() { mv /tmp/production.py.bak /opt/babybuddy/babybuddy/settings/production.py source .venv/bin/activate $STD uv pip install -r requirements.txt + export DJANGO_SETTINGS_MODULE=babybuddy.settings.production $STD python manage.py migrate msg_ok "Updated ${APP}" diff --git a/ct/bambuddy.sh b/ct/bambuddy.sh new file mode 100644 index 000000000..5b115b11d --- /dev/null +++ b/ct/bambuddy.sh @@ -0,0 +1,90 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: Adrian-RDA +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/maziggy/bambuddy + +APP="Bambuddy" +var_tags="${var_tags:-media;3d-printing}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-10}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/bambuddy ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + ensure_dependencies ffmpeg + + if check_for_gh_release "bambuddy" "maziggy/bambuddy"; then + msg_info "Stopping Service" + systemctl stop bambuddy + msg_ok "Stopped Service" + + msg_info "Backing up Configuration and Data" + cp /opt/bambuddy/.env /opt/bambuddy.env.bak + cp -r /opt/bambuddy/data /opt/bambuddy_data_bak + [[ -f /opt/bambuddy/bambuddy.db ]] && cp /opt/bambuddy/bambuddy.db /opt/bambuddy.db.bak + [[ -f /opt/bambuddy/bambutrack.db ]] && cp /opt/bambuddy/bambutrack.db /opt/bambutrack.db.bak + [[ -d /opt/bambuddy/archive ]] && cp -r /opt/bambuddy/archive /opt/bambuddy_archive_bak + msg_ok "Backed up Configuration and Data" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "bambuddy" "maziggy/bambuddy" "tarball" "latest" "/opt/bambuddy" + + msg_info "Updating Python Dependencies" + cd /opt/bambuddy + $STD uv venv --clear + $STD uv pip install -r requirements.txt + msg_ok "Updated Python Dependencies" + + msg_info "Rebuilding Frontend" + cd /opt/bambuddy/frontend + $STD npm install + $STD npm run build + msg_ok "Rebuilt Frontend" + + msg_info "Restoring Configuration and Data" + mkdir -p /opt/bambuddy/data + cp /opt/bambuddy.env.bak /opt/bambuddy/.env + cp -r /opt/bambuddy_data_bak/. /opt/bambuddy/data/ + [[ -f /opt/bambuddy.db.bak ]] && cp /opt/bambuddy.db.bak /opt/bambuddy/bambuddy.db + [[ -f /opt/bambutrack.db.bak ]] && cp /opt/bambutrack.db.bak /opt/bambuddy/bambutrack.db + if [[ -d /opt/bambuddy_archive_bak ]]; then + mkdir -p /opt/bambuddy/archive + cp -r /opt/bambuddy_archive_bak/. /opt/bambuddy/archive/ + fi + rm -f /opt/bambuddy.env.bak /opt/bambuddy.db.bak /opt/bambutrack.db.bak + rm -rf /opt/bambuddy_data_bak /opt/bambuddy_archive_bak + msg_ok "Restored Configuration and Data" + + msg_info "Starting Service" + systemctl start bambuddy + msg_ok "Started Service" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8000${CL}" diff --git a/ct/bentopdf.sh b/ct/bentopdf.sh index c07f95bb6..f4662bee9 100644 --- a/ct/bentopdf.sh +++ b/ct/bentopdf.sh @@ -42,7 +42,6 @@ function update_script() { msg_info "Updating BentoPDF" cd /opt/bentopdf $STD npm ci --no-audit --no-fund - $STD npm install http-server -g if [[ -f /opt/production.env ]]; then mv /opt/production.env ./.env.production else @@ -52,15 +51,97 @@ function update_script() { export SIMPLE_MODE=true export VITE_USE_CDN=true $STD npm run build:all + if [[ ! -f /opt/bentopdf/dist/config.json ]]; then + cat <<'EOF' >/opt/bentopdf/dist/config.json +{} +EOF + fi msg_ok "Updated BentoPDF" msg_info "Starting Service" - if grep -q '8080' /etc/systemd/system/bentopdf.service; then - sed -i -e 's|/bentopdf|/bentopdf/dist|' \ - -e 's|npx.*|npx http-server -g -b -d false -r --no-dotfiles|' \ - /etc/systemd/system/bentopdf.service - systemctl daemon-reload + ensure_dependencies nginx openssl + if [[ ! -f /etc/ssl/private/bentopdf-selfsigned.key || ! 
-f /etc/ssl/certs/bentopdf-selfsigned.crt ]]; then + CERT_CN="$(hostname -I | awk '{print $1}')" + $STD openssl req -x509 -nodes -newkey rsa:2048 -days 3650 \ + -keyout /etc/ssl/private/bentopdf-selfsigned.key \ + -out /etc/ssl/certs/bentopdf-selfsigned.crt \ + -subj "/CN=${CERT_CN}" fi + cat <<'EOF' >/etc/nginx/sites-available/bentopdf +server { + listen 8080; + server_name _; + return 301 https://$host:8443$request_uri; + } + + server { + listen 8443 ssl; + server_name _; + ssl_certificate /etc/ssl/certs/bentopdf-selfsigned.crt; + ssl_certificate_key /etc/ssl/private/bentopdf-selfsigned.key; + root /opt/bentopdf/dist; + index index.html; + + # Required for LibreOffice WASM (Word/Excel/PowerPoint to PDF via SharedArrayBuffer) + add_header Cross-Origin-Opener-Policy "same-origin" always; + add_header Cross-Origin-Embedder-Policy "require-corp" always; + add_header Cross-Origin-Resource-Policy "cross-origin" always; + add_header X-Content-Type-Options "nosniff" always; + add_header X-Frame-Options "SAMEORIGIN" always; + + gzip_static on; + + location ~* /libreoffice-wasm/soffice\.wasm\.gz$ { + gzip off; + types {} default_type application/wasm; + add_header Content-Encoding gzip; + add_header Vary "Accept-Encoding"; + add_header Cache-Control "public, immutable"; + } + + location ~* /libreoffice-wasm/soffice\.data\.gz$ { + gzip off; + types {} default_type application/octet-stream; + add_header Content-Encoding gzip; + add_header Vary "Accept-Encoding"; + add_header Cache-Control "public, immutable"; + } + + location ~* \.wasm$ { + types {} default_type application/wasm; + expires 1y; + add_header Cache-Control "public, immutable"; + } + + location ~* \.(wasm\.gz|data\.gz|data)$ { + expires 1y; + add_header Cache-Control "public, immutable"; + } + + location / { + try_files $uri $uri/ $uri.html =404; + } + + error_page 404 /404.html; +} +EOF + rm -f /etc/nginx/sites-enabled/default + ln -sf /etc/nginx/sites-available/bentopdf /etc/nginx/sites-enabled/bentopdf + cat 
<<'EOF' >/etc/systemd/system/bentopdf.service +[Unit] +Description=BentoPDF Service +After=network.target + +[Service] +Type=simple +ExecStart=/usr/sbin/nginx -g "daemon off;" +ExecReload=/bin/kill -HUP $MAINPID +Restart=always + +[Install] +WantedBy=multi-user.target +EOF + systemctl daemon-reload systemctl start bentopdf msg_ok "Started Service" msg_ok "Updated successfully!" @@ -75,4 +156,4 @@ description msg_ok "Completed successfully!\n" echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8080${CL}" +echo -e "${TAB}${GATEWAY}${BGN}https://${IP}:8443${CL}" diff --git a/ct/beszel.sh b/ct/beszel.sh index d3417a488..59fbe0989 100644 --- a/ct/beszel.sh +++ b/ct/beszel.sh @@ -36,7 +36,9 @@ function update_script() { msg_info "Updating Beszel" $STD /opt/beszel/beszel update sleep 2 && chmod +x /opt/beszel/beszel - msg_ok "Updated Beszel" + VERSION=$(/opt/beszel/beszel -v | awk '{print $3}') + echo "${VERSION}" >$HOME/.beszel + msg_ok "Updated Beszel to ${VERSION}" msg_info "Starting Service" systemctl start beszel-hub diff --git a/ct/birdnet-go.sh b/ct/birdnet-go.sh new file mode 100644 index 000000000..7deb1dc8e --- /dev/null +++ b/ct/birdnet-go.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/tphakala/birdnet-go + +APP="BirdNET-Go" +var_tags="${var_tags:-monitoring;ai;nature}" +var_cpu="${var_cpu:-4}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-12}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" +var_gpu="${var_gpu:-no}" + +header_info "$APP" +variables +color +catch_errors + +function 
update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -f /usr/local/bin/birdnet-go ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_release "birdnet" "tphakala/birdnet-go"; then + msg_info "Stopping Service" + systemctl stop birdnet + msg_ok "Stopped Service" + + fetch_and_deploy_gh_release "birdnet" "tphakala/birdnet-go" "prebuild" "latest" "/opt/birdnet" "birdnet-go-linux-amd64.tar.gz" + + msg_info "Deploying Binary" + cp /opt/birdnet/birdnet-go /usr/local/bin/birdnet-go + chmod +x /usr/local/bin/birdnet-go + cp -r /opt/birdnet/libtensorflowlite_c.so /usr/local/lib/ || true + ldconfig + msg_ok "Deployed Binary" + + msg_info "Starting Service" + systemctl start birdnet + msg_ok "Started Service" + msg_ok "Updated successfully!" + fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8080${CL}" diff --git a/ct/booklore.sh b/ct/booklore.sh deleted file mode 100644 index 250df5a87..000000000 --- a/ct/booklore.sh +++ /dev/null @@ -1,113 +0,0 @@ -#!/usr/bin/env bash -source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) -# Copyright (c) 2021-2026 community-scripts ORG -# Author: MickLesk (CanbiZ) -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://github.com/booklore-app/BookLore - -APP="BookLore" -var_tags="${var_tags:-books;library}" -var_cpu="${var_cpu:-3}" -var_ram="${var_ram:-3072}" -var_disk="${var_disk:-7}" -var_os="${var_os:-debian}" -var_version="${var_version:-13}" -var_unprivileged="${var_unprivileged:-1}" - -header_info "$APP" -variables -color -catch_errors - -function update_script() { - header_info - check_container_storage - check_container_resources - - if 
[[ ! -d /opt/booklore ]]; then - msg_error "No ${APP} Installation Found!" - exit - fi - - if check_for_gh_release "booklore" "booklore-app/BookLore"; then - JAVA_VERSION="25" setup_java - NODE_VERSION="22" setup_nodejs - setup_mariadb - setup_yq - ensure_dependencies ffmpeg - - msg_info "Stopping Service" - systemctl stop booklore - msg_ok "Stopped Service" - - if grep -qE "^BOOKLORE_(DATA_PATH|BOOKDROP_PATH|BOOKS_PATH|PORT)=" /opt/booklore_storage/.env 2>/dev/null; then - msg_info "Migrating old environment variables" - sed -i 's/^BOOKLORE_DATA_PATH=/APP_PATH_CONFIG=/g' /opt/booklore_storage/.env - sed -i 's/^BOOKLORE_BOOKDROP_PATH=/APP_BOOKDROP_FOLDER=/g' /opt/booklore_storage/.env - sed -i '/^BOOKLORE_BOOKS_PATH=/d' /opt/booklore_storage/.env - sed -i '/^BOOKLORE_PORT=/d' /opt/booklore_storage/.env - msg_ok "Migrated old environment variables" - fi - - msg_info "Backing up old installation" - mv /opt/booklore /opt/booklore_bak - msg_ok "Backed up old installation" - - fetch_and_deploy_gh_release "booklore" "booklore-app/BookLore" "tarball" - - msg_info "Building Frontend" - cd /opt/booklore/booklore-ui - $STD npm install --force - $STD npm run build --configuration=production - msg_ok "Built Frontend" - - msg_info "Embedding Frontend into Backend" - mkdir -p /opt/booklore/booklore-api/src/main/resources/static - cp -r /opt/booklore/booklore-ui/dist/booklore/browser/* /opt/booklore/booklore-api/src/main/resources/static/ - msg_ok "Embedded Frontend into Backend" - - msg_info "Building Backend" - cd /opt/booklore/booklore-api - APP_VERSION=$(get_latest_github_release "booklore-app/BookLore") - yq eval ".app.version = \"${APP_VERSION}\"" -i src/main/resources/application.yaml - $STD ./gradlew clean build -x test --no-daemon - mkdir -p /opt/booklore/dist - JAR_PATH=$(find /opt/booklore/booklore-api/build/libs -maxdepth 1 -type f -name "booklore-api-*.jar" ! 
-name "*plain*" | head -n1) - if [[ -z "$JAR_PATH" ]]; then - msg_error "Backend JAR not found" - exit - fi - cp "$JAR_PATH" /opt/booklore/dist/app.jar - msg_ok "Built Backend" - - if systemctl is-active --quiet nginx 2>/dev/null; then - msg_info "Removing Nginx (no longer needed)" - systemctl disable --now nginx - $STD apt-get purge -y nginx nginx-common - msg_ok "Removed Nginx" - fi - - if ! grep -q "^SERVER_PORT=" /opt/booklore_storage/.env 2>/dev/null; then - echo "SERVER_PORT=6060" >>/opt/booklore_storage/.env - fi - - sed -i 's|ExecStart=.*|ExecStart=/usr/bin/java -XX:+UseG1GC -XX:+UseStringDeduplication -XX:+UseCompactObjectHeaders -XX:MaxRAMPercentage=75.0 -XX:+ExitOnOutOfMemoryError -jar /opt/booklore/dist/app.jar|' /etc/systemd/system/booklore.service - systemctl daemon-reload - - msg_info "Starting Service" - systemctl start booklore - rm -rf /opt/booklore_bak - msg_ok "Started Service" - msg_ok "Updated successfully!" - fi - exit -} - -start -build_container -description - -msg_ok "Completed successfully!\n" -echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" -echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:6060${CL}" diff --git a/ct/bookstack.sh b/ct/bookstack.sh index caf899285..2093fe4b0 100644 --- a/ct/bookstack.sh +++ b/ct/bookstack.sh @@ -29,6 +29,7 @@ function update_script() { exit fi setup_mariadb + ensure_dependencies git if check_for_gh_release "bookstack" "BookStackApp/BookStack"; then msg_info "Stopping Apache2" systemctl stop apache2 diff --git a/ct/bytestash.sh b/ct/bytestash.sh index 9aad7faf9..0309ed8da 100644 --- a/ct/bytestash.sh +++ b/ct/bytestash.sh @@ -29,28 +29,41 @@ function update_script() { exit fi if check_for_gh_release "bytestash" "jordan-dalby/ByteStash"; then - read -rp "${TAB3}Did you make a backup via application WebUI? 
(y/n): " backuped - if [[ "$backuped" =~ ^[Yy]$ ]]; then - msg_info "Stopping Services" - systemctl stop bytestash-backend bytestash-frontend - msg_ok "Services Stopped" + msg_info "Stopping Services" + systemctl stop bytestash-backend bytestash-frontend + msg_ok "Services Stopped" - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "bytestash" "jordan-dalby/ByteStash" "tarball" - - msg_info "Configuring ByteStash" - cd /opt/bytestash/server - $STD npm install - cd /opt/bytestash/client - $STD npm install - msg_ok "Updated ByteStash" - - msg_info "Starting Services" - systemctl start bytestash-backend bytestash-frontend - msg_ok "Started Services" - else - msg_error "PLEASE MAKE A BACKUP FIRST!" - exit + msg_info "Backing up data" + tmp_dir="/opt/bytestash-data-backup" + mkdir -p "$tmp_dir" + if [[ -d /opt/bytestash/data ]]; then + cp -r /opt/bytestash/data "$tmp_dir"/data + elif [[ -d /opt/data ]]; then + cp -r /opt/data "$tmp_dir"/data fi + msg_ok "Data backed up" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "bytestash" "jordan-dalby/ByteStash" "tarball" + + msg_info "Restoring data" + if [[ -d "$tmp_dir"/data ]]; then + mkdir -p /opt/bytestash/data + cp -r "$tmp_dir"/data/* /opt/bytestash/data/ + rm -rf "$tmp_dir" + fi + msg_ok "Data restored" + + msg_info "Configuring ByteStash" + cd /opt/bytestash/server + $STD npm install + cd /opt/bytestash/client + $STD npm install + msg_ok "Updated ByteStash" + + msg_info "Starting Services" + systemctl start bytestash-backend bytestash-frontend + msg_ok "Started Services" + msg_ok "Updated successfully!" 
fi exit diff --git a/ct/calibre-web.sh b/ct/calibre-web.sh index ffea110b5..0d168002a 100644 --- a/ct/calibre-web.sh +++ b/ct/calibre-web.sh @@ -44,7 +44,7 @@ function update_script() { msg_info "Installing Dependencies" cd /opt/calibre-web - $STD uv venv + $STD uv venv --clear /opt/calibre-web/.venv $STD uv pip install --python /opt/calibre-web/.venv/bin/python --no-cache-dir --upgrade pip setuptools wheel $STD uv pip install --python /opt/calibre-web/.venv/bin/python --no-cache-dir -r requirements.txt msg_ok "Installed Dependencies" diff --git a/ct/changedetection.sh b/ct/changedetection.sh index ff7c2181d..6fccc0354 100644 --- a/ct/changedetection.sh +++ b/ct/changedetection.sh @@ -34,11 +34,11 @@ function update_script() { NODE_VERSION="24" setup_nodejs msg_info "Updating ${APP}" - $STD pip3 install changedetection.io --upgrade + $STD pip3 install changedetection.io --upgrade --break-system-packages --ignore-installed typing_extensions msg_ok "Updated ${APP}" msg_info "Updating Playwright" - $STD pip3 install playwright --upgrade + $STD pip3 install playwright --upgrade --break-system-packages msg_ok "Updated Playwright" if [[ -f /etc/systemd/system/browserless.service ]]; then diff --git a/ct/checkmate.sh b/ct/checkmate.sh index a8c14445b..cd8e23ae0 100644 --- a/ct/checkmate.sh +++ b/ct/checkmate.sh @@ -39,7 +39,7 @@ function update_script() { [ -f /opt/checkmate/client/.env.local ] && cp /opt/checkmate/client/.env.local /opt/checkmate_client.env.local.bak msg_ok "Backed up Data" - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "checkmate" "bluewave-labs/Checkmate" + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "checkmate" "bluewave-labs/Checkmate" "tarball" msg_info "Updating Checkmate Server" cd /opt/checkmate/server diff --git a/ct/checkmk.sh b/ct/checkmk.sh index fa9d2c3b1..1f36bbca4 100644 --- a/ct/checkmk.sh +++ b/ct/checkmk.sh @@ -11,7 +11,7 @@ var_cpu="${var_cpu:-2}" var_ram="${var_ram:-2048}" var_disk="${var_disk:-6}" var_os="${var_os:-debian}" 
-var_version="${var_version:-12}" +var_version="${var_version:-13}" var_unprivileged="${var_unprivileged:-1}" header_info "$APP" @@ -23,25 +23,25 @@ function update_script() { header_info check_container_storage check_container_resources - if [[ ! -f /opt/checkmk_version.txt ]]; then + if ! command -v omd &>/dev/null; then msg_error "No ${APP} Installation Found!" exit fi - RELEASE=$(curl -fsSL https://api.github.com/repos/checkmk/checkmk/tags | grep "name" | awk '{print substr($2, 3, length($2)-4) }' | tr ' ' '\n' | grep -Ev 'rc|b' | sort -V | tail -n 1) - msg_info "Updating ${APP} to v${RELEASE}" + RELEASE=$(curl_with_retry "https://api.github.com/repos/checkmk/checkmk/tags" "-" | grep "name" | awk '{print substr($2, 3, length($2)-4) }' | tr ' ' '\n' | grep -Ev 'rc|b' | sort -V | tail -n 1) + RELEASE="${RELEASE%%+*}" + msg_info "Updating checkmk" $STD omd stop monitoring $STD omd cp monitoring monitoringbackup - curl -fsSL "https://download.checkmk.com/checkmk/${RELEASE}/check-mk-raw-${RELEASE}_0.bookworm_amd64.deb" -o "/opt/checkmk.deb" - $STD apt-get install -y /opt/checkmk.deb + curl_with_retry "https://download.checkmk.com/checkmk/${RELEASE}/check-mk-community-${RELEASE}_0.$(get_os_info codename)_amd64.deb" "/opt/checkmk.deb" + $STD apt install -y /opt/checkmk.deb $STD omd --force -V ${RELEASE}.cre update --conflict=install monitoring $STD omd start monitoring $STD omd -f rm monitoringbackup $STD omd cleanup rm -rf /opt/checkmk.deb - msg_ok "Updated ${APP}" + msg_ok "Updated checkmk" msg_ok "Updated successfully!" - exit } diff --git a/ct/convertx.sh b/ct/convertx.sh index 968e15e77..167982669 100644 --- a/ct/convertx.sh +++ b/ct/convertx.sh @@ -24,7 +24,7 @@ function update_script() { header_info check_container_storage check_container_resources - if [[ ! -d /var ]]; then + if [[ ! -d /opt/convertx ]]; then msg_error "No ${APP} Installation Found!" 
exit fi @@ -33,6 +33,8 @@ function update_script() { systemctl stop convertx msg_info "Stopped Service" + ensure_dependencies libreoffice-writer + msg_info "Move data-Folder" if [[ -d /opt/convertx/data ]]; then mv /opt/convertx/data /opt/data diff --git a/ct/coredns.sh b/ct/coredns.sh new file mode 100644 index 000000000..7bd910674 --- /dev/null +++ b/ct/coredns.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/coredns/coredns + +APP="CoreDNS" +var_tags="${var_tags:-dns;network}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-256}" +var_disk="${var_disk:-1}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -f /usr/local/bin/coredns ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_release "coredns" "coredns/coredns"; then + msg_info "Stopping Service" + systemctl stop coredns + msg_ok "Stopped Service" + + fetch_and_deploy_gh_release "coredns" "coredns/coredns" "prebuild" "latest" "/usr/local/bin" \ + "coredns_*_linux_$(dpkg --print-architecture).tgz" + chmod +x /usr/local/bin/coredns + + msg_info "Starting Service" + systemctl start coredns + msg_ok "Started Service" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} CoreDNS is listening on port 53 (DNS)${CL}" +echo -e "${TAB}${GATEWAY}${BGN}dns://${IP}${CL}" diff --git a/ct/dagu.sh b/ct/dagu.sh new file mode 100644 index 000000000..9eb8aeb99 --- /dev/null +++ b/ct/dagu.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://dagu.sh/ + +APP="Dagu" +var_tags="${var_tags:-automation;workflow;scheduler}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-512}" +var_disk="${var_disk:-4}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -f /opt/dagu/dagu ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_release "dagu" "dagucloud/dagu"; then + msg_info "Stopping Service" + systemctl stop dagu + msg_ok "Stopped Service" + + msg_info "Backing up Data" + cp -r /opt/dagu/data /opt/dagu_data_backup + msg_ok "Backed up Data" + + fetch_and_deploy_gh_release "dagu" "dagucloud/dagu" "prebuild" "latest" "/opt/dagu" "dagu_*_linux_amd64.tar.gz" + + msg_info "Restoring Data" + mkdir -p /opt/dagu/data + cp -r /opt/dagu_data_backup/. /opt/dagu/data + rm -rf /opt/dagu_data_backup + msg_ok "Restored Data" + + msg_info "Starting Service" + systemctl start dagu + msg_ok "Started Service" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8080${CL}" diff --git a/ct/dashy.sh b/ct/dashy.sh new file mode 100644 index 000000000..cd885200f --- /dev/null +++ b/ct/dashy.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: tteck (tteckster) | Co-Author: CrazyWolf13 +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://dashy.to/ + +APP="Dashy" +var_tags="${var_tags:-dashboard}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-6}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + if [[ ! -d /opt/dashy/public/ ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + if check_for_gh_release "dashy" "Lissy93/dashy"; then + msg_info "Stopping Service" + systemctl stop dashy + msg_ok "Stopped Service" + + msg_info "Backing up conf.yml" + if [[ -f /opt/dashy/public/conf.yml ]]; then + cp -R /opt/dashy/public/conf.yml /opt/dashy_conf_backup.yml + else + cp -R /opt/dashy/user-data/conf.yml /opt/dashy_conf_backup.yml + fi + msg_ok "Backed up conf.yml" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "dashy" "Lissy93/dashy" "prebuild" "latest" "/opt/dashy" "dashy-*.tar.gz" + + msg_info "Updating Dashy" + cd /opt/dashy + $STD yarn install --ignore-engines --network-timeout 300000 + msg_ok "Updated Dashy" + + msg_info "Restoring conf.yml" + cp -R /opt/dashy_conf_backup.yml /opt/dashy/user-data + msg_ok "Restored conf.yml" + + msg_info "Cleaning" + rm -rf /opt/dashy_conf_backup.yml /opt/dashy/public/conf.yml + msg_ok "Cleaned" + + msg_info "Starting Dashy" + systemctl start dashy + msg_ok "Started Dashy" + msg_ok "Updated successfully!" + fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:4000${CL}" diff --git a/ct/databasus.sh b/ct/databasus.sh index 1035c1cf3..06ed39d42 100644 --- a/ct/databasus.sh +++ b/ct/databasus.sh @@ -35,7 +35,10 @@ function update_script() { msg_ok "Stopped Databasus" msg_info "Backing up Configuration" - cp /opt/databasus/.env /opt/databasus.env.bak + [[ ! -f /.env && -f /opt/databasus/.env ]] && cp /opt/databasus/.env /.env + chmod 600 /.env + cp /.env /opt/databasus.env.bak + chmod 600 /opt/databasus.env.bak msg_ok "Backed up Configuration" msg_info "Ensuring Database Clients" @@ -46,7 +49,7 @@ function update_script() { # Install MongoDB Database Tools via direct .deb (no APT repo for Debian 13) if ! 
command -v mongodump &>/dev/null; then [[ "$(get_os_info id)" == "ubuntu" ]] && MONGO_DIST="ubuntu2204" || MONGO_DIST="debian12" - fetch_and_deploy_from_url "https://fastdl.mongodb.org/tools/db/mongodb-database-tools-${MONGO_DIST}-x86_64-100.14.1.deb" + fetch_and_deploy_from_url "https://fastdl.mongodb.org/tools/db/mongodb-database-tools-${MONGO_DIST}-x86_64-100.16.1.deb" fi [[ -f /usr/bin/mongodump ]] && ln -sf /usr/bin/mongodump /usr/local/mongodb-database-tools/bin/mongodump [[ -f /usr/bin/mongorestore ]] && ln -sf /usr/bin/mongorestore /usr/local/mongodb-database-tools/bin/mongorestore @@ -66,9 +69,12 @@ function update_script() { CLEAN_INSTALL=1 fetch_and_deploy_gh_release "databasus" "databasus/databasus" "tarball" "latest" "/opt/databasus" msg_info "Updating Databasus" + export COREPACK_ENABLE_DOWNLOAD_PROMPT=0 cd /opt/databasus/frontend - $STD npm ci - $STD npm run build + $STD corepack enable + $STD corepack prepare pnpm@latest --activate + $STD pnpm install --frozen-lockfile + $STD pnpm run build cd /opt/databasus/backend $STD go mod download $STD /root/go/bin/swag init -g cmd/main.go -o swagger @@ -81,11 +87,18 @@ function update_script() { msg_ok "Updated Databasus" msg_info "Restoring Configuration" - cp /opt/databasus.env.bak /opt/databasus/.env + cp /opt/databasus.env.bak /.env rm -f /opt/databasus.env.bak - chown postgres:postgres /opt/databasus/.env + chmod 600 /.env msg_ok "Restored Configuration" + if ! 
grep -q "EnvironmentFile=/.env" /etc/systemd/system/databasus.service; then + msg_info "Updating Service" + sed -i 's|EnvironmentFile=.*|EnvironmentFile=/.env|' /etc/systemd/system/databasus.service + $STD systemctl daemon-reload + msg_ok "Updated Service" + fi + msg_info "Starting Databasus" $STD systemctl start databasus msg_ok "Started Databasus" diff --git a/ct/dawarich.sh b/ct/dawarich.sh index d2d260998..805a6c0a1 100644 --- a/ct/dawarich.sh +++ b/ct/dawarich.sh @@ -53,6 +53,18 @@ function update_script() { export PATH="/root/.rbenv/shims:/root/.rbenv/bin:$PATH" eval "$(/root/.rbenv/bin/rbenv init - bash)" + if ! grep -q "OTP_ENCRYPTION_PRIMARY_KEY" /opt/dawarich/.env; then + echo "OTP_ENCRYPTION_PRIMARY_KEY=$(openssl rand -hex 64)" >>/opt/dawarich/.env + fi + + if ! grep -q "OTP_ENCRYPTION_DETERMINISTIC_KEY" /opt/dawarich/.env; then + echo "OTP_ENCRYPTION_DETERMINISTIC_KEY=$(openssl rand -hex 64)" >>/opt/dawarich/.env + fi + + if ! grep -q "OTP_ENCRYPTION_KEY_DERIVATION_SALT" /opt/dawarich/.env; then + echo "OTP_ENCRYPTION_KEY_DERIVATION_SALT=$(openssl rand -hex 64)" >>/opt/dawarich/.env + fi + set -a && source /opt/dawarich/.env && set +a $STD bundle config set --local deployment 'true' @@ -67,8 +79,8 @@ function update_script() { $STD npm install fi - $STD bundle exec rake assets:precompile $STD bundle exec rails db:migrate + $STD bundle exec rake assets:precompile $STD bundle exec rake data:migrate msg_ok "Ran Migrations" diff --git a/ct/discopanel.sh b/ct/discopanel.sh index 268ee86d3..5bd026423 100644 --- a/ct/discopanel.sh +++ b/ct/discopanel.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) # Copyright (c) 2021-2026 community-scripts ORG -# Author: DragoQC +# Author: DragoQC | Co-Author: nickheyer # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE # Source: https://discopanel.app/ | Github: https://github.com/nickheyer/discopanel 
@@ -38,34 +38,15 @@ function update_script() { msg_info "Creating Backup" mkdir -p /opt/discopanel_backup_temp - cp -r /opt/discopanel/data/discopanel.db \ - /opt/discopanel/data/.recovery_key \ - /opt/discopanel_backup_temp/ - if [[ -d /opt/discopanel/data/servers ]]; then - cp -r /opt/discopanel/data/servers /opt/discopanel_backup_temp/ - fi + cp /opt/discopanel/data/discopanel.db /opt/discopanel_backup_temp/discopanel.db msg_ok "Created Backup" - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "discopanel" "nickheyer/discopanel" "tarball" "latest" "/opt/discopanel" - - msg_info "Setting up DiscoPanel" - cd /opt/discopanel - $STD make gen - cd /opt/discopanel/web/discopanel - $STD npm install - $STD npm run build - msg_ok "Built Web Interface" - - setup_go - - msg_info "Building DiscoPanel" - cd /opt/discopanel - $STD go build -o discopanel cmd/discopanel/main.go - msg_ok "Built DiscoPanel" + fetch_and_deploy_gh_release "discopanel" "nickheyer/discopanel" "prebuild" "latest" "/opt/discopanel" "discopanel-linux-amd64.tar.gz" + ln -sf /opt/discopanel/discopanel-linux-amd64 /opt/discopanel/discopanel msg_info "Restoring Data" mkdir -p /opt/discopanel/data - cp -a /opt/discopanel_backup_temp/. 
/opt/discopanel/data/ + mv /opt/discopanel_backup_temp/discopanel.db /opt/discopanel/data/discopanel.db rm -rf /opt/discopanel_backup_temp msg_ok "Restored Data" diff --git a/ct/dispatcharr.sh b/ct/dispatcharr.sh index 361f8304e..c656a9a4a 100644 --- a/ct/dispatcharr.sh +++ b/ct/dispatcharr.sh @@ -70,7 +70,7 @@ function update_script() { source /opt/dispatcharr/.env set +o allexport if [[ -n "$POSTGRES_DB" ]] && [[ -n "$POSTGRES_USER" ]] && [[ -n "$POSTGRES_PASSWORD" ]]; then - PGPASSWORD=$POSTGRES_PASSWORD pg_dump -U $POSTGRES_USER -h ${POSTGRES_HOST:-localhost} $POSTGRES_DB >/tmp/dispatcharr_db_$(date +%F).sql + PGPASSWORD=$POSTGRES_PASSWORD pg_dump -U "$POSTGRES_USER" -h "${POSTGRES_HOST:-localhost}" -p "${POSTGRES_PORT:-5432}" "$POSTGRES_DB" >/tmp/dispatcharr_db_$(date +%F).sql msg_info "Database backup created" fi fi @@ -110,7 +110,9 @@ function update_script() { msg_info "Building Frontend" cd /opt/dispatcharr/frontend - $STD npm install --legacy-peer-deps + node -e "const p=require('./package.json');p.overrides=p.overrides||{};p.overrides['webworkify-webpack']='2.1.3';require('fs').writeFileSync('package.json',JSON.stringify(p,null,2));" + rm -f package-lock.json + $STD npm install --no-audit --progress=false $STD npm run build msg_ok "Built Frontend" diff --git a/ct/domain-locker.sh b/ct/domain-locker.sh index 6eb67c5e6..8f08a02fa 100644 --- a/ct/domain-locker.sh +++ b/ct/domain-locker.sh @@ -20,41 +20,43 @@ color catch_errors function update_script() { - header_info - check_container_storage - check_container_resources - if [[ ! -d /opt/domain-locker ]]; then - msg_error "No ${APP} Installation Found!" 
- exit - fi - - if check_for_gh_release "domain-locker" "Lissy93/domain-locker"; then - msg_info "Stopping Service" - systemctl stop domain-locker - msg_info "Service stopped" - - PG_VERSION="17" setup_postgresql - NODE_VERSION="22" setup_nodejs - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "domain-locker" "Lissy93/domain-locker" "tarball" - - msg_info "Installing Modules (patience)" - cd /opt/domain-locker - $STD npm install - msg_ok "Installed Modules" - - msg_info "Building Domain-Locker (a lot of patience)" - set -a - source /opt/domain-locker.env - set +a - $STD npm run build - msg_info "Built Domain-Locker" - - msg_info "Restarting Services" - systemctl start domain-locker - msg_ok "Restarted Services" - msg_ok "Updated successfully!" - fi + header_info + check_container_storage + check_container_resources + if [[ ! -d /opt/domain-locker ]]; then + msg_error "No ${APP} Installation Found!" exit + fi + + ensure_dependencies whois + + if check_for_gh_release "domain-locker" "Lissy93/domain-locker"; then + msg_info "Stopping Service" + systemctl stop domain-locker + msg_info "Service stopped" + + PG_VERSION="17" setup_postgresql + NODE_VERSION="22" setup_nodejs + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "domain-locker" "Lissy93/domain-locker" "tarball" + + msg_info "Installing Modules (patience)" + cd /opt/domain-locker + $STD npm install + msg_ok "Installed Modules" + + msg_info "Building Domain-Locker (a lot of patience)" + set -a + source /opt/domain-locker.env + set +a + $STD npm run build + msg_info "Built Domain-Locker" + + msg_info "Restarting Services" + systemctl start domain-locker + msg_ok "Restarted Services" + msg_ok "Updated successfully!" + fi + exit } start diff --git a/ct/domain-monitor.sh b/ct/domain-monitor.sh index 8d699a08f..86462510f 100644 --- a/ct/domain-monitor.sh +++ b/ct/domain-monitor.sh @@ -34,7 +34,7 @@ function update_script() { fi if ! 
grep -Fq "www-data /usr/bin/php /opt/domain-monitor/cron/check_domains.php" /etc/crontab; then - echo "0 0 * * * www-data /usr/bin/php /opt/domain-monitor/cron/check_domains.php" >> /etc/crontab + echo "0 0 * * * www-data /usr/bin/php /opt/domain-monitor/cron/check_domains.php" >>/etc/crontab fi if check_for_gh_release "domain-monitor" "Hosteroid/domain-monitor"; then @@ -52,6 +52,7 @@ function update_script() { msg_info "Updating Domain Monitor" cd /opt/domain-monitor $STD composer install + chown -R www-data:www-data /opt/domain-monitor msg_ok "Updated Domain Monitor" msg_info "Restoring backup" @@ -59,7 +60,7 @@ function update_script() { msg_ok "Restored backup" msg_info "Restarting Services" - systemctl reload apache2 + systemctl start apache2 msg_ok "Restarted Services" msg_ok "Updated successfully!" fi diff --git a/ct/drawdb.sh b/ct/drawdb.sh new file mode 100644 index 000000000..48cba4650 --- /dev/null +++ b/ct/drawdb.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/drawdb-io/drawdb + +APP="DrawDB" +var_tags="${var_tags:-database;dev-tools}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-6144}" +var_disk="${var_disk:-5}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/drawdb ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + if check_for_gh_tag "drawdb" "drawdb-io/drawdb"; then + CLEAN_INSTALL=1 fetch_and_deploy_gh_tag "drawdb" "drawdb-io/drawdb" "latest" "/opt/drawdb" + + msg_info "Rebuilding Frontend" + cd /opt/drawdb + $STD npm ci + NODE_OPTIONS="--max-old-space-size=4096" $STD npm run build + sed -i '//a ' /opt/drawdb/dist/index.html + msg_ok "Rebuilt Frontend" + msg_ok "Updated successfully!" + fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}" diff --git a/ct/endurain.sh b/ct/endurain.sh index b115e8c35..9f38646dd 100644 --- a/ct/endurain.sh +++ b/ct/endurain.sh @@ -3,7 +3,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV # Copyright (c) 2021-2026 community-scripts ORG # Author: johanngrobe # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://github.com/joaovitoriasilva/endurain +# Source: https://codeberg.org/endurain-project/endurain APP="Endurain" var_tags="${var_tags:-sport;social-media}" @@ -28,7 +28,7 @@ function update_script() { msg_error "No ${APP} installation found!" 
exit 233 fi - if check_for_gh_release "endurain" "endurain-project/endurain"; then + if check_for_codeberg_release "endurain" "endurain-project/endurain"; then msg_info "Stopping Service" systemctl stop endurain msg_ok "Stopped Service" @@ -38,7 +38,7 @@ function update_script() { cp /opt/endurain/frontend/app/dist/env.js /opt/endurain.env.js msg_ok "Created Backup" - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "endurain" "endurain-project/endurain" "tarball" "latest" "/opt/endurain" + CLEAN_INSTALL=1 fetch_and_deploy_codeberg_release "endurain" "endurain-project/endurain" "tarball" "latest" "/opt/endurain" msg_info "Preparing Update" cd /opt/endurain diff --git a/ct/erpnext.sh b/ct/erpnext.sh new file mode 100644 index 000000000..94fb7b764 --- /dev/null +++ b/ct/erpnext.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/frappe/erpnext + +APP="ERPNext" +var_tags="${var_tags:-erp;business;accounting}" +var_cpu="${var_cpu:-4}" +var_ram="${var_ram:-4096}" +var_disk="${var_disk:-20}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + if [[ ! -d /opt/frappe-bench ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + msg_info "Updating ERPNext" + $STD sudo -u frappe bash -c 'export PATH="$HOME/.local/bin:$PATH"; cd /opt/frappe-bench && bench update --reset' + msg_ok "Updated ERPNext" + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}" +echo -e "${INFO}${YW} Credentials:${CL}" +echo -e "${TAB}${BGN}Username: Administrator${CL}" +echo -e "${TAB}${BGN}Password: see ~/erpnext.creds${CL}" diff --git a/ct/fileflows.sh b/ct/fileflows.sh index 07cf62d8e..f2e20f1c4 100644 --- a/ct/fileflows.sh +++ b/ct/fileflows.sh @@ -29,11 +29,11 @@ function update_script() { msg_error "No ${APP} Installation Found!" exit fi - + update_available=$(curl -fsSL -X 'GET' "http://localhost:19200/api/status/update-available" -H 'accept: application/json' | jq .UpdateAvailable) if [[ "${update_available}" == "true" ]]; then msg_info "Stopping Service" - systemctl stop fileflows + systemctl stop fileflows* msg_info "Stopped Service" msg_info "Creating Backup" @@ -45,7 +45,7 @@ function update_script() { fetch_and_deploy_from_url "https://fileflows.com/downloads/zip" "/opt/fileflows" msg_info "Starting Service" - systemctl start fileflows + systemctl start fileflows* msg_ok "Started Service" msg_ok "Updated successfully!" 
else diff --git a/ct/fireshare.sh b/ct/fireshare.sh new file mode 100644 index 000000000..c43f9b91e --- /dev/null +++ b/ct/fireshare.sh @@ -0,0 +1,74 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: Slaviša Arežina (tremor021) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/ShaneIsrael/fireshare + +APP="Fireshare" +var_tags="${var_tags:-sharing;video}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-10}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + if [[ ! -d /opt/fireshare ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_release "fireshare" "ShaneIsrael/fireshare"; then + msg_info "Stopping Service" + systemctl stop fireshare + msg_ok "Stopped Service" + + mv /opt/fireshare/fireshare.env /opt + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "fireshare" "ShaneIsrael/fireshare" "tarball" + mv /opt/fireshare.env /opt/fireshare + rm -f /usr/local/bin/fireshare + + msg_info "Updating Fireshare" + cd /opt/fireshare + $STD uv venv --clear + $STD .venv/bin/python -m ensurepip --upgrade + $STD .venv/bin/python -m pip install --upgrade --break-system-packages pip + $STD .venv/bin/python -m pip install --no-cache-dir --break-system-packages --ignore-installed app/server + cp .venv/bin/fireshare /usr/local/bin/fireshare + export FLASK_APP="/opt/fireshare/app/server/fireshare:create_app()" + export DATA_DIRECTORY=/opt/fireshare-data + export IMAGE_DIRECTORY=/opt/fireshare-images + export VIDEO_DIRECTORY=/opt/fireshare-videos + export PROCESSED_DIRECTORY=/opt/fireshare-processed + $STD uv run flask db 
upgrade + msg_ok "Updated Fireshare" + + msg_info "Starting Service" + systemctl start fireshare + msg_ok "Started Service" + msg_ok "Updated successfully!" + fi + cleanup_lxc + + exit +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}" diff --git a/ct/flatnotes.sh b/ct/flatnotes.sh index 06e188e23..b88065247 100644 --- a/ct/flatnotes.sh +++ b/ct/flatnotes.sh @@ -38,7 +38,7 @@ function update_script() { cp -r /opt/flatnotes/data /opt/flatnotes_data_backup msg_ok "Backed up Configuration and Data" - fetch_and_deploy_gh_release "flatnotes" "dullage/flatnotes" + fetch_and_deploy_gh_release "flatnotes" "dullage/flatnotes" "tarball" msg_info "Updating Flatnotes" cd /opt/flatnotes/client diff --git a/ct/flowiseai.sh b/ct/flowiseai.sh index 0dea7fc76..59e068f1e 100644 --- a/ct/flowiseai.sh +++ b/ct/flowiseai.sh @@ -27,9 +27,16 @@ function update_script() { msg_error "No ${APP} Installation Found!" exit fi + + NODE_VERSION="20" NODE_MODULE="pnpm" setup_nodejs + msg_info "Updating FlowiseAI (this may take some time)" systemctl stop flowise - $STD npm install -g flowise --upgrade + $STD pnpm add -g flowise + if grep -q 'ExecStart=npx flowise start' /etc/systemd/system/flowise.service; then + sed -i 's|ExecStart=npx flowise start|ExecStart=flowise start|' /etc/systemd/system/flowise.service + systemctl daemon-reload + fi systemctl start flowise msg_ok "Updated FlowiseAI" msg_ok "Updated successfully!" 
diff --git a/ct/foldergram.sh b/ct/foldergram.sh new file mode 100644 index 000000000..47c4605d9 --- /dev/null +++ b/ct/foldergram.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: Slaviša Arežina (tremor021) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/foldergram/foldergram + +APP="Foldergram" +var_tags="${var_tags:-photos}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-4}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + if [[ ! -d /opt/foldergram ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_release "foldergram" "foldergram/foldergram"; then + msg_info "Stopping Service" + systemctl stop foldergram + msg_ok "Stopped Service" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "foldergram" "foldergram/foldergram" "tarball" + + msg_info "Installing Foldergram" + cd /opt/foldergram + $STD pnpm install --frozen-lockfile + $STD pnpm run build + msg_ok "Installed Foldergram" + + msg_info "Starting Service" + systemctl start foldergram + msg_ok "Started Service" + msg_ok "Updated successfully!" 
+ fi + cleanup_lxc + exit +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:4141${CL}" diff --git a/ct/frigate.sh b/ct/frigate.sh index 99a3c9184..229722e79 100644 --- a/ct/frigate.sh +++ b/ct/frigate.sh @@ -7,7 +7,7 @@ source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxV APP="Frigate" var_tags="${var_tags:-nvr}" -var_cpu="${var_cpu:-4}" +var_cpu="${var_cpu:-8}" var_ram="${var_ram:-4096}" var_disk="${var_disk:-20}" var_os="${var_os:-debian}" diff --git a/ct/geopulse.sh b/ct/geopulse.sh new file mode 100644 index 000000000..8354f17bf --- /dev/null +++ b/ct/geopulse.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: CrazyWolf13 +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/tess1o/geopulse + +APP="GeoPulse" +var_tags="${var_tags:-location;tracking;gps}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-1024}" +var_disk="${var_disk:-8}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -f /opt/geopulse/backend/geopulse-backend ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + if check_for_gh_release "geopulse-backend" "tess1o/geopulse"; then + msg_info "Stopping Service" + systemctl stop geopulse-backend + msg_ok "Stopped Service" + + if [[ "$(uname -m)" == "aarch64" ]]; then + if grep -qi "raspberry\|bcm" /proc/cpuinfo 2>/dev/null; then + BINARY_PATTERN="geopulse-backend-native-arm64-compat-*" + else + BINARY_PATTERN="geopulse-backend-native-arm64-[!c]*" + fi + else + if grep -q avx2 /proc/cpuinfo && grep -q bmi2 /proc/cpuinfo && grep -q fma /proc/cpuinfo; then + BINARY_PATTERN="geopulse-backend-native-amd64-[!c]*" + else + BINARY_PATTERN="geopulse-backend-native-amd64-compat-*" + fi + fi + + fetch_and_deploy_gh_release "geopulse-backend" "tess1o/geopulse" "singlefile" "latest" "/opt/geopulse/backend" "${BINARY_PATTERN}" + fetch_and_deploy_gh_release "geopulse-frontend" "tess1o/geopulse" "prebuild" "latest" "/var/www/geopulse" "geopulse-frontend-*.tar.gz" + + msg_info "Starting Service" + systemctl start geopulse-backend + msg_ok "Started Service" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}" +echo -e "${INFO}${YW} To create an admin account, run:${CL}" +echo -e "${TAB}${BGN}/usr/local/bin/create-geopulse-admin${CL}" diff --git a/ct/ghost.sh b/ct/ghost.sh index 61ef14bb6..27c780227 100644 --- a/ct/ghost.sh +++ b/ct/ghost.sh @@ -25,7 +25,7 @@ function update_script() { check_container_resources setup_mariadb - NODE_VERSION="22" setup_nodejs + NODE_VERSION="22" NODE_MODULE="pnpm" setup_nodejs ensure_dependencies git msg_info "Updating Ghost" diff --git a/ct/ghostfolio.sh b/ct/ghostfolio.sh index a039d760d..3c20e0f2d 100644 --- a/ct/ghostfolio.sh +++ b/ct/ghostfolio.sh @@ -47,6 +47,7 @@ function update_script() { msg_info "Updating Ghostfolio" mv /opt/env.backup /opt/ghostfolio/.env + sed -i -E '/^DATABASE_URL=/ s/[?&]sslmode=prefer//g' /opt/ghostfolio/.env cd /opt/ghostfolio $STD npm ci $STD npm run build:production diff --git a/ct/github-runner.sh b/ct/github-runner.sh new file mode 100644 index 000000000..b959f5089 --- /dev/null +++ b/ct/github-runner.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/actions/runner + +APP="GitHub-Runner" +var_tags="${var_tags:-ci}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-8}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" +var_nesting="${var_nesting:-1}" +var_keyctl="${var_keyctl:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() 
{ + header_info + check_container_storage + check_container_resources + + if [[ ! -f /opt/actions-runner/run.sh ]]; then + msg_error "No ${APP} Installation Found!" + exit 1 + fi + + if check_for_gh_release "actions-runner" "actions/runner"; then + msg_info "Stopping Service" + systemctl stop actions-runner + msg_ok "Stopped Service" + + msg_info "Backing up runner configuration" + BACKUP_DIR="/opt/actions-runner.backup" + mkdir -p "$BACKUP_DIR" + for f in .runner .credentials .credentials_rsaparams .env .path; do + [[ -f /opt/actions-runner/$f ]] && cp -a /opt/actions-runner/$f "$BACKUP_DIR/" + done + msg_ok "Backed up configuration" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "actions-runner" "actions/runner" "prebuild" "latest" "/opt/actions-runner" "actions-runner-linux-x64-*.tar.gz" + + msg_info "Restoring runner configuration" + for f in .runner .credentials .credentials_rsaparams .env .path; do + [[ -f "$BACKUP_DIR/$f" ]] && cp -a "$BACKUP_DIR/$f" /opt/actions-runner/ + done + rm -rf "$BACKUP_DIR" + msg_ok "Restored configuration" + + msg_info "Starting Service" + systemctl start actions-runner + msg_ok "Started Service" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} After first boot, run config.sh with your token and start the service.${CL}" diff --git a/ct/gluetun.sh b/ct/gluetun.sh new file mode 100644 index 000000000..4ba2808f5 --- /dev/null +++ b/ct/gluetun.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/qdm12/gluetun + +APP="Gluetun" +var_tags="${var_tags:-vpn;wireguard;openvpn}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-8}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" +var_tun="${var_tun:-yes}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -f /usr/local/bin/gluetun ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_release "gluetun" "qdm12/gluetun"; then + msg_info "Stopping Service" + systemctl stop gluetun + msg_ok "Stopped Service" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "gluetun" "qdm12/gluetun" "tarball" + + msg_info "Building Gluetun" + cd /opt/gluetun + $STD go mod download + CGO_ENABLED=0 $STD go build -trimpath -ldflags="-s -w" -o /usr/local/bin/gluetun ./cmd/gluetun/ + msg_ok "Built Gluetun" + + msg_info "Starting Service" + systemctl start gluetun + msg_ok "Started Service" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8000${CL}" diff --git a/ct/gogs.sh b/ct/gogs.sh new file mode 100644 index 000000000..09d557682 --- /dev/null +++ b/ct/gogs.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://gogs.io/ + +APP="Gogs" +var_tags="${var_tags:-git;code;devops}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-1024}" +var_disk="${var_disk:-8}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -f /opt/gogs/gogs ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_release "gogs" "gogs/gogs"; then + msg_info "Stopping Service" + systemctl stop gogs + msg_ok "Stopped Service" + + msg_info "Backing up Data" + cp -r /opt/gogs/custom /opt/gogs_custom_backup + cp -r /opt/gogs/data /opt/gogs_data_backup + msg_ok "Backed up Data" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "gogs" "gogs/gogs" "prebuild" "latest" "/opt/gogs" "gogs_*_linux_amd64.tar.gz" + + msg_info "Restoring Data" + cp -r /opt/gogs_custom_backup/. /opt/gogs/custom + cp -r /opt/gogs_data_backup/. /opt/gogs/data + rm -rf /opt/gogs_custom_backup /opt/gogs_data_backup + msg_ok "Restored Data" + + msg_info "Starting Service" + systemctl start gogs + msg_ok "Started Service" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}" diff --git a/ct/gokapi.sh b/ct/gokapi.sh index a5d515693..087c6db8f 100644 --- a/ct/gokapi.sh +++ b/ct/gokapi.sh @@ -32,7 +32,16 @@ function update_script() { systemctl stop gokapi msg_ok "Stopped Service" - fetch_and_deploy_gh_release "gokapi" "Forceu/Gokapi" "prebuild" "latest" "/opt/gokapi" "gokapi-linux_amd64.zip" + fetch_and_deploy_gh_release "gokapi" "Forceu/Gokapi" "prebuild" "latest" "/opt/gokapi" "*linux*amd64.zip" + + # Migrate from pre-v2.2.4 binary name (gokapi-linux_amd64 -> gokapi) + if [[ -f /opt/gokapi/gokapi-linux_amd64 ]]; then + rm -f /opt/gokapi/gokapi-linux_amd64 + fi + if grep -q "gokapi-linux_amd64" /etc/systemd/system/gokapi.service 2>/dev/null; then + sed -i 's|gokapi-linux_amd64|gokapi|g' /etc/systemd/system/gokapi.service + systemctl daemon-reload + fi msg_info "Starting Service" systemctl start gokapi diff --git a/ct/graylog.sh b/ct/graylog.sh index 1c2256931..a1681e640 100644 --- a/ct/graylog.sh +++ b/ct/graylog.sh @@ -37,7 +37,7 @@ function update_script() { CURRENT_VERSION=$(apt list --installed 2>/dev/null | grep graylog-server | grep -oP '\d+\.\d+\.\d+') if dpkg --compare-versions "$CURRENT_VERSION" lt "6.3"; then - MONGO_VERSION="8.0" setup_mongodb + MONGO_VERSION="8.2" setup_mongodb msg_info "Updating Graylog" $STD apt update @@ -64,6 +64,12 @@ function update_script() { } start + +if [[ $(sysctl -n vm.max_map_count 2>/dev/null) -lt 262144 ]]; then + sysctl -w vm.max_map_count=262144 >/dev/null 2>&1 + echo "vm.max_map_count=262144" >/etc/sysctl.d/graylog.conf +fi + build_container description diff --git a/ct/grist.sh b/ct/grist.sh index f037922b1..2a9893152 100644 --- a/ct/grist.sh +++ b/ct/grist.sh @@ -46,12 +46,13 @@ function update_script() 
{ msg_info "Updating Grist" mkdir -p /opt/grist/docs cp -n /opt/grist_bak/.env /opt/grist/.env - cp -r /opt/grist_bak/docs/* /opt/grist/docs/ - cp /opt/grist_bak/grist-sessions.db /opt/grist/grist-sessions.db - cp /opt/grist_bak/landing.db /opt/grist/landing.db + if ls /opt/grist_bak/docs/* &>/dev/null; then + cp -r /opt/grist_bak/docs/* /opt/grist/docs/ + fi + [[ -f /opt/grist_bak/grist-sessions.db ]] && cp /opt/grist_bak/grist-sessions.db /opt/grist/grist-sessions.db + [[ -f /opt/grist_bak/landing.db ]] && cp /opt/grist_bak/landing.db /opt/grist/landing.db cd /opt/grist $STD yarn install - $STD yarn run install:ee $STD yarn run build:prod $STD yarn run install:python msg_ok "Updated Grist" diff --git a/ct/headers/alpine-borgbackup-server b/ct/headers/alpine-borgbackup-server new file mode 100644 index 000000000..8adfaef2b --- /dev/null +++ b/ct/headers/alpine-borgbackup-server @@ -0,0 +1,6 @@ + ___ __ _ ____ ____ __ _____ + / | / /___ (_)___ ___ / __ )____ _________ _/ __ )____ ______/ /____ ______ / ___/___ ______ _____ _____ + / /| | / / __ \/ / __ \/ _ \______/ __ / __ \/ ___/ __ `/ __ / __ `/ ___/ //_/ / / / __ \______\__ \/ _ \/ ___/ | / / _ \/ ___/ + / ___ |/ / /_/ / / / / / __/_____/ /_/ / /_/ / / / /_/ / /_/ / /_/ / /__/ ,< / /_/ / /_/ /_____/__/ / __/ / | |/ / __/ / +/_/ |_/_/ .___/_/_/ /_/\___/ /_____/\____/_/ \__, /_____/\__,_/\___/_/|_|\__,_/ .___/ /____/\___/_/ |___/\___/_/ + /_/ /____/ /_/ diff --git a/ct/headers/alpine-ironclaw b/ct/headers/alpine-ironclaw new file mode 100644 index 000000000..f5e6dc2c3 --- /dev/null +++ b/ct/headers/alpine-ironclaw @@ -0,0 +1,6 @@ + ___ __ _ ____ ________ + / | / /___ (_)___ ___ / _/________ ____ / ____/ /___ __ __ + / /| | / / __ \/ / __ \/ _ \______ / // ___/ __ \/ __ \/ / / / __ `/ | /| / / + / ___ |/ / /_/ / / / / / __/_____// // / / /_/ / / / / /___/ / /_/ /| |/ |/ / +/_/ |_/_/ .___/_/_/ /_/\___/ /___/_/ \____/_/ /_/\____/_/\__,_/ |__/|__/ + /_/ diff --git a/ct/headers/alpine-ntfy b/ct/headers/alpine-ntfy new 
file mode 100644 index 000000000..bc4164342 --- /dev/null +++ b/ct/headers/alpine-ntfy @@ -0,0 +1,6 @@ + ___ __ _ __ ____ + / | / /___ (_)___ ___ ____ / /_/ __/_ __ + / /| | / / __ \/ / __ \/ _ \______/ __ \/ __/ /_/ / / / + / ___ |/ / /_/ / / / / / __/_____/ / / / /_/ __/ /_/ / +/_/ |_/_/ .___/_/_/ /_/\___/ /_/ /_/\__/_/ \__, / + /_/ /____/ diff --git a/ct/headers/alpine-wakapi b/ct/headers/alpine-wakapi new file mode 100644 index 000000000..350b826aa --- /dev/null +++ b/ct/headers/alpine-wakapi @@ -0,0 +1,6 @@ + ___ __ _ _ __ __ _ + / | / /___ (_)___ ___ | | / /___ _/ /______ _____ (_) + / /| | / / __ \/ / __ \/ _ \_____| | /| / / __ `/ //_/ __ `/ __ \/ / + / ___ |/ / /_/ / / / / / __/_____/ |/ |/ / /_/ / ,< / /_/ / /_/ / / +/_/ |_/_/ .___/_/_/ /_/\___/ |__/|__/\__,_/_/|_|\__,_/ .___/_/ + /_/ /_/ diff --git a/ct/headers/anchor b/ct/headers/anchor new file mode 100644 index 000000000..896f407c0 --- /dev/null +++ b/ct/headers/anchor @@ -0,0 +1,6 @@ + ___ __ + / | ____ _____/ /_ ____ _____ + / /| | / __ \/ ___/ __ \/ __ \/ ___/ + / ___ |/ / / / /__/ / / / /_/ / / +/_/ |_/_/ /_/\___/_/ /_/\____/_/ + diff --git a/ct/headers/anytype-server b/ct/headers/anytype-server new file mode 100644 index 000000000..9929fa06c --- /dev/null +++ b/ct/headers/anytype-server @@ -0,0 +1,6 @@ + ___ __ _____ + / | ____ __ __/ /___ ______ ___ / ___/___ ______ _____ _____ + / /| | / __ \/ / / / __/ / / / __ \/ _ \______\__ \/ _ \/ ___/ | / / _ \/ ___/ + / ___ |/ / / / /_/ / /_/ /_/ / /_/ / __/_____/__/ / __/ / | |/ / __/ / +/_/ |_/_/ /_/\__, /\__/\__, / .___/\___/ /____/\___/_/ |___/\___/_/ + /____/ /____/_/ diff --git a/ct/headers/apprise-api b/ct/headers/apprise-api new file mode 100644 index 000000000..a1f56c348 --- /dev/null +++ b/ct/headers/apprise-api @@ -0,0 +1,6 @@ + ___ _ ___ ____ ____ + / | ____ ____ _____(_)_______ / | / __ \/ _/ + / /| | / __ \/ __ \/ ___/ / ___/ _ \______/ /| | / /_/ // / + / ___ |/ /_/ / /_/ / / / (__ ) __/_____/ ___ |/ ____// / +/_/ |_/ .___/ .___/_/ 
/_/____/\___/ /_/ |_/_/ /___/ + /_/ /_/ diff --git a/ct/headers/bambuddy b/ct/headers/bambuddy new file mode 100644 index 000000000..dbb7eef63 --- /dev/null +++ b/ct/headers/bambuddy @@ -0,0 +1,6 @@ + ____ __ __ __ + / __ )____ _____ ___ / /_ __ ______/ /___/ /_ __ + / __ / __ `/ __ `__ \/ __ \/ / / / __ / __ / / / / + / /_/ / /_/ / / / / / / /_/ / /_/ / /_/ / /_/ / /_/ / +/_____/\__,_/_/ /_/ /_/_.___/\__,_/\__,_/\__,_/\__, / + /____/ diff --git a/ct/headers/birdnet-go b/ct/headers/birdnet-go new file mode 100644 index 000000000..87b1e6d17 --- /dev/null +++ b/ct/headers/birdnet-go @@ -0,0 +1,6 @@ + ____ _ ___ ______________ ______ + / __ )(_)________/ / | / / ____/_ __/ / ____/___ + / __ / / ___/ __ / |/ / __/ / /_____/ / __/ __ \ + / /_/ / / / / /_/ / /| / /___ / /_____/ /_/ / /_/ / +/_____/_/_/ \__,_/_/ |_/_____/ /_/ \____/\____/ + diff --git a/ct/headers/booklore b/ct/headers/booklore deleted file mode 100644 index 267b0346f..000000000 --- a/ct/headers/booklore +++ /dev/null @@ -1,6 +0,0 @@ - ____ __ __ - / __ )____ ____ / /__/ / ____ ________ - / __ / __ \/ __ \/ //_/ / / __ \/ ___/ _ \ - / /_/ / /_/ / /_/ / ,< / /___/ /_/ / / / __/ -/_____/\____/\____/_/|_/_____/\____/_/ \___/ - diff --git a/ct/headers/coredns b/ct/headers/coredns new file mode 100644 index 000000000..d9eeefd7b --- /dev/null +++ b/ct/headers/coredns @@ -0,0 +1,6 @@ + ______ ____ _ _______ + / ____/___ ________ / __ \/ | / / ___/ + / / / __ \/ ___/ _ \/ / / / |/ /\__ \ +/ /___/ /_/ / / / __/ /_/ / /| /___/ / +\____/\____/_/ \___/_____/_/ |_//____/ + diff --git a/ct/headers/daemonsync b/ct/headers/daemonsync deleted file mode 100644 index 532072375..000000000 --- a/ct/headers/daemonsync +++ /dev/null @@ -1,6 +0,0 @@ - ____ _____ - / __ \____ ____ ____ ___ ____ ____ / ___/__ ______ _____ - / / / / __ `/ _ \/ __ `__ \/ __ \/ __ \ \__ \/ / / / __ \/ ___/ - / /_/ / /_/ / __/ / / / / / /_/ / / / / ___/ / /_/ / / / / /__ -/_____/\__,_/\___/_/ /_/ /_/\____/_/ /_/ /____/\__, /_/ /_/\___/ - /____/ diff 
--git a/ct/headers/dagu b/ct/headers/dagu new file mode 100644 index 000000000..aab01f43c --- /dev/null +++ b/ct/headers/dagu @@ -0,0 +1,6 @@ + ____ + / __ \____ _____ ___ __ + / / / / __ `/ __ `/ / / / + / /_/ / /_/ / /_/ / /_/ / +/_____/\__,_/\__, /\__,_/ + /____/ diff --git a/ct/headers/dashy b/ct/headers/dashy new file mode 100644 index 000000000..330f9e3d0 --- /dev/null +++ b/ct/headers/dashy @@ -0,0 +1,6 @@ + ____ __ + / __ \____ ______/ /_ __ __ + / / / / __ `/ ___/ __ \/ / / / + / /_/ / /_/ (__ ) / / / /_/ / +/_____/\__,_/____/_/ /_/\__, / + /____/ diff --git a/ct/headers/drawdb b/ct/headers/drawdb new file mode 100644 index 000000000..b6720b510 --- /dev/null +++ b/ct/headers/drawdb @@ -0,0 +1,6 @@ + ____ ____ ____ + / __ \_________ __ __/ __ \/ __ ) + / / / / ___/ __ `/ | /| / / / / / __ | + / /_/ / / / /_/ /| |/ |/ / /_/ / /_/ / +/_____/_/ \__,_/ |__/|__/_____/_____/ + diff --git a/ct/headers/erpnext b/ct/headers/erpnext new file mode 100644 index 000000000..0fbb49f27 --- /dev/null +++ b/ct/headers/erpnext @@ -0,0 +1,6 @@ + __________ ____ _ __ __ + / ____/ __ \/ __ \/ | / /__ _ __/ /_ + / __/ / /_/ / /_/ / |/ / _ \| |/_/ __/ + / /___/ _, _/ ____/ /| / __/> > /etc/redis/redis.conf rm /etc/nginx/nginx.conf cp /opt/homarr/nginx.conf /etc/nginx/templates/nginx.conf msg_ok "Updated Homarr" diff --git a/ct/homeassistant.sh b/ct/homeassistant.sh index 8c58b2ab2..7818f1e90 100644 --- a/ct/homeassistant.sh +++ b/ct/homeassistant.sh @@ -73,7 +73,7 @@ function update_script() { $STD curl -fsSL https://github.com/filebrowser/filebrowser/releases/download/v2.23.0/linux-amd64-filebrowser.tar.gz | tar -xzv -C /usr/local/bin $STD filebrowser config init -a '0.0.0.0' $STD filebrowser config set -a '0.0.0.0' - $STD filebrowser users add admin helper-scripts.com --perm.admin + $STD filebrowser users add admin community-scripts.org --perm.admin msg_ok "Installed FileBrowser" msg_info "Creating Service" @@ -93,7 +93,7 @@ WantedBy=default.target" >$service_path msg_ok 
"Completed successfully!\n" echo -e "FileBrowser should be reachable by going to the following URL. - ${BL}http://$LOCAL_IP:8080${CL} admin|helper-scripts.com\n" + ${BL}http://$LOCAL_IP:8080${CL} admin|community-scripts.org\n" exit fi } diff --git a/ct/homelable.sh b/ct/homelable.sh new file mode 100644 index 000000000..eb5a67888 --- /dev/null +++ b/ct/homelable.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/Pouzor/homelable + +APP="Homelable" +var_tags="${var_tags:-monitoring;network;visualization}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-8}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/homelable ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + if check_for_gh_release "homelable" "Pouzor/homelable"; then + msg_info "Stopping Service" + systemctl stop homelable + msg_ok "Stopped Service" + + msg_info "Backing up Configuration and Data" + cp /opt/homelable/backend/.env /opt/homelable.env.bak + cp -r /opt/homelable/data /opt/homelable_data_bak + msg_ok "Backed up Configuration and Data" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "homelable" "Pouzor/homelable" "tarball" "latest" "/opt/homelable" + + msg_info "Updating Python Dependencies" + cd /opt/homelable/backend + $STD uv venv --clear /opt/homelable/backend/.venv + $STD uv pip install --python /opt/homelable/backend/.venv/bin/python -r requirements.txt + msg_ok "Updated Python Dependencies" + + msg_info "Rebuilding Frontend" + cd /opt/homelable/frontend + $STD npm ci + $STD npm run build + msg_ok "Rebuilt Frontend" + + msg_info "Restoring Configuration and Data" + cp /opt/homelable.env.bak /opt/homelable/backend/.env + cp -r /opt/homelable_data_bak/. /opt/homelable/data/ + rm -f /opt/homelable.env.bak + rm -rf /opt/homelable_data_bak + msg_ok "Restored Configuration and Data" + + msg_info "Starting Service" + systemctl start homelable + msg_ok "Started Service" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}" diff --git a/ct/homepage.sh b/ct/homepage.sh index 0d562c947..756c40e1a 100644 --- a/ct/homepage.sh +++ b/ct/homepage.sh @@ -54,6 +54,7 @@ function update_script() { msg_info "Updating Homepage (Patience)" RELEASE=$(get_latest_github_release "gethomepage/homepage") cd /opt/homepage + echo 'onlyBuiltDependencies=*' >> .npmrc $STD pnpm install $STD pnpm update --no-save caniuse-lite export NEXT_PUBLIC_VERSION="v$RELEASE" diff --git a/ct/hoodik.sh b/ct/hoodik.sh new file mode 100644 index 000000000..22fbda9d1 --- /dev/null +++ b/ct/hoodik.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/hudikhq/hoodik + +APP="Hoodik" +var_tags="${var_tags:-cloud;storage}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-1024}" +var_disk="${var_disk:-5}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -f /opt/hoodik/hoodik ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + if check_for_gh_release "hoodik" "hudikhq/hoodik"; then + msg_info "Stopping Service" + systemctl stop hoodik + msg_ok "Stopped Service" + + msg_info "Backing up Configuration" + cp /opt/hoodik/.env /opt/hoodik.env.bak + msg_ok "Backed up Configuration" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "hoodik" "hudikhq/hoodik" "prebuild" "latest" "/opt/hoodik" "*x86_64.tar.gz" + + msg_info "Restoring Configuration" + cp /opt/hoodik.env.bak /opt/hoodik/.env + rm -f /opt/hoodik.env.bak + msg_ok "Restored Configuration" + + msg_info "Starting Service" + systemctl start hoodik + msg_ok "Started Service" + msg_ok "Updated successfully!" + fi + exit +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:5443/auth/register${CL}" diff --git a/ct/hortusfox.sh b/ct/hortusfox.sh index 4f3f6fb19..812ee9ce5 100644 --- a/ct/hortusfox.sh +++ b/ct/hortusfox.sh @@ -38,13 +38,15 @@ function update_script() { mv /opt/hortusfox/ /opt/hortusfox-backup msg_ok "Backed up current HortusFox installation" - fetch_and_deploy_gh_release "hortusfox" "danielbrendel/hortusfox-web" "tarball" + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "hortusfox" "danielbrendel/hortusfox-web" "tarball" msg_info "Updating HortusFox" cd /opt/hortusfox - mv /opt/hortusfox-backup/.env /opt/hortusfox/.env + cp /opt/hortusfox-backup/.env /opt/hortusfox/.env + cp -a /opt/hortusfox-backup/public/img/. 
/opt/hortusfox/public/img/ + export COMPOSER_ALLOW_SUPERUSER=1 $STD composer install --no-dev --optimize-autoloader - $STD php asatru migrate --no-interaction + $STD php asatru migrate:upgrade $STD php asatru plants:attributes $STD php asatru calendar:classes chown -R www-data:www-data /opt/hortusfox diff --git a/ct/igotify.sh b/ct/igotify.sh new file mode 100644 index 000000000..d48016563 --- /dev/null +++ b/ct/igotify.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: pfassina +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/androidseb25/iGotify-Notification-Assistent + +APP="iGotify" +var_tags="${var_tags:-notifications;gotify}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-4}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/igotify ]]; then + msg_error "No iGotify Installation Found!" 
+ exit + fi + + if check_for_gh_release "igotify" "androidseb25/iGotify-Notification-Assistent"; then + msg_info "Stopping Service" + systemctl stop igotify + msg_ok "Stopped Service" + + msg_info "Backing up Configuration" + cp /opt/igotify/.env /opt/igotify.env.bak + msg_ok "Backed up Configuration" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "igotify" "androidseb25/iGotify-Notification-Assistent" "prebuild" "latest" "/opt/igotify" "iGotify-Notification-Service-amd64-v*.zip" + + msg_info "Restoring Configuration" + cp /opt/igotify.env.bak /opt/igotify/.env + rm -f /opt/igotify.env.bak + msg_ok "Restored Configuration" + + msg_info "Starting Service" + systemctl start igotify + msg_ok "Started Service" + msg_ok "Updated successfully!" + fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}" diff --git a/ct/immich.sh b/ct/immich.sh index 4e40372ce..aed9cfa6b 100644 --- a/ct/immich.sh +++ b/ct/immich.sh @@ -76,16 +76,16 @@ EOF SOURCE_DIR=${STAGING_DIR}/image-source cd /tmp if [[ -f ~/.intel_version ]]; then - curl -fsSLO https://raw.githubusercontent.com/immich-app/immich/refs/heads/main/machine-learning/Dockerfile + curl_with_retry "https://raw.githubusercontent.com/immich-app/immich/refs/heads/main/machine-learning/Dockerfile" "Dockerfile" readarray -t INTEL_URLS < <( sed -n "/intel-[igc|opencl]/p" ./Dockerfile | awk '{print $3}' sed -n "/libigdgmm12/p" ./Dockerfile | awk '{print $3}' ) INTEL_RELEASE="$(grep "intel-opencl-icd_" ./Dockerfile | awk -F '_' '{print $2}')" if [[ "$INTEL_RELEASE" != "$(cat ~/.intel_version)" ]]; then - msg_info "Updating Intel iGPU dependencies" + msg_info "Updating Intel OpenVINO dependencies" for url in "${INTEL_URLS[@]}"; do - curl -fsSLO "$url" + curl_with_retry "$url" "$(basename "$url")" done $STD 
apt-mark unhold libigdgmm12 $STD apt install -y --allow-downgrades ./libigdgmm12*.deb @@ -94,9 +94,9 @@ EOF rm ./*.deb $STD apt-mark hold libigdgmm12 dpkg-query -W -f='${Version}\n' intel-opencl-icd >~/.intel_version - msg_ok "Intel iGPU dependencies updated" + rm -f ./Dockerfile + msg_ok "Updated Intel OpenVINO dependencies" fi - rm ./Dockerfile fi if [[ -f ~/.immich_library_revisions ]]; then libraries=("libjxl" "libheif" "libraw" "imagemagick" "libvips") @@ -109,8 +109,8 @@ EOF msg_ok "Image-processing libraries up to date" fi - RELEASE="2.5.6" - if check_for_gh_release "Immich" "immich-app/immich" "${RELEASE}"; then + RELEASE="v2.7.5" + if check_for_gh_release "Immich" "immich-app/immich" "${RELEASE}" "each release is tested individually before the version is updated. Please do not open issues for this"; then if [[ $(cat ~/.immich) > "2.5.1" ]]; then msg_info "Enabling Maintenance Mode" cd /opt/immich/app/bin @@ -125,7 +125,7 @@ EOF msg_ok "Stopped Services" VCHORD_RELEASE="0.5.3" [[ -f ~/.vchord_version ]] && mv ~/.vchord_version ~/.vectorchord - if check_for_gh_release "VectorChord" "tensorchord/VectorChord" "${VCHORD_RELEASE}"; then + if check_for_gh_release "VectorChord" "tensorchord/VectorChord" "${VCHORD_RELEASE}" "updated together with Immich after testing"; then fetch_and_deploy_gh_release "VectorChord" "tensorchord/VectorChord" "binary" "${VCHORD_RELEASE}" "/tmp" "postgresql-16-vchord_*_amd64.deb" systemctl restart postgresql $STD sudo -u postgres psql -d immich -c "ALTER EXTENSION vector UPDATE;" @@ -133,7 +133,7 @@ EOF $STD sudo -u postgres psql -d immich -c "REINDEX INDEX face_index;" $STD sudo -u postgres psql -d immich -c "REINDEX INDEX clip_index;" fi - ensure_dependencies ccache + ensure_dependencies ccache gcc-13 g++-13 INSTALL_DIR="/opt/${APP}" UPLOAD_DIR="$(sed -n '/^IMMICH_MEDIA_LOCATION/s/[^=]*=//p' /opt/immich/.env)" @@ -165,8 +165,8 @@ EOF ) setup_uv - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "Immich" "immich-app/immich" "tarball" 
"v${RELEASE}" "$SRC_DIR" - PNPM_VERSION="$(jq -r '.packageManager | split("@")[1]' ${SRC_DIR}/package.json)" + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "Immich" "immich-app/immich" "tarball" "${RELEASE}" "$SRC_DIR" + PNPM_VERSION="$(jq -r '.packageManager | split("@")[1] | split("+")[0]' ${SRC_DIR}/package.json)" NODE_VERSION="24" NODE_MODULE="pnpm@${PNPM_VERSION}" setup_nodejs msg_info "Updating Immich web and microservices" @@ -181,6 +181,12 @@ EOF unset SHARP_IGNORE_GLOBAL_LIBVIPS export SHARP_FORCE_GLOBAL_LIBVIPS=true $STD pnpm --filter immich --frozen-lockfile --prod --no-optional deploy "$APP_DIR" + + # Patch helmet.json: disable upgrade-insecure-requests for HTTP access + if [[ -f "$APP_DIR/helmet.json" ]]; then + jq '.contentSecurityPolicy.directives["upgrade-insecure-requests"] = null' "$APP_DIR/helmet.json" >"$APP_DIR/helmet.json.tmp" && mv "$APP_DIR/helmet.json.tmp" "$APP_DIR/helmet.json" + fi + cp "$APP_DIR"/package.json "$APP_DIR"/bin sed -i "s|^start|${APP_DIR}/bin/start|" "$APP_DIR"/bin/immich-admin @@ -214,18 +220,41 @@ EOF cd "$SRC_DIR"/machine-learning mkdir -p "$ML_DIR" - chown -R immich:immich "$INSTALL_DIR" + # chown excluding upload dir contents (may be a mount with restricted permissions) + chown immich:immich "$INSTALL_DIR" + find "$INSTALL_DIR" -maxdepth 1 -mindepth 1 ! 
-name upload -exec chown -R immich:immich {} + + chown immich:immich "${UPLOAD_DIR:-$INSTALL_DIR/upload}" 2>/dev/null || true chown immich:immich ./uv.lock export VIRTUAL_ENV="${ML_DIR}"/ml-venv + export UV_HTTP_TIMEOUT=300 if [[ -f ~/.openvino ]]; then - msg_info "Updating HW-accelerated machine-learning" - $STD uv add --no-sync --optional openvino onnxruntime-openvino==1.24.1 --active -n -p python3.13 --managed-python - $STD sudo --preserve-env=VIRTUAL_ENV -nu immich uv sync --extra openvino --no-dev --active --link-mode copy -n -p python3.13 --managed-python + ML_PYTHON="python3.13" + msg_info "Pre-installing Python ${ML_PYTHON} for machine-learning" + for attempt in $(seq 1 3); do + $STD sudo --preserve-env=VIRTUAL_ENV -nu immich uv python install "${ML_PYTHON}" && break + [[ $attempt -lt 3 ]] && msg_warn "Python download attempt $attempt failed, retrying..." && sleep 5 + done + msg_ok "Pre-installed Python ${ML_PYTHON}" + msg_info "Updating Intel OpenVINO machine-learning" + for attempt in $(seq 1 3); do + $STD sudo --preserve-env=VIRTUAL_ENV,UV_HTTP_TIMEOUT -nu immich uv sync --extra openvino --no-dev --active --link-mode copy -n -p "${ML_PYTHON}" --managed-python && break + [[ $attempt -lt 3 ]] && msg_warn "uv sync attempt $attempt failed, retrying..." && sleep 10 + done patchelf --clear-execstack "${VIRTUAL_ENV}/lib/python3.13/site-packages/onnxruntime/capi/onnxruntime_pybind11_state.cpython-313-x86_64-linux-gnu.so" - msg_ok "Updated HW-accelerated machine-learning" + msg_ok "Updated Intel OpenVINO machine-learning" else + ML_PYTHON="python3.11" + msg_info "Pre-installing Python ${ML_PYTHON} for machine-learning" + for attempt in $(seq 1 3); do + $STD sudo --preserve-env=VIRTUAL_ENV -nu immich uv python install "${ML_PYTHON}" && break + [[ $attempt -lt 3 ]] && msg_warn "Python download attempt $attempt failed, retrying..." 
&& sleep 5 + done + msg_ok "Pre-installed Python ${ML_PYTHON}" msg_info "Updating machine-learning" - $STD sudo --preserve-env=VIRTUAL_ENV -nu immich uv sync --extra cpu --no-dev --active --link-mode copy -n -p python3.11 --managed-python + for attempt in $(seq 1 3); do + $STD sudo --preserve-env=VIRTUAL_ENV,UV_HTTP_TIMEOUT -nu immich uv sync --extra cpu --no-dev --active --link-mode copy -n -p "${ML_PYTHON}" --managed-python && break + [[ $attempt -lt 3 ]] && msg_warn "uv sync attempt $attempt failed, retrying..." && sleep 10 + done msg_ok "Updated machine-learning" fi cd "$SRC_DIR" @@ -243,11 +272,28 @@ EOF [[ ! -f /usr/bin/immich ]] && ln -sf "$APP_DIR"/cli/bin/immich /usr/bin/immich [[ ! -f /usr/bin/immich-admin ]] && ln -sf "$APP_DIR"/bin/immich-admin /usr/bin/immich-admin - chown -R immich:immich "$INSTALL_DIR" + if ! grep -q '^DB_HOSTNAME=' "$INSTALL_DIR"/.env; then + sed -i '/^DB_DATABASE_NAME/a DB_HOSTNAME=127.0.0.1' "$INSTALL_DIR"/.env + fi + if ! grep -q 'HELMET_FILE' "$INSTALL_DIR"/.env; then + sed -i -e '$a\' "$INSTALL_DIR"/.env + echo "IMMICH_HELMET_FILE=true" >>"$INSTALL_DIR"/.env + fi + + if grep -q 'ExecStart=/usr/bin/node' /etc/systemd/system/immich-web.service; then + sed -i '/^EnvironmentFile=/d' /etc/systemd/system/immich-web.service + sed -i "s|^ExecStart=.*|ExecStart=${APP_DIR}/bin/start.sh|" /etc/systemd/system/immich-web.service + systemctl daemon-reload + fi + + # chown excluding upload dir contents (may be a mount with restricted permissions) + chown immich:immich "$INSTALL_DIR" + find "$INSTALL_DIR" -maxdepth 1 -mindepth 1 ! 
-name upload -exec chown -R immich:immich {} + + chown immich:immich "${UPLOAD_DIR:-$INSTALL_DIR/upload}" 2>/dev/null || true if [[ "${MAINT_MODE:-0}" == 1 ]]; then msg_info "Disabling Maintenance Mode" cd /opt/immich/app/bin - $STD ./immich-admin disable-maintenance-mode + $STD ./immich-admin disable-maintenance-mode || true unset MAINT_MODE $STD cd - msg_ok "Disabled Maintenance Mode" @@ -263,7 +309,8 @@ function compile_libjxl() { SOURCE=${SOURCE_DIR}/libjxl JPEGLI_LIBJPEG_LIBRARY_SOVERSION="62" JPEGLI_LIBJPEG_LIBRARY_VERSION="62.3.0" - : "${LIBJXL_REVISION:=$(jq -cr '.revision' "$BASE_DIR"/server/sources/libjxl.json)}" + LIBJXL_REVISION="794a5dcf0d54f9f0b20d288a12e87afb91d20dfc" + # : "${LIBJXL_REVISION:=$(jq -cr '.revision' "$BASE_DIR"/server/sources/libjxl.json)}" if [[ "$LIBJXL_REVISION" != "$(grep 'libjxl' ~/.immich_library_revisions | awk '{print $2}')" ]]; then msg_info "Recompiling libjxl" [[ -d "$SOURCE" ]] && rm -rf "$SOURCE" @@ -307,7 +354,8 @@ function compile_libjxl() { function compile_libheif() { SOURCE=${SOURCE_DIR}/libheif ensure_dependencies libaom-dev - : "${LIBHEIF_REVISION:=$(jq -cr '.revision' "$BASE_DIR"/server/sources/libheif.json)}" + LIBHEIF_REVISION="35dad50a9145332a7bfdf1ff6aef6801fb613d68" + # : "${LIBHEIF_REVISION:=$(jq -cr '.revision' "$BASE_DIR"/server/sources/libheif.json)}" if [[ "${update:-}" ]] || [[ "$LIBHEIF_REVISION" != "$(grep 'libheif' ~/.immich_library_revisions | awk '{print $2}')" ]]; then msg_info "Recompiling libheif" [[ -d "$SOURCE" ]] && rm -rf "$SOURCE" @@ -338,7 +386,8 @@ function compile_libheif() { function compile_libraw() { SOURCE=${SOURCE_DIR}/libraw - : "${LIBRAW_REVISION:=$(jq -cr '.revision' "$BASE_DIR"/server/sources/libraw.json)}" + LIBRAW_REVISION="0b56545a4f828743f28a4345cdfdd4c49f9f9a2a" + # : "${LIBRAW_REVISION:=$(jq -cr '.revision' "$BASE_DIR"/server/sources/libraw.json)}" if [[ "$LIBRAW_REVISION" != "$(grep 'libraw' ~/.immich_library_revisions | awk '{print $2}')" ]]; then msg_info "Recompiling 
libraw" [[ -d "$SOURCE" ]] && rm -rf "$SOURCE" diff --git a/ct/iobroker.sh b/ct/iobroker.sh index 3856ff335..77d06babb 100644 --- a/ct/iobroker.sh +++ b/ct/iobroker.sh @@ -27,6 +27,9 @@ function update_script() { msg_error "No ${APP} Installation Found!" exit fi + + NODE_VERSION="24" setup_nodejs + msg_info "Updating ${APP} LXC" $STD apt update $STD apt -y upgrade diff --git a/ct/ironclaw.sh b/ct/ironclaw.sh new file mode 100644 index 000000000..256897f8f --- /dev/null +++ b/ct/ironclaw.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/nearai/ironclaw + +APP="IronClaw" +var_tags="${var_tags:-ai;agent;security}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-8}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -f /usr/local/bin/ironclaw ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + if check_for_gh_release "ironclaw-bin" "nearai/ironclaw"; then + msg_info "Stopping Service" + systemctl stop ironclaw + msg_ok "Stopped Service" + + msg_info "Backing up Configuration" + cp /root/.ironclaw/.env /root/ironclaw.env.bak + msg_ok "Backed up Configuration" + + fetch_and_deploy_gh_release "ironclaw-bin" "nearai/ironclaw" "prebuild" "latest" "/usr/local/bin" \ + "ironclaw-$(uname -m)-unknown-linux-$([[ -f /etc/alpine-release ]] && echo "musl" || echo "gnu").tar.gz" + chmod +x /usr/local/bin/ironclaw + + msg_info "Restoring Configuration" + cp /root/ironclaw.env.bak /root/.ironclaw/.env + rm -f /root/ironclaw.env.bak + msg_ok "Restored Configuration" + + msg_info "Starting Service" + systemctl start ironclaw + msg_ok "Started Service" + msg_ok "Updated successfully!" + fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Complete setup by running:${CL}" +echo -e "${TAB}${BGN}ironclaw onboard${CL}" +echo -e "${INFO}${YW} Then start the service:${CL}" +echo -e "${TAB}${BGN}systemctl start ironclaw${CL}" +echo -e "${INFO}${YW} Access the Web UI at:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}" +echo -e "${INFO}${YW} Auth token and database credentials:${CL}" +echo -e "${TAB}${BGN}cat /root/.ironclaw/.env${CL}" diff --git a/ct/isponsorblocktv.sh b/ct/isponsorblocktv.sh new file mode 100644 index 000000000..f1b49425c --- /dev/null +++ b/ct/isponsorblocktv.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: Matthew Stern (sternma) | MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/dmunozv04/iSponsorBlockTV + +APP="iSponsorBlockTV" 
+var_tags="${var_tags:-media;automation}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-1024}" +var_disk="${var_disk:-4}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/isponsorblocktv ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_release "isponsorblocktv" "dmunozv04/iSponsorBlockTV"; then + msg_info "Stopping Service" + systemctl stop isponsorblocktv + msg_ok "Stopped Service" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "isponsorblocktv" "dmunozv04/iSponsorBlockTV" "singlefile" "latest" "/opt/isponsorblocktv" "iSponsorBlockTV-x86_64-linux" + + msg_info "Starting Service" + systemctl start isponsorblocktv + msg_ok "Started Service" + msg_ok "Updated successfully!" + fi + exit +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Run the setup wizard inside the container with:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}iSponsorBlockTV setup${CL}" diff --git a/ct/itsm-ng.sh b/ct/itsm-ng.sh index dff219504..1fdeaa5fb 100644 --- a/ct/itsm-ng.sh +++ b/ct/itsm-ng.sh @@ -30,9 +30,14 @@ function update_script() { fi setup_mariadb - msg_info "Updating LXC" + msg_info "Updating ITSM-NG" $STD apt update $STD apt -y upgrade + chown -R www-data:www-data /var/lib/itsm-ng + mkdir -p /usr/share/itsm-ng/css/palettes + chown -R www-data:www-data /usr/share/itsm-ng/css + chown -R www-data:www-data /usr/share/itsm-ng/css_compiled + chown www-data:www-data /etc/itsm-ng/config_db.php msg_ok "Updated successfully!" exit } diff --git a/ct/jellyfin.sh b/ct/jellyfin.sh index 87918c79e..bf799a29a 100644 --- a/ct/jellyfin.sh +++ b/ct/jellyfin.sh @@ -32,21 +32,36 @@ function update_script() { if ! 
grep -qEi 'ubuntu' /etc/os-release; then msg_info "Updating Intel Dependencies" rm -f ~/.intel-* || true - fetch_and_deploy_gh_release "intel-igc-core-2" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-core-2_*_amd64.deb" - fetch_and_deploy_gh_release "intel-igc-opencl-2" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-opencl-2_*_amd64.deb" + + # Fetch compute-runtime first so /tmp/gh_rel.json is populated for IGC tag resolution fetch_and_deploy_gh_release "intel-libgdgmm12" "intel/compute-runtime" "binary" "latest" "" "libigdgmm12_*_amd64.deb" fetch_and_deploy_gh_release "intel-opencl-icd" "intel/compute-runtime" "binary" "latest" "" "intel-opencl-icd_*_amd64.deb" + + local igc_tag + _resolve_igc_tag igc_tag + + fetch_and_deploy_gh_release "intel-igc-core-2" "intel/intel-graphics-compiler" "binary" "$igc_tag" "" "intel-igc-core-2_*_amd64.deb" + fetch_and_deploy_gh_release "intel-igc-opencl-2" "intel/intel-graphics-compiler" "binary" "$igc_tag" "" "intel-igc-opencl-2_*_amd64.deb" msg_ok "Updated Intel Dependencies" fi + msg_info "Setting up Jellyfin Repository" + setup_deb822_repo \ + "jellyfin" \ + "https://repo.jellyfin.org/jellyfin_team.gpg.key" \ + "https://repo.jellyfin.org/$(get_os_info id)" \ + "$(get_os_info codename)" + msg_ok "Set up Jellyfin Repository" + msg_info "Updating Jellyfin" ensure_dependencies libjemalloc2 if [[ ! -f /usr/lib/libjemalloc.so ]]; then ln -sf /usr/lib/x86_64-linux-gnu/libjemalloc.so.2 /usr/lib/libjemalloc.so fi - $STD apt update $STD apt -y upgrade - $STD apt -y --with-new-pkgs upgrade jellyfin jellyfin-server + $STD apt -y --with-new-pkgs upgrade jellyfin jellyfin-server jellyfin-ffmpeg7 + ln -sf /usr/lib/jellyfin-ffmpeg/ffmpeg /usr/bin/ffmpeg + ln -sf /usr/lib/jellyfin-ffmpeg/ffprobe /usr/bin/ffprobe msg_ok "Updated Jellyfin" msg_ok "Updated successfully!" 
exit diff --git a/ct/jitsi-meet.sh b/ct/jitsi-meet.sh new file mode 100644 index 000000000..1b57f485e --- /dev/null +++ b/ct/jitsi-meet.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://jitsi.org/ + +APP="Jitsi-Meet" +var_tags="${var_tags:-video;conference;communication}" +var_cpu="${var_cpu:-4}" +var_ram="${var_ram:-4096}" +var_disk="${var_disk:-12}" +var_os="${var_os:-debian}" +var_version="${var_version:-12}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /etc/jitsi ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + msg_info "Updating Jitsi Meet" + $STD apt update + $STD apt install -y --only-upgrade \ + jitsi-meet \ + jicofo \ + jitsi-videobridge2 \ + prosody + msg_ok "Updated Jitsi Meet" + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}https://${IP}${CL}" diff --git a/ct/karakeep.sh b/ct/karakeep.sh index 300ef0ac6..94744c1ee 100644 --- a/ct/karakeep.sh +++ b/ct/karakeep.sh @@ -59,6 +59,7 @@ function update_script() { if command -v corepack >/dev/null; then $STD corepack disable fi + sed -i "s/^SERVER_VERSION=.*$/SERVER_VERSION=${CHECK_UPDATE_RELEASE#v}/" /etc/karakeep/karakeep.env MODULE_VERSION="$(jq -r '.packageManager | split("@")[1]' /opt/karakeep/package.json)" NODE_VERSION="24" NODE_MODULE="pnpm@${MODULE_VERSION}" setup_nodejs setup_meilisearch @@ -83,7 +84,6 @@ function update_script() { cd 
/opt/karakeep/packages/db $STD pnpm migrate $STD pnpm store prune - sed -i "s/^SERVER_VERSION=.*$/SERVER_VERSION=${CHECK_UPDATE_RELEASE#v}/" /etc/karakeep/karakeep.env msg_ok "Updated Karakeep" msg_info "Starting Services" diff --git a/ct/kasm.sh b/ct/kasm.sh index 8b490f9c2..2542c52d6 100644 --- a/ct/kasm.sh +++ b/ct/kasm.sh @@ -15,6 +15,7 @@ var_version="${var_version:-13}" var_unprivileged="${var_unprivileged:-0}" var_fuse="${var_fuse:-yes}" var_tun="${var_tun:-yes}" +var_kasm_version="${var_kasm_version:-}" header_info "$APP" variables @@ -32,18 +33,21 @@ function update_script() { msg_info "Checking for new version" CURRENT_VERSION=$(readlink -f /opt/kasm/current | awk -F'/' '{print $4}') - KASM_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 'https://kasm-static-content[^"]*kasm_release_[0-9]+\.[0-9]+\.[0-9]+\.[a-z0-9]+\.tar\.gz' | head -n 1) - if [[ -z "$KASM_URL" ]]; then - SERVICE_IMAGE_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 'https://kasm-static-content[^"]*kasm_release_service_images_amd64_[0-9]+\.[0-9]+\.[0-9]+\.tar\.gz' | head -n 1) - if [[ -n "$SERVICE_IMAGE_URL" ]]; then - KASM_VERSION=$(echo "$SERVICE_IMAGE_URL" | sed -E 's/.*kasm_release_service_images_amd64_([0-9]+\.[0-9]+\.[0-9]+).*/\1/') - KASM_URL="https://kasm-static-content.s3.amazonaws.com/kasm_release_${KASM_VERSION}.tar.gz" - fi - else - KASM_VERSION=$(echo "$KASM_URL" | sed -E 's/.*kasm_release_([0-9]+\.[0-9]+\.[0-9]+).*/\1/') - fi + KASM_VERSION=$(curl -s https://kasm.com/downloads | grep -oP '<h1[^>]*>.*?</h1>'
| sed -E 's/<\/?h1[^>]*>//g' | grep -oP '\d+\.\d+\.\d+') + KASM_URL="https://kasm-static-content.s3.amazonaws.com/kasm_release_${KASM_VERSION:-$var_kasm_version}.tar.gz" - if [[ -z "$KASM_URL" ]] || [[ -z "$KASM_VERSION" ]]; then + # KASM_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 'https://kasm-static-content[^"]*kasm_release_[0-9]+\.[0-9]+\.[0-9]+\.[a-z0-9]+\.tar\.gz' | head -n 1) + # if [[ -z "$KASM_URL" ]]; then + # SERVICE_IMAGE_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 'https://kasm-static-content[^"]*kasm_release_service_images_amd64_[0-9]+\.[0-9]+\.[0-9]+\.tar\.gz' | head -n 1) + # if [[ -n "$SERVICE_IMAGE_URL" ]]; then + # KASM_VERSION=$(echo "$SERVICE_IMAGE_URL" | sed -E 's/.*kasm_release_service_images_amd64_([0-9]+\.[0-9]+\.[0-9]+).*/\1/') + # KASM_URL="https://kasm-static-content.s3.amazonaws.com/kasm_release_${KASM_VERSION}.tar.gz" + # fi + # else + # KASM_VERSION=$(echo "$KASM_URL" | sed -E 's/.*kasm_release_([0-9]+\.[0-9]+\.[0-9]+).*/\1/') + # fi + + if [[ -z "$KASM_VERSION" ]] || [[ -z "$KASM_URL" ]]; then msg_error "Unable to detect latest Kasm release URL." exit 250 fi diff --git a/ct/kima-hub.sh b/ct/kima-hub.sh index a1f3da652..d93375e42 100644 --- a/ct/kima-hub.sh +++ b/ct/kima-hub.sh @@ -29,6 +29,8 @@ function update_script() { exit fi + NODE_VERSION="22" setup_nodejs + if check_for_gh_release "kima-hub" "Chevron7Locked/kima-hub"; then msg_info "Stopping Services" systemctl stop kima-frontend kima-backend kima-analyzer kima-analyzer-clap diff --git a/ct/koillection.sh b/ct/koillection.sh index bba9c6825..3ee42e9a1 100644 --- a/ct/koillection.sh +++ b/ct/koillection.sh @@ -48,7 +48,9 @@ function update_script() { # Ensure APP_RUNTIME is in .env.local for CLI commands (upgrades from older versions) if !
grep -q "APP_RUNTIME" /opt/koillection/.env.local 2>/dev/null; then - echo 'APP_RUNTIME="Symfony\Component\Runtime\SymfonyRuntime"' >> /opt/koillection/.env.local + # Ensure file ends with newline before appending to avoid concatenation + [[ -s /opt/koillection/.env.local && -n "$(tail -c 1 /opt/koillection/.env.local)" ]] && echo "" >>/opt/koillection/.env.local + echo 'APP_RUNTIME="Symfony\Component\Runtime\SymfonyRuntime"' >>/opt/koillection/.env.local fi export COMPOSER_ALLOW_SUPERUSER=1 diff --git a/ct/kometa.sh b/ct/kometa.sh index 2789e4ea1..72618bb82 100644 --- a/ct/kometa.sh +++ b/ct/kometa.sh @@ -61,5 +61,5 @@ description msg_ok "Completed successfully!\n" echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" -echo -e "${INFO}${YW} Access the LXC at following IP address:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}${IP}${CL}" +echo -e "${INFO}${YW} Access Kometa Quickstart:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:7171${CL}" diff --git a/ct/komodo.sh b/ct/komodo.sh index 545862b41..55dc3521c 100644 --- a/ct/komodo.sh +++ b/ct/komodo.sh @@ -39,6 +39,8 @@ function update_script() { read -r -p "${TAB}Migrate update function now? [y/N]: " CONFIRM if [[ ! "${CONFIRM,,}" =~ ^(y|yes)$ ]]; then msg_warn "Migration skipped. The old update will continue to work for now." + msg_warn "⚠️ Komodo v2 uses :2 image tags. The :latest tag is deprecated and will not receive v2 updates." + msg_warn "Please migrate to the addon script to receive Komodo v2." msg_info "Updating ${APP} (legacy)" COMPOSE_FILE=$(find /opt/komodo -maxdepth 1 -type f -name '*.compose.yaml' ! 
-name 'compose.env' | head -n1) if [[ -z "$COMPOSE_FILE" ]]; then diff --git a/ct/librechat.sh b/ct/librechat.sh new file mode 100644 index 000000000..40bdaf4c2 --- /dev/null +++ b/ct/librechat.sh @@ -0,0 +1,101 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/danny-avila/LibreChat + +APP="LibreChat" +var_tags="${var_tags:-ai;chat}" +var_cpu="${var_cpu:-4}" +var_ram="${var_ram:-6144}" +var_disk="${var_disk:-20}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/librechat ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_tag "librechat" "danny-avila/LibreChat" "v"; then + msg_info "Stopping Services" + systemctl stop librechat rag-api + msg_ok "Stopped Services" + + msg_info "Backing up Configuration" + cp /opt/librechat/.env /opt/librechat.env.bak + msg_ok "Backed up Configuration" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_tag "librechat" "danny-avila/LibreChat" + + msg_info "Installing Dependencies" + cd /opt/librechat + $STD npm ci + msg_ok "Installed Dependencies" + + msg_info "Building Frontend" + $STD npm run frontend + $STD npm prune --production + $STD npm cache clean --force + msg_ok "Built Frontend" + + msg_info "Restoring Configuration" + cp /opt/librechat.env.bak /opt/librechat/.env + rm -f /opt/librechat.env.bak + msg_ok "Restored Configuration" + + msg_info "Starting Services" + systemctl start rag-api librechat + msg_ok "Started Services" + msg_ok "Updated LibreChat Successfully!" 
+ fi + + if check_for_gh_release "rag-api" "danny-avila/rag_api"; then + msg_info "Stopping RAG API" + systemctl stop rag-api + msg_ok "Stopped RAG API" + + msg_info "Backing up RAG API Configuration" + cp /opt/rag-api/.env /opt/rag-api.env.bak + msg_ok "Backed up RAG API Configuration" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "rag-api" "danny-avila/rag_api" "tarball" + + msg_info "Updating RAG API Dependencies" + cd /opt/rag-api + $STD .venv/bin/pip install -r requirements.lite.txt + msg_ok "Updated RAG API Dependencies" + + msg_info "Restoring RAG API Configuration" + cp /opt/rag-api.env.bak /opt/rag-api/.env + rm -f /opt/rag-api.env.bak + msg_ok "Restored RAG API Configuration" + + msg_info "Starting RAG API" + systemctl start rag-api + msg_ok "Started RAG API" + msg_ok "Updated RAG API Successfully!" + fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3080${CL}" diff --git a/ct/linkding.sh b/ct/linkding.sh index d770b8be2..b0604291f 100644 --- a/ct/linkding.sh +++ b/ct/linkding.sh @@ -39,7 +39,7 @@ function update_script() { cp /opt/linkding/.env /opt/linkding_env_backup msg_ok "Backed up Data" - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "linkding" "sissbruecker/linkding" + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "linkding" "sissbruecker/linkding" "tarball" msg_info "Restoring Data" cp -r /opt/linkding_data_backup/. 
/opt/linkding/data diff --git a/ct/litellm.sh b/ct/litellm.sh deleted file mode 100644 index 391a4acd1..000000000 --- a/ct/litellm.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env bash -source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) -# Copyright (c) 2021-2026 community-scripts ORG -# Author: stout01 -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://github.com/BerriAI/litellm - -APP="LiteLLM" -var_tags="${var_tags:-ai;interface}" -var_cpu="${var_cpu:-2}" -var_ram="${var_ram:-2048}" -var_disk="${var_disk:-4}" -var_os="${var_os:-debian}" -var_version="${var_version:-13}" -var_unprivileged="${var_unprivileged:-1}" - -header_info "$APP" -variables -color -catch_errors - -function update_script() { - header_info - check_container_storage - check_container_resources - - if [[ ! -f /etc/systemd/system/litellm.service ]]; then - msg_error "No ${APP} Installation Found!" - exit - fi - - msg_info "Stopping Service" - systemctl stop litellm - msg_ok "Stopped Service" - - VENV_PATH="/opt/litellm/.venv" - PYTHON_VERSION="3.13" USE_UVX="YES" setup_uv - - msg_info "Updating LiteLLM" - $STD "$VENV_PATH/bin/python" -m pip install --upgrade litellm[proxy] prisma - msg_ok "LiteLLM updated" - - msg_info "Updating DB Schema" - $STD uv --directory=/opt/litellm run litellm --config /opt/litellm/litellm.yaml --use_prisma_db_push --skip_server_startup - msg_ok "DB Schema Updated" - - msg_info "Starting Service" - systemctl start litellm - msg_ok "Started Service" - msg_ok "Updated successfully!" 
- exit -} - -start -build_container -description - -msg_ok "Completed successfully!\n" -echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" -echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:4000${CL}" diff --git a/ct/lychee.sh b/ct/lychee.sh new file mode 100644 index 000000000..b2bd05d33 --- /dev/null +++ b/ct/lychee.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/LycheeOrg/Lychee + +APP="Lychee" +var_tags="${var_tags:-media;photos;gallery}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-8}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/lychee ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_release "lychee" "LycheeOrg/Lychee"; then + msg_info "Stopping Services" + systemctl stop caddy + msg_ok "Stopped Services" + + msg_info "Backing up Data" + cp /opt/lychee/.env /opt/lychee.env.bak + cp -r /opt/lychee/storage /opt/lychee_storage_backup + msg_ok "Backed up Data" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "lychee" "LycheeOrg/Lychee" "prebuild" "latest" "/opt/lychee" "Lychee.zip" + + msg_info "Restoring Data" + cp /opt/lychee.env.bak /opt/lychee/.env + rm -f /opt/lychee.env.bak + cp -r /opt/lychee_storage_backup/. 
/opt/lychee/storage + rm -rf /opt/lychee_storage_backup + msg_ok "Restored Data" + + msg_info "Updating Application" + cd /opt/lychee + $STD php artisan migrate --force + $STD php artisan optimize:clear + chmod -R 775 /opt/lychee/storage /opt/lychee/bootstrap/cache + msg_ok "Updated Application" + + msg_info "Starting Services" + systemctl start caddy + msg_ok "Started Services" + msg_ok "Updated successfully!" + fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}" diff --git a/ct/lyrionmusicserver.sh b/ct/lyrionmusicserver.sh index b9532c1f6..d31f61cde 100644 --- a/ct/lyrionmusicserver.sh +++ b/ct/lyrionmusicserver.sh @@ -30,16 +30,16 @@ function update_script() { exit fi - DEB_URL=$(curl -s 'https://lyrion.org/getting-started/' | grep -oP '<a[^>]*href="\K[^"]*amd64\.deb(?="[^>]*>)' | head -n 1) + DEB_URL=$(curl_with_retry 'https://lyrion.org/getting-started/' | grep -oP '<a[^>]*href="\K[^"]*amd64\.deb(?="[^>]*>)' | head -n 1) RELEASE=$(echo "$DEB_URL" | grep -oP 'lyrionmusicserver_\K[0-9.]+(?=_amd64\.deb)') DEB_FILE="/tmp/lyrionmusicserver_${RELEASE}_amd64.deb" if [[ ! -f /opt/lyrion_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/lyrion_version.txt)" ]]; then msg_info "Updating $APP to ${RELEASE}" - curl -fsSL -o "$DEB_FILE" "$DEB_URL" + curl_with_retry "$DEB_URL" "$DEB_FILE" $STD apt install "$DEB_FILE" -y - systemctl restart lyrion - $STD rm -f "$DEB_FILE" - echo "${RELEASE}" >/opt/${APP}_version.txt + systemctl restart lyrionmusicserver + rm -f "$DEB_FILE" + echo "${RELEASE}" >/opt/lyrion_version.txt msg_ok "Updated $APP to ${RELEASE}" msg_ok "Updated successfully!"
else diff --git a/ct/mail-archiver.sh b/ct/mail-archiver.sh index e0a1e6062..8101b2365 100644 --- a/ct/mail-archiver.sh +++ b/ct/mail-archiver.sh @@ -28,6 +28,8 @@ function update_script() { exit fi + ensure_dependencies libgssapi-krb5-2 + if check_for_gh_release "mail-archiver" "s1t5/mail-archiver"; then msg_info "Stopping Mail-Archiver" systemctl stop mail-archiver diff --git a/ct/matomo.sh b/ct/matomo.sh new file mode 100644 index 000000000..ea355a3be --- /dev/null +++ b/ct/matomo.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://matomo.org/ + +APP="Matomo" +var_tags="${var_tags:-analytics;tracking;privacy}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-16}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/matomo ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + if check_for_gh_release "matomo" "matomo-org/matomo"; then + msg_info "Stopping Services" + systemctl stop caddy + msg_ok "Stopped Services" + + msg_info "Backing up Data" + [[ -f /opt/matomo/config/config.ini.php ]] && cp /opt/matomo/config/config.ini.php /opt/matomo_config.bak + [[ -d /opt/matomo/misc/user ]] && cp -r /opt/matomo/misc/user /opt/matomo_user_backup + [[ -f /root/matomo.creds ]] && cp /root/matomo.creds /opt/matomo_db_creds.bak + msg_ok "Backed up Data" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "matomo" "matomo-org/matomo" "prebuild" "latest" "/opt/matomo" "matomo-*.zip" + + msg_info "Restoring Data" + if [[ -f /opt/matomo_config.bak ]]; then + mkdir -p /opt/matomo/config + cp /opt/matomo_config.bak /opt/matomo/config/config.ini.php + fi + if [[ -d /opt/matomo_user_backup ]]; then + mkdir -p /opt/matomo/misc/user + cp -r /opt/matomo_user_backup/. /opt/matomo/misc/user + fi + [[ -f /opt/matomo_db_creds.bak ]] && cp /opt/matomo_db_creds.bak /root/matomo.creds + rm -f /opt/matomo_config.bak /opt/matomo_db_creds.bak + rm -rf /opt/matomo_user_backup + chown -R www-data:www-data /opt/matomo + msg_ok "Restored Data" + + msg_info "Starting Services" + systemctl start caddy + msg_ok "Started Services" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}" diff --git a/ct/matter-server.sh b/ct/matter-server.sh new file mode 100644 index 000000000..4aebdc3e5 --- /dev/null +++ b/ct/matter-server.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/matter-js/python-matter-server + +APP="Matter-Server" +var_tags="${var_tags:-matter;iot;smart-home}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-4}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/matter-server ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + if check_for_gh_release "matter-server" "matter-js/python-matter-server"; then + msg_info "Stopping Service" + systemctl stop matter-server + msg_ok "Stopped Service" + + msg_info "Updating Matter Server" + MATTER_VERSION=$(get_latest_github_release "matter-js/python-matter-server") + $STD uv pip install --python /opt/matter-server/.venv/bin/python --upgrade "python-matter-server[server]==${MATTER_VERSION}" + echo "${MATTER_VERSION}" >~/.matter-server + msg_ok "Updated Matter Server" + + fetch_and_deploy_gh_release "chip-ota-provider-app" "home-assistant-libs/matter-linux-ota-provider" "singlefile" "latest" "/usr/local/bin" "chip-ota-provider-app-x86-64" + + msg_info "Starting Service" + systemctl start matter-server + msg_ok "Started Service" + msg_ok "Updated successfully!" + fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Matter Server WebSocket API is running on port 5580.${CL}" +echo -e "${TAB}${GATEWAY}${BGN}ws://${IP}:5580/ws${CL}" diff --git a/ct/mealie.sh b/ct/mealie.sh index 6a9d34bba..92f9db888 100644 --- a/ct/mealie.sh +++ b/ct/mealie.sh @@ -38,9 +38,26 @@ function update_script() { msg_info "Backing up Configuration" cp -f /opt/mealie/mealie.env /opt/mealie.env + [[ -f /opt/mealie/start.sh ]] && cp -f /opt/mealie/start.sh /opt/mealie.start.sh msg_ok "Backup completed" - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "mealie" "mealie-recipes/mealie" "tarball" "latest" "/opt/mealie" + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "mealie" "mealie-recipes/mealie" "tarball" + + msg_info "Restoring Configuration" + mv -f /opt/mealie.env /opt/mealie/mealie.env + if [[ -f /opt/mealie.start.sh ]]; then + mv -f /opt/mealie.start.sh /opt/mealie/start.sh + else + cat <<'STARTEOF' >/opt/mealie/start.sh +#!/bin/bash +set -a +source /opt/mealie/mealie.env +set +a +exec uv run mealie +STARTEOF + fi + chmod +x 
/opt/mealie/start.sh + msg_ok "Configuration restored" msg_info "Installing Python Dependencies with uv" cd /opt/mealie @@ -49,9 +66,10 @@ function update_script() { msg_info "Building Frontend" MEALIE_VERSION=$(<$HOME/.mealie) - $STD sed -i "s|https://github.com/mealie-recipes/mealie/commit/|https://github.com/mealie-recipes/mealie/releases/tag/|g" /opt/mealie/frontend/pages/admin/site-settings.vue - $STD sed -i "s|value: data.buildId,|value: \"v${MEALIE_VERSION}\",|g" /opt/mealie/frontend/pages/admin/site-settings.vue - $STD sed -i "s|value: data.production ? i18n.t(\"about.production\") : i18n.t(\"about.development\"),|value: \"bare-metal\",|g" /opt/mealie/frontend/pages/admin/site-settings.vue + SITE_SETTINGS=$(find /opt/mealie/frontend -name "site-settings.vue" -path "*/admin/*" | head -1) + $STD sed -i "s|https://github.com/mealie-recipes/mealie/commit/|https://github.com/mealie-recipes/mealie/releases/tag/|g" "$SITE_SETTINGS" + $STD sed -i "s|value: data.buildId,|value: \"v${MEALIE_VERSION}\",|g" "$SITE_SETTINGS" + $STD sed -i "s|value: data.production ? 
i18n.t(\"about.production\") : i18n.t(\"about.development\"),|value: \"bare-metal\",|g" "$SITE_SETTINGS" export NUXT_TELEMETRY_DISABLED=1 cd /opt/mealie/frontend $STD yarn install --prefer-offline --frozen-lockfile --non-interactive --production=false --network-timeout 1000000 @@ -63,23 +81,7 @@ function update_script() { cp -r /opt/mealie/frontend/dist/* /opt/mealie/mealie/frontend/ msg_ok "Copied Frontend" - msg_info "Updating NLTK Data" - mkdir -p /nltk_data/ - cd /opt/mealie - $STD uv run python -m nltk.downloader -d /nltk_data averaged_perceptron_tagger_eng - msg_ok "Updated NLTK Data" - - msg_info "Restoring Configuration" - mv -f /opt/mealie.env /opt/mealie/mealie.env - cat <<'STARTEOF' >/opt/mealie/start.sh -#!/bin/bash -set -a -source /opt/mealie/mealie.env -set +a -exec uv run mealie -STARTEOF - chmod +x /opt/mealie/start.sh - msg_ok "Configuration restored" + setup_nltk "averaged_perceptron_tagger_eng" "/nltk_data" msg_info "Starting Service" systemctl start mealie @@ -97,4 +99,3 @@ msg_ok "Completed successfully!\n" echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" echo -e "${INFO}${YW} Access it using the following URL:${CL}" echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:9000${CL}" - diff --git a/ct/metube.sh b/ct/metube.sh index 79d1e8b95..907f92c0a 100644 --- a/ct/metube.sh +++ b/ct/metube.sh @@ -62,6 +62,7 @@ function update_script() { $STD corepack enable $STD corepack prepare pnpm --activate || true fi + echo 'onlyBuiltDependencies=*' >> .npmrc $STD pnpm install --frozen-lockfile $STD pnpm run build msg_ok "Built Frontend" diff --git a/ct/pf2etools.sh b/ct/mini-qr.sh similarity index 57% rename from ct/pf2etools.sh rename to ct/mini-qr.sh index 7146db74f..5376b691a 100644 --- a/ct/pf2etools.sh +++ b/ct/mini-qr.sh @@ -1,14 +1,14 @@ #!/usr/bin/env bash source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) # Copyright (c) 2021-2026 community-scripts ORG -# Author: 
TheRealVira +# Author: doge0420 # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://pf2etools.com/ | Github: https://github.com/Pf2eToolsOrg/Pf2eTools +# Source: https://github.com/lyqht/mini-qr -APP="Pf2eTools" -var_tags="${var_tags:-wiki}" -var_cpu="${var_cpu:-1}" -var_ram="${var_ram:-512}" +APP="Mini-QR" +var_tags="${var_tags:-QRcode;}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" var_disk="${var_disk:-6}" var_os="${var_os:-debian}" var_version="${var_version:-13}" @@ -24,26 +24,30 @@ function update_script() { check_container_storage check_container_resources - if [[ ! -d "/opt/${APP}" ]]; then + if [[ ! -d /opt/mini-qr ]]; then msg_error "No ${APP} Installation Found!" exit fi - if check_for_gh_release "pf2etools" "Pf2eToolsOrg/Pf2eTools"; then - msg_info "Updating System" - $STD apt update - $STD apt -y upgrade - msg_ok "Updated System" - rm -rf /opt/Pf2eTools - fetch_and_deploy_gh_release "pf2etools" "Pf2eToolsOrg/Pf2eTools" "tarball" "latest" "/opt/Pf2eTools" + if check_for_gh_release "mini-qr" "lyqht/mini-qr"; then + msg_info "Stopping Service" + systemctl stop caddy + msg_ok "Stopped Service" - msg_info "Updating ${APP}" - cd /opt/Pf2eTools + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "mini-qr" "lyqht/mini-qr" "tarball" + + msg_info "Installing Dependencies" + cd /opt/mini-qr $STD npm install + msg_ok "Installed Dependencies" + + msg_info "Building MiniQR" $STD npm run build - chown -R www-data: "/opt/${APP}" - chmod -R 755 "/opt/${APP}" - msg_ok "Updated ${APP}" + msg_ok "Built MiniQR" + + msg_info "Starting Service" + systemctl start caddy + msg_ok "Started Service" msg_ok "Updated successfully!" 
fi exit diff --git a/ct/typesense.sh b/ct/minthcm.sh similarity index 56% rename from ct/typesense.sh rename to ct/minthcm.sh index 1f40cce43..90211bd7c 100644 --- a/ct/typesense.sh +++ b/ct/minthcm.sh @@ -1,15 +1,15 @@ #!/usr/bin/env bash source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) # Copyright (c) 2021-2026 community-scripts ORG -# Author: tlissak | Co-Author MickLesk +# Author: MintHCM # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://typesense.org/ +# Source: https://github.com/minthcm/minthcm -APP="TypeSense" -var_tags="${var_tags:-database}" -var_cpu="${var_cpu:-1}" -var_ram="${var_ram:-1024}" -var_disk="${var_disk:-4}" +APP="MintHCM" +var_tags="${var_tags:-hcm}" +var_disk="${var_disk:-20}" +var_cpu="${var_cpu:-4}" +var_ram="${var_ram:-4096}" var_os="${var_os:-debian}" var_version="${var_version:-13}" var_unprivileged="${var_unprivileged:-1}" @@ -18,21 +18,16 @@ header_info "$APP" variables color catch_errors - function update_script() { header_info check_container_storage check_container_resources - if [[ ! -f /etc/typesense/typesense-server.ini ]]; then + if [[ ! -d /var/www/MintHCM ]]; then msg_error "No ${APP} Installation Found!" exit fi - if check_for_gh_release "typesense" "typesense/typesense"; then - msg_info "Updating Typesense" - $STD apt update - $STD apt -y upgrade - msg_ok "Updated successfully!" - fi + + msg_custom "🚀" "${GN}" "The app offers a built-in updater. Please use it." 
exit } @@ -42,5 +37,5 @@ description msg_ok "Completed successfully!\n" echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" -echo -e "${INFO}${YW} Access it using the following IP:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}${IP}:8108${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}" diff --git a/ct/myip.sh b/ct/myip.sh index b648f5f4c..c092a3289 100644 --- a/ct/myip.sh +++ b/ct/myip.sh @@ -28,6 +28,8 @@ function update_script() { exit fi + NODE_VERSION="24" setup_nodejs + if check_for_gh_release "myip" "jason5ng32/MyIP"; then msg_info "Stopping Services" systemctl stop myip diff --git a/ct/n8n.sh b/ct/n8n.sh index 739de7e25..fb688d432 100644 --- a/ct/n8n.sh +++ b/ct/n8n.sh @@ -28,7 +28,7 @@ function update_script() { exit fi - ensure_dependencies graphicsmagick + ensure_dependencies build-essential python3-setuptools graphicsmagick NODE_VERSION="24" setup_nodejs msg_info "Updating n8n" diff --git a/ct/nagios.sh b/ct/nagios.sh new file mode 100644 index 000000000..7fe1665cc --- /dev/null +++ b/ct/nagios.sh @@ -0,0 +1,91 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: CanbiZ (MickLesk) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/NagiosEnterprises/nagioscore + +APP="Nagios" +var_tags="${var_tags:-monitoring;alerts;infrastructure}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-20}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -f /usr/local/nagios/etc/nagios.cfg ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + msg_info "Backing up Configuration" + cp -a /usr/local/nagios/etc /opt/nagios-etc-backup + msg_ok "Backed up Configuration" + + if check_for_gh_release "nagios" "NagiosEnterprises/nagioscore"; then + msg_info "Stopping Nagios" + systemctl stop nagios + msg_ok "Stopped Nagios" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "nagios" "NagiosEnterprises/nagioscore" "tarball" + + msg_info "Building Nagios Core" + cd /opt/nagios + $STD ./configure --with-httpd-conf=/etc/apache2/sites-enabled + $STD make all + $STD make install-groups-users + usermod -a -G nagios www-data + $STD make install + $STD make install-daemoninit + $STD make install-commandmode + $STD make install-webconf + $STD a2enmod rewrite + $STD a2enmod cgi + setcap cap_net_raw+p /bin/ping + msg_ok "Built Nagios Core" + + msg_info "Starting Nagios" + systemctl restart apache2 + systemctl start nagios + msg_ok "Started Nagios" + fi + + if check_for_gh_release "nagios-plugins" "nagios-plugins/nagios-plugins"; then + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "nagios-plugins" "nagios-plugins/nagios-plugins" "tarball" + msg_info "Building Nagios Plugins" + cd /opt/nagios-plugins + $STD ./tools/setup + $STD ./configure + $STD make + $STD make install + msg_ok "Built Nagios Plugins" + fi + + msg_info "Restoring Configuration" + rm -rf /usr/local/nagios/etc + cp -a /opt/nagios-etc-backup /usr/local/nagios/etc + rm -rf /opt/nagios-etc-backup + msg_ok "Restored Configuration" + msg_ok "Updated successfully!" 
+ exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}/nagios${CL}" diff --git a/ct/nametag.sh b/ct/nametag.sh new file mode 100644 index 000000000..6fbb02eb4 --- /dev/null +++ b/ct/nametag.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/mattogodoy/nametag + +APP="Nametag" +var_tags="${var_tags:-contacts;crm}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-8}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/nametag ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + if check_for_gh_release "nametag" "mattogodoy/nametag"; then + msg_info "Stopping Service" + systemctl stop nametag + msg_ok "Stopped Service" + + msg_info "Backing up Data" + cp /opt/nametag/.env /opt/nametag.env.bak + cp -r /opt/nametag/data /opt/nametag_data_bak + msg_ok "Backed up Data" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "nametag" "mattogodoy/nametag" "tarball" "latest" "/opt/nametag" + + msg_info "Rebuilding Application" + cd /opt/nametag + $STD npm ci + set -a + source /opt/nametag/.env + set +a + $STD npx prisma generate + $STD npm run build + cp -r /opt/nametag/.next/static /opt/nametag/.next/standalone/.next/static + cp -r /opt/nametag/public /opt/nametag/.next/standalone/public + msg_ok "Rebuilt Application" + + msg_info "Restoring Data" + cp /opt/nametag.env.bak /opt/nametag/.env + cp -r /opt/nametag_data_bak/. /opt/nametag/data/ + rm -f /opt/nametag.env.bak + rm -rf /opt/nametag_data_bak + msg_ok "Restored Data" + + msg_info "Running Migrations" + cd /opt/nametag + $STD npx prisma migrate deploy + msg_ok "Ran Migrations" + + msg_info "Starting Service" + systemctl start nametag + msg_ok "Started Service" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}" diff --git a/ct/neko.sh b/ct/neko.sh new file mode 100644 index 000000000..8f88c3945 --- /dev/null +++ b/ct/neko.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: CanbiZ (MickLesk) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://neko.m1k1o.net/ + +APP="Neko" +var_tags="${var_tags:-virtual-browser;webrtc;streaming}" +var_cpu="${var_cpu:-4}" +var_ram="${var_ram:-4096}" +var_disk="${var_disk:-12}" +var_os="${var_os:-debian}" +var_version="${var_version:-12}" +var_unprivileged="${var_unprivileged:-1}" +var_gpu="${var_gpu:-yes}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/neko ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + if check_for_gh_release "neko" "m1k1o/neko"; then + msg_info "Stopping Service" + systemctl stop neko + msg_ok "Stopped Service" + + msg_info "Backing up Data" + cp /etc/neko/neko.yaml /opt/neko.yaml.bak + msg_ok "Backed up Data" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "neko" "m1k1o/neko" "tarball" + + msg_info "Building Client" + cd /opt/neko/client + $STD npm install + $STD npm run build + cp -r /opt/neko/client/dist/* /var/www/ + msg_ok "Built Client" + + msg_info "Building Server" + cd /opt/neko/server + $STD ./build + cp /opt/neko/server/bin/neko /usr/bin/neko + cp -r /opt/neko/server/bin/plugins/* /etc/neko/plugins/ 2>/dev/null || true + msg_ok "Built Server" + + msg_info "Restoring Data" + cp /opt/neko.yaml.bak /etc/neko/neko.yaml + rm -f /opt/neko.yaml.bak + msg_ok "Restored Data" + + msg_info "Starting Service" + systemctl start neko + msg_ok "Started Service" + msg_ok "Updated successfully!" + fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8080${CL}" diff --git a/ct/netbird.sh b/ct/netbird.sh index 0d0de3088..7af3cadb5 100644 --- a/ct/netbird.sh +++ b/ct/netbird.sh @@ -25,7 +25,7 @@ function update_script() { check_container_storage check_container_resources - if [[ ! -f /etc/netbird/config.json ]]; then + if [[ ! -d /var/lib/netbird/ ]]; then msg_error "No ${APP} Installation Found!" 
exit fi diff --git a/ct/netboot-xyz.sh b/ct/netboot-xyz.sh new file mode 100644 index 000000000..4bb646774 --- /dev/null +++ b/ct/netboot-xyz.sh @@ -0,0 +1,89 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://netboot.xyz + +APP="netboot.xyz" +var_tags="${var_tags:-network;pxe;boot}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-512}" +var_disk="${var_disk:-8}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +NSAPP="netboot-xyz" +var_install="${NSAPP}-install" +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -f ~/.netboot-xyz ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + if check_for_gh_release "netboot-xyz" "netbootxyz/netboot.xyz"; then + msg_info "Backing up Configuration" + cp /var/www/html/boot.cfg /opt/netboot-xyz-boot.cfg.bak + msg_ok "Backed up Configuration" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "netboot-xyz" "netbootxyz/netboot.xyz" "prebuild" "latest" "/var/www/html" "menus.tar.gz" + + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-efi" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz.efi" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-efi-dsk" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz.efi.dsk" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-snp" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-snp.efi" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-snp-dsk" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-snp.efi.dsk" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-snponly" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-snponly.efi" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-metal" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-metal.efi" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-metal-dsk" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-metal.efi.dsk" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-metal-snp" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-metal-snp.efi" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-metal-snp-dsk" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-metal-snp.efi.dsk" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-metal-snponly" "netbootxyz/netboot.xyz" "singlefile" "latest" 
"/var/www/html" "netboot.xyz-metal-snponly.efi" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-kpxe" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz.kpxe" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-undionly" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-undionly.kpxe" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-metal-kpxe" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-metal.kpxe" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-lkrn" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz.lkrn" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-linux-bin" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-linux.bin" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-dsk" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz.dsk" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-pdsk" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz.pdsk" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-arm64" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-arm64.efi" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-arm64-snp" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-arm64-snp.efi" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-arm64-snponly" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-arm64-snponly.efi" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-metal-arm64" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-metal-arm64.efi" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-metal-arm64-snp" "netbootxyz/netboot.xyz" 
"singlefile" "latest" "/var/www/html" "netboot.xyz-metal-arm64-snp.efi" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-metal-arm64-snponly" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-metal-arm64-snponly.efi" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-iso" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz.iso" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-img" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz.img" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-arm64-iso" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-arm64.iso" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-arm64-img" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-arm64.img" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-multiarch-iso" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-multiarch.iso" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-multiarch-img" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-multiarch.img" + USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-checksums" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-sha256-checksums.txt" + + msg_info "Restoring Configuration" + cp /opt/netboot-xyz-boot.cfg.bak /var/www/html/boot.cfg + rm -f /opt/netboot-xyz-boot.cfg.bak + msg_ok "Restored Configuration" + + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}" diff --git a/ct/nextexplorer.sh b/ct/nextexplorer.sh new file mode 100644 index 000000000..fbb559a07 --- /dev/null +++ b/ct/nextexplorer.sh @@ -0,0 +1,76 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: vhsdream +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/nxzai/nextExplorer + +APP="nextExplorer" +var_tags="${var_tags:-files;documents}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-3072}" +var_disk="${var_disk:-8}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/nextExplorer ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + NODE_VERSION="24" setup_nodejs + + if check_for_gh_release "nextExplorer" "nxzai/nextExplorer"; then + msg_info "Stopping nextExplorer" + $STD systemctl stop nextexplorer + msg_ok "Stopped nextExplorer" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "nextExplorer" "nxzai/nextExplorer" "tarball" "latest" "/opt/nextExplorer" + + msg_info "Updating nextExplorer" + APP_DIR="/opt/nextExplorer/app" + mkdir -p "$APP_DIR" + cd /opt/nextExplorer + export NODE_ENV=production + $STD npm ci --omit=dev --workspace backend + mv node_modules "$APP_DIR" + mv backend/{src,package.json} "$APP_DIR" + unset NODE_ENV + export NODE_ENV=development + $STD npm ci --workspace frontend + $STD npm run -w frontend build -- --sourcemap false + unset NODE_ENV + mv frontend/dist/ "$APP_DIR"/src/public + chown -R explorer:explorer "$APP_DIR" /etc/nextExplorer + sed -i "\|version|s|$(jq -cr '.version' ${APP_DIR}/package.json)|$(cat ~/.nextexplorer)|" "$APP_DIR"/package.json + sed -i 's/app.js/server.js/' /etc/systemd/system/nextexplorer.service && systemctl daemon-reload + msg_ok "Updated nextExplorer" + + msg_info "Starting nextExplorer" + $STD systemctl start nextexplorer + msg_ok "Started nextExplorer" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}" diff --git a/ct/nginxproxymanager.sh b/ct/nginxproxymanager.sh index 715e5e1cb..a13c888c3 100644 --- a/ct/nginxproxymanager.sh +++ b/ct/nginxproxymanager.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) # Copyright (c) 2021-2026 community-scripts ORG -# Author: tteck (tteckster) | Co-Author: CrazyWolf13 +# Author: tteck (tteckster) | Co-Author: CrazyWolf13, MickLesk (CanbiZ) # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE # Source: https://nginxproxymanager.com/ | Github: https://github.com/NginxProxyManager/nginx-proxy-manager @@ -28,18 +28,12 @@ function update_script() { exit fi - if [[ $(grep -E '^VERSION_ID=' /etc/os-release) == *"12"* ]]; then - msg_error "Wrong Debian version detected!" - msg_error "Please create a snapshot first. You must upgrade your LXC to Debian Trixie before updating. Visit: https://github.com/community-scripts/ProxmoxVE/discussions/7489" - exit - fi - if command -v node &>/dev/null; then CURRENT_NODE_VERSION=$(node --version | cut -d'v' -f2 | cut -d'.' 
-f1) if [[ "$CURRENT_NODE_VERSION" != "22" ]]; then systemctl stop openresty - apt-get purge -y nodejs npm - apt-get autoremove -y + $STD apt purge -y nodejs npm + $STD apt autoremove -y rm -rf /usr/local/bin/node /usr/local/bin/npm rm -rf /usr/local/lib/node_modules rm -rf ~/.npm @@ -49,92 +43,158 @@ function update_script() { NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs - RELEASE=$(curl -fsSL https://api.github.com/repos/NginxProxyManager/nginx-proxy-manager/releases/latest | - grep "tag_name" | - awk '{print substr($2, 3, length($2)-4) }') - - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "nginxproxymanager" "NginxProxyManager/nginx-proxy-manager" "tarball" "v${RELEASE}" "/opt/nginxproxymanager" - - msg_info "Stopping Services" - systemctl stop openresty - systemctl stop npm - msg_ok "Stopped Services" - - msg_info "Cleaning old files" - $STD rm -rf /app \ - /var/www/html \ - /etc/nginx \ - /var/log/nginx \ - /var/lib/nginx \ - /var/cache/nginx - msg_ok "Cleaned old files" - - msg_info "Setting up Environment" - ln -sf /usr/bin/python3 /usr/bin/python - ln -sf /usr/local/openresty/nginx/sbin/nginx /usr/sbin/nginx - ln -sf /usr/local/openresty/nginx/ /etc/nginx - sed -i "s|\"version\": \"2.0.0\"|\"version\": \"$RELEASE\"|" /opt/nginxproxymanager/backend/package.json - sed -i "s|\"version\": \"2.0.0\"|\"version\": \"$RELEASE\"|" /opt/nginxproxymanager/frontend/package.json - sed -i 's+^daemon+#daemon+g' /opt/nginxproxymanager/docker/rootfs/etc/nginx/nginx.conf - NGINX_CONFS=$(find /opt/nginxproxymanager -type f -name "*.conf") - for NGINX_CONF in $NGINX_CONFS; do - sed -i 's+include conf.d+include /etc/nginx/conf.d+g' "$NGINX_CONF" - done - - mkdir -p /var/www/html /etc/nginx/logs - cp -r /opt/nginxproxymanager/docker/rootfs/var/www/html/* /var/www/html/ - cp -r /opt/nginxproxymanager/docker/rootfs/etc/nginx/* /etc/nginx/ - cp /opt/nginxproxymanager/docker/rootfs/etc/letsencrypt.ini /etc/letsencrypt.ini - cp 
/opt/nginxproxymanager/docker/rootfs/etc/logrotate.d/nginx-proxy-manager /etc/logrotate.d/nginx-proxy-manager - ln -sf /etc/nginx/nginx.conf /etc/nginx/conf/nginx.conf - rm -f /etc/nginx/conf.d/dev.conf - - mkdir -p /tmp/nginx/body \ - /run/nginx \ - /data/nginx \ - /data/custom_ssl \ - /data/logs \ - /data/access \ - /data/nginx/default_host \ - /data/nginx/default_www \ - /data/nginx/proxy_host \ - /data/nginx/redirection_host \ - /data/nginx/stream \ - /data/nginx/dead_host \ - /data/nginx/temp \ - /var/lib/nginx/cache/public \ - /var/lib/nginx/cache/private \ - /var/cache/nginx/proxy_temp - - chmod -R 777 /var/cache/nginx - chown root /tmp/nginx - - echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" {print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf);" >/etc/nginx/conf.d/include/resolvers.conf - - if [ ! -f /data/nginx/dummycert.pem ] || [ ! -f /data/nginx/dummykey.pem ]; then - $STD openssl req -new -newkey rsa:2048 -days 3650 -nodes -x509 -subj "/O=Nginx Proxy Manager/OU=Dummy Certificate/CN=localhost" -keyout /data/nginx/dummykey.pem -out /data/nginx/dummycert.pem + if dpkg -s openresty &>/dev/null 2>&1; then + msg_info "Migrating from packaged OpenResty to source" + rm -f /etc/apt/trusted.gpg.d/openresty-archive-keyring.gpg /etc/apt/trusted.gpg.d/openresty.gpg + rm -f /etc/apt/sources.list.d/openresty.list /etc/apt/sources.list.d/openresty.sources + $STD apt purge -y openresty + $STD apt autoremove -y + rm -f ~/.openresty + msg_ok "Migrated from packaged OpenResty to source" fi - mkdir -p /app/frontend/images - cp -r /opt/nginxproxymanager/backend/* /app - msg_ok "Set up Environment" + local pcre_pkg="libpcre3-dev" + if grep -qE 'VERSION_ID="1[3-9]"' /etc/os-release 2>/dev/null; then + pcre_pkg="libpcre2-dev" + fi + $STD apt install -y build-essential "$pcre_pkg" libssl-dev zlib1g-dev - msg_info "Building Frontend" - export NODE_OPTIONS="--max_old_space_size=2048 --openssl-legacy-provider" - cd /opt/nginxproxymanager/frontend - # Replace node-sass with 
sass in package.json before installation - sed -E -i 's/"node-sass" *: *"([^"]*)"/"sass": "\1"/g' package.json - $STD yarn install --network-timeout 600000 - $STD yarn locale-compile - $STD yarn build - cp -r /opt/nginxproxymanager/frontend/dist/* /app/frontend - cp -r /opt/nginxproxymanager/frontend/public/images/* /app/frontend/images - msg_ok "Built Frontend" + if check_for_gh_release "openresty" "openresty/openresty"; then + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "openresty" "openresty/openresty" "prebuild" "${CHECK_UPDATE_RELEASE}" "/opt/openresty" "openresty-*.tar.gz" - msg_info "Initializing Backend" - rm -rf /app/config/default.json - if [ ! -f /app/config/production.json ]; then - cat <<'EOF' >/app/config/production.json + msg_info "Building OpenResty" + cd /opt/openresty + $STD ./configure \ + --with-http_v2_module \ + --with-http_realip_module \ + --with-http_stub_status_module \ + --with-http_ssl_module \ + --with-http_sub_module \ + --with-http_auth_request_module \ + --with-pcre-jit \ + --with-stream \ + --with-stream_ssl_module + $STD make -j"$(nproc)" + $STD make install + rm -rf /opt/openresty + cat <<'EOF' >/lib/systemd/system/openresty.service +[Unit] +Description=The OpenResty Application Platform +After=syslog.target network-online.target remote-fs.target nss-lookup.target +Wants=network-online.target + +[Service] +Type=simple +ExecStartPre=-/bin/mkdir -p /tmp/nginx/body /run/nginx +ExecStartPre=/usr/local/openresty/nginx/sbin/nginx -t +ExecStart=/usr/local/openresty/nginx/sbin/nginx -g 'daemon off;' + +[Install] +WantedBy=multi-user.target +EOF + if [ -f /opt/nginxproxymanager/docker/rootfs/etc/nginx/nginx.conf ]; then + cp /opt/nginxproxymanager/docker/rootfs/etc/nginx/nginx.conf /usr/local/openresty/nginx/conf/nginx.conf + sed -i 's+^daemon+#daemon+g' /usr/local/openresty/nginx/conf/nginx.conf + sed -i 's+include conf.d+include /etc/nginx/conf.d+g' /usr/local/openresty/nginx/conf/nginx.conf + fi + sed -i 's/user npm/user root/g; 
s/^pid/#pid/g' /usr/local/openresty/nginx/conf/nginx.conf + systemctl daemon-reload + systemctl unmask openresty 2>/dev/null || true + systemctl restart openresty + msg_ok "Built OpenResty" + fi + + cd /root + if [ -d /opt/certbot ]; then + msg_info "Updating Certbot" + $STD /opt/certbot/bin/pip install --upgrade pip setuptools wheel + $STD /opt/certbot/bin/pip install --upgrade certbot certbot-dns-cloudflare + msg_ok "Updated Certbot" + fi + + if check_for_gh_release "nginxproxymanager" "NginxProxyManager/nginx-proxy-manager"; then + msg_info "Stopping Services" + systemctl stop openresty + systemctl stop npm + msg_ok "Stopped Services" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "nginxproxymanager" "NginxProxyManager/nginx-proxy-manager" "tarball" "${CHECK_UPDATE_RELEASE}" "/opt/nginxproxymanager" + + msg_info "Cleaning old files" + $STD rm -rf /app \ + /var/www/html \ + /etc/nginx \ + /var/log/nginx \ + /var/lib/nginx \ + /var/cache/nginx + msg_ok "Cleaned old files" + + local RELEASE="${CHECK_UPDATE_RELEASE#v}" + msg_info "Setting up Environment" + ln -sf /usr/bin/python3 /usr/bin/python + ln -sf /usr/local/openresty/nginx/sbin/nginx /usr/sbin/nginx + ln -sf /usr/local/openresty/nginx/ /etc/nginx + sed -i "0,/\"version\": \"[^\"]*\"/s|\"version\": \"[^\"]*\"|\"version\": \"$RELEASE\"|" /opt/nginxproxymanager/backend/package.json + sed -i "0,/\"version\": \"[^\"]*\"/s|\"version\": \"[^\"]*\"|\"version\": \"$RELEASE\"|" /opt/nginxproxymanager/frontend/package.json + sed -i 's+^daemon+#daemon+g' /opt/nginxproxymanager/docker/rootfs/etc/nginx/nginx.conf + NGINX_CONFS=$(find /opt/nginxproxymanager -type f -name "*.conf") + for NGINX_CONF in $NGINX_CONFS; do + sed -i 's+include conf.d+include /etc/nginx/conf.d+g' "$NGINX_CONF" + done + + mkdir -p /var/www/html /etc/nginx/logs + cp -r /opt/nginxproxymanager/docker/rootfs/var/www/html/* /var/www/html/ + cp -r /opt/nginxproxymanager/docker/rootfs/etc/nginx/* /etc/nginx/ + cp 
/opt/nginxproxymanager/docker/rootfs/etc/letsencrypt.ini /etc/letsencrypt.ini + cp /opt/nginxproxymanager/docker/rootfs/etc/logrotate.d/nginx-proxy-manager /etc/logrotate.d/nginx-proxy-manager + ln -sf /etc/nginx/nginx.conf /etc/nginx/conf/nginx.conf + rm -f /etc/nginx/conf.d/dev.conf + + mkdir -p /tmp/nginx/body \ + /run/nginx \ + /data/nginx \ + /data/custom_ssl \ + /data/logs \ + /data/access \ + /data/nginx/default_host \ + /data/nginx/default_www \ + /data/nginx/proxy_host \ + /data/nginx/redirection_host \ + /data/nginx/stream \ + /data/nginx/dead_host \ + /data/nginx/temp \ + /var/lib/nginx/cache/public \ + /var/lib/nginx/cache/private \ + /var/cache/nginx/proxy_temp + + chmod -R 777 /var/cache/nginx + chown root /tmp/nginx + + echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" {print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf);" >/etc/nginx/conf.d/include/resolvers.conf + + if [ ! -f /data/nginx/dummycert.pem ] || [ ! -f /data/nginx/dummykey.pem ]; then + $STD openssl req -new -newkey rsa:2048 -days 3650 -nodes -x509 -subj "/O=Nginx Proxy Manager/OU=Dummy Certificate/CN=localhost" -keyout /data/nginx/dummykey.pem -out /data/nginx/dummycert.pem + fi + + mkdir -p /app/frontend/images + cp -r /opt/nginxproxymanager/backend/* /app + msg_ok "Set up Environment" + + msg_info "Building Frontend" + export NODE_OPTIONS="--max_old_space_size=2048 --openssl-legacy-provider" + cd /opt/nginxproxymanager/frontend + sed -E -i 's/"node-sass" *: *"([^"]*)"/"sass": "\1"/g' package.json + $STD yarn install --network-timeout 600000 + $STD yarn locale-compile + $STD yarn build + cp -r /opt/nginxproxymanager/frontend/dist/* /app/frontend + cp -r /opt/nginxproxymanager/frontend/public/images/* /app/frontend/images + msg_ok "Built Frontend" + + msg_info "Initializing Backend" + rm -rf /app/config/default.json + if [ ! 
-f /app/config/production.json ]; then + cat <<'EOF' >/app/config/production.json { "database": { "engine": "knex-native", @@ -148,40 +208,21 @@ function update_script() { } } EOF + fi + sed -i 's/"client": "sqlite3"/"client": "better-sqlite3"/' /app/config/production.json + cd /app + $STD yarn install --network-timeout 600000 + msg_ok "Initialized Backend" + + msg_info "Starting Services" + sed -i 's/user npm/user root/g; s/^pid/#pid/g' /usr/local/openresty/nginx/conf/nginx.conf + sed -r -i 's/^([[:space:]]*)su npm npm/\1#su npm npm/g;' /etc/logrotate.d/nginx-proxy-manager + systemctl daemon-reload + systemctl enable -q --now openresty + systemctl enable -q --now npm + msg_ok "Started Services" + msg_ok "Updated successfully!" fi - sed -i 's/"client": "sqlite3"/"client": "better-sqlite3"/' /app/config/production.json - cd /app - $STD yarn install --network-timeout 600000 - msg_ok "Initialized Backend" - - msg_info "Updating Certbot" - [ -f /etc/apt/trusted.gpg.d/openresty-archive-keyring.gpg ] && rm -f /etc/apt/trusted.gpg.d/openresty-archive-keyring.gpg - [ -f /etc/apt/sources.list.d/openresty.list ] && rm -f /etc/apt/sources.list.d/openresty.list - [ ! -f /etc/apt/trusted.gpg.d/openresty.gpg ] && curl -fsSL https://openresty.org/package/pubkey.gpg | gpg --dearmor --yes -o /etc/apt/trusted.gpg.d/openresty.gpg - [ ! 
-f /etc/apt/sources.list.d/openresty.sources ] && cat <<'EOF' >/etc/apt/sources.list.d/openresty.sources -Types: deb -URIs: http://openresty.org/package/debian/ -Suites: bookworm -Components: openresty -Signed-By: /etc/apt/trusted.gpg.d/openresty.gpg -EOF - $STD apt update - $STD apt -y install openresty - if [ -d /opt/certbot ]; then - $STD /opt/certbot/bin/pip install --upgrade pip setuptools wheel - $STD /opt/certbot/bin/pip install --upgrade certbot certbot-dns-cloudflare - fi - msg_ok "Updated Certbot" - - msg_info "Starting Services" - sed -i 's/user npm/user root/g; s/^pid/#pid/g' /usr/local/openresty/nginx/conf/nginx.conf - sed -r -i 's/^([[:space:]]*)su npm npm/\1#su npm npm/g;' /etc/logrotate.d/nginx-proxy-manager - systemctl enable -q --now openresty - systemctl enable -q --now npm - systemctl restart openresty - msg_ok "Started Services" - - msg_ok "Updated successfully!" exit } diff --git a/ct/nocodb.sh b/ct/nocodb.sh index 6ada9ef6a..ea0d9fdd4 100644 --- a/ct/nocodb.sh +++ b/ct/nocodb.sh @@ -23,16 +23,18 @@ function update_script() { header_info check_container_storage check_container_resources + #RELEASE="0.301.1" if [[ ! -f /etc/systemd/system/nocodb.service ]]; then msg_error "No ${APP} Installation Found!" 
exit fi - if check_for_gh_release "nocodb" "nocodb/nocodb" "0.301.1"; then + #if check_for_gh_release "nocodb" "nocodb/nocodb" "${RELEASE}"; then + if check_for_gh_release "nocodb" "nocodb/nocodb"; then msg_info "Stopping Service" systemctl stop nocodb msg_ok "Stopped Service" - fetch_and_deploy_gh_release "nocodb" "nocodb/nocodb" "singlefile" "0.301.1" "/opt/nocodb/" "Noco-linux-x64" + fetch_and_deploy_gh_release "nocodb" "nocodb/nocodb" "singlefile" "latest" "/opt/nocodb/" "Noco-linux-x64" msg_info "Starting Service" systemctl start nocodb diff --git a/ct/nodecast-tv.sh b/ct/nodecast-tv.sh index 730b70136..63ec5bc8e 100644 --- a/ct/nodecast-tv.sh +++ b/ct/nodecast-tv.sh @@ -34,7 +34,7 @@ function update_script() { systemctl stop nodecast-tv msg_ok "Stopped Service" - fetch_and_deploy_gh_release "nodecast-tv" "technomancer702/nodecast-tv" + fetch_and_deploy_gh_release "nodecast-tv" "technomancer702/nodecast-tv" "tarball" msg_info "Updating Modules" cd /opt/nodecast-tv diff --git a/ct/opencloud.sh b/ct/opencloud.sh index 39f8b3577..6fb676546 100644 --- a/ct/opencloud.sh +++ b/ct/opencloud.sh @@ -29,8 +29,8 @@ function update_script() { exit fi - RELEASE="v5.2.0" - if check_for_gh_release "OpenCloud" "opencloud-eu/opencloud" "${RELEASE}"; then + RELEASE="v6.1.0" + if check_for_gh_release "OpenCloud" "opencloud-eu/opencloud" "${RELEASE}" "each release is tested individually before the version is updated. 
Please do not open issues for this"; then msg_info "Stopping services" systemctl stop opencloud opencloud-wopi msg_ok "Stopped services" diff --git a/ct/openthread-br.sh b/ct/openthread-br.sh new file mode 100644 index 000000000..310318cff --- /dev/null +++ b/ct/openthread-br.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://openthread.io/guides/border-router + +APP="OpenThread-BR" +var_tags="${var_tags:-thread;iot;border-router;matter}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-4}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-0}" +var_tun="${var_tun:-yes}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/ot-br-posix ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + cd /opt/ot-br-posix + LOCAL_COMMIT=$(git rev-parse HEAD) + $STD git fetch --depth 1 origin main + REMOTE_COMMIT=$(git rev-parse origin/main) + + if [[ "${LOCAL_COMMIT}" == "${REMOTE_COMMIT}" ]]; then + msg_ok "Already up to date (${LOCAL_COMMIT:0:7})" + exit + fi + + msg_info "Stopping Services" + systemctl stop otbr-web + systemctl stop otbr-agent + msg_ok "Stopped Services" + + msg_info "Updating Source" + $STD git reset --hard origin/main + $STD git submodule update --depth 1 --init --recursive + msg_ok "Updated Source" + + msg_info "Rebuilding OpenThread Border Router (Patience)" + cd /opt/ot-br-posix/build + $STD cmake -GNinja \ + -DBUILD_TESTING=OFF \ + -DCMAKE_INSTALL_PREFIX=/usr \ + -DOTBR_DBUS=ON \ + -DOTBR_MDNS=openthread \ + -DOTBR_REST=ON \ + -DOTBR_WEB=ON \ + -DOTBR_BORDER_ROUTING=ON \ + -DOTBR_BACKBONE_ROUTER=ON \ + -DOT_FIREWALL=ON \ + -DOT_POSIX_NAT64_CIDR="192.168.255.0/24" \ + .. + $STD ninja + $STD ninja install + msg_ok "Rebuilt OpenThread Border Router" + + msg_info "Starting Services" + systemctl start otbr-agent + systemctl start otbr-web + msg_ok "Started Services" + msg_ok "Updated successfully!" 
+ exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}" diff --git a/ct/outline.sh b/ct/outline.sh index 9e1a45b6b..6f98fb938 100644 --- a/ct/outline.sh +++ b/ct/outline.sh @@ -28,7 +28,7 @@ function update_script() { exit fi - NODE_VERSION="22" setup_nodejs + NODE_VERSION="24" setup_nodejs if check_for_gh_release "outline" "outline/outline"; then msg_info "Stopping Services" diff --git a/ct/owncast.sh b/ct/owncast.sh index c3a8056fd..1e41ca728 100644 --- a/ct/owncast.sh +++ b/ct/owncast.sh @@ -9,7 +9,7 @@ APP="Owncast" var_tags="${var_tags:-broadcasting}" var_cpu="${var_cpu:-2}" var_ram="${var_ram:-2048}" -var_disk="${var_disk:-2}" +var_disk="${var_disk:-10}" var_os="${var_os:-debian}" var_version="${var_version:-13}" var_unprivileged="${var_unprivileged:-1}" diff --git a/ct/ownfoil.sh b/ct/ownfoil.sh new file mode 100644 index 000000000..3a770f614 --- /dev/null +++ b/ct/ownfoil.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: pajjski +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/a1ex4/ownfoil + +APP="ownfoil" +var_tags="${var_tags:-gaming}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-1024}" +var_disk="${var_disk:-4}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/ownfoil ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + if check_for_gh_release "ownfoil" "a1ex4/ownfoil"; then + msg_info "Stopping Service" + systemctl stop ownfoil + msg_ok "Stopped Service" + + msg_info "Backing up Data" + cp -r /opt/ownfoil/app/config /opt/ownfoil_data_backup + msg_ok "Backed up Data" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "ownfoil" "a1ex4/ownfoil" "tarball" + + msg_info "Installing Dependencies" + cd /opt/ownfoil + $STD source .venv/bin/activate + $STD uv pip install -r requirements.txt + msg_ok "Installed Dependencies" + + msg_info "Restoring Data" + cp -r /opt/ownfoil_data_backup /opt/ownfoil/app/config + rm -rf /opt/ownfoil_data_backup + msg_ok "Restored Data" + + msg_info "Starting Service" + systemctl start ownfoil + msg_ok "Started Service" + msg_ok "Updated successfully!" + fi + exit +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8465${CL}" diff --git a/ct/pangolin.sh b/ct/pangolin.sh index b30455068..9092a3c48 100644 --- a/ct/pangolin.sh +++ b/ct/pangolin.sh @@ -6,6 +6,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV # Source: https://pangolin.net/ | Github: https://github.com/fosrl/pangolin APP="Pangolin" +PANGOLIN_VERSION="${PANGOLIN_VERSION:-1.18.3}" var_tags="${var_tags:-proxy}" var_cpu="${var_cpu:-2}" var_ram="${var_ram:-4096}" @@ -33,7 +34,7 @@ function update_script() { NODE_VERSION="24" setup_nodejs - if check_for_gh_release "pangolin" "fosrl/pangolin"; then + if check_for_gh_release "pangolin" "fosrl/pangolin" "$PANGOLIN_VERSION" "Pinned to a tested release because Pangolin's schema changes have repeatedly broken unattended updates. To try a newer version at your own risk, run: 'export PANGOLIN_VERSION=' and re-run update. 
If it breaks, please open an issue at https://github.com/community-scripts/ProxmoxVE/issues with the error log."; then msg_info "Stopping Service" systemctl stop pangolin systemctl stop gerbil @@ -41,9 +42,13 @@ function update_script() { msg_info "Creating backup" tar -czf /opt/pangolin_config_backup.tar.gz -C /opt/pangolin config + if [[ -f /opt/pangolin/config/db/db.sqlite ]]; then + cp -a /opt/pangolin/config/db/db.sqlite \ + "/opt/pangolin/config/db/db.sqlite.pre-${PANGOLIN_VERSION}-$(date +%Y%m%d-%H%M%S).bak" + fi msg_ok "Created backup" - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "pangolin" "fosrl/pangolin" "tarball" + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "pangolin" "fosrl/pangolin" "tarball" "$PANGOLIN_VERSION" CLEAN_INSTALL=1 fetch_and_deploy_gh_release "gerbil" "fosrl/gerbil" "singlefile" "latest" "/usr/bin" "gerbil_linux_amd64" msg_info "Updating Pangolin" @@ -67,9 +72,16 @@ function update_script() { rm -f /opt/pangolin_config_backup.tar.gz msg_ok "Restored config" + if ! 
grep -q '^ExecStartPre=/usr/bin/node dist/migrations.mjs' /etc/systemd/system/pangolin.service 2>/dev/null; then + msg_info "Adding migration step to pangolin.service" + sed -i '/^ExecStart=\/usr\/bin\/node --enable-source-maps dist\/server.mjs/i ExecStartPre=/usr/bin/node dist/migrations.mjs' /etc/systemd/system/pangolin.service + systemctl daemon-reload + msg_ok "Updated pangolin.service" + fi + msg_info "Running database migrations" cd /opt/pangolin - ENVIRONMENT=prod $STD npx drizzle-kit push --config drizzle.sqlite.config.ts + ENVIRONMENT=prod $STD node dist/migrations.mjs msg_ok "Ran database migrations" msg_info "Updating Badger plugin version" diff --git a/ct/paperless-ngx.sh b/ct/paperless-ngx.sh index 37b88fad5..271932a37 100644 --- a/ct/paperless-ngx.sh +++ b/ct/paperless-ngx.sh @@ -8,7 +8,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV APP="Paperless-ngx" var_tags="${var_tags:-document;management}" var_cpu="${var_cpu:-2}" -var_ram="${var_ram:-2048}" +var_ram="${var_ram:-3072}" var_disk="${var_disk:-12}" var_os="${var_os:-debian}" var_version="${var_version:-13}" @@ -164,6 +164,8 @@ function update_script() { fi fi + setup_nltk "snowball_data stopwords punkt_tab" "/usr/share/nltk_data" + msg_info "Starting all Paperless-ngx Services" systemctl start paperless-consumer paperless-webserver paperless-scheduler paperless-task-queue sleep 1 diff --git a/ct/papra.sh b/ct/papra.sh index 71051da02..4a5dc477d 100644 --- a/ct/papra.sh +++ b/ct/papra.sh @@ -35,14 +35,37 @@ function update_script() { msg_ok "Stopped Service" msg_info "Backing up Configuration" - cp /opt/papra/apps/papra-server/.env /opt/papra_env.bak + if [[ -f /opt/papra/apps/papra-server/.env ]]; then + cp /opt/papra/apps/papra-server/.env /opt/papra_env.bak + fi msg_ok "Backed up Configuration" CLEAN_INSTALL=1 fetch_and_deploy_gh_release "papra" "papra-hq/papra" "tarball" msg_info "Building Application" cd /opt/papra - cp /opt/papra_env.bak 
/opt/papra/apps/papra-server/.env + if [[ -f /opt/papra_env.bak ]]; then + cp /opt/papra_env.bak /opt/papra/apps/papra-server/.env + else + msg_warn ".env missing, regenerating from defaults" + LOCAL_IP=$(hostname -I | awk '{print $1}') + cat <<EOF >/opt/papra/apps/papra-server/.env +NODE_ENV=production +SERVER_SERVE_PUBLIC_DIR=true +PORT=1221 +DATABASE_URL=file:/opt/papra_data/db/db.sqlite +DOCUMENT_STORAGE_FILESYSTEM_ROOT=/opt/papra_data/documents +PAPRA_CONFIG_DIR=/opt/papra_data +AUTH_SECRET=$(cat /opt/papra_data/.secret) +BETTER_AUTH_SECRET=$(cat /opt/papra_data/.secret) +BETTER_AUTH_TELEMETRY=0 +CLIENT_BASE_URL=http://${LOCAL_IP}:1221 +SERVER_BASE_URL=http://${LOCAL_IP}:1221 +EMAILS_DRY_RUN=true +INGESTION_FOLDER_IS_ENABLED=true +INGESTION_FOLDER_ROOT_PATH=/opt/papra_data/ingestion +EOF + fi $STD pnpm install --frozen-lockfile $STD pnpm --filter "@papra/app-client..." run build $STD pnpm --filter "@papra/app-server..." run build diff --git a/ct/part-db.sh b/ct/part-db.sh index fe17d51a1..c58e73040 100644 --- a/ct/part-db.sh +++ b/ct/part-db.sh @@ -27,36 +27,27 @@ function update_script() { msg_error "No ${APP} Installation Found!" 
exit fi - - RELEASE=$(get_latest_github_release "Part-DB/Part-DB-server") + if check_for_gh_release "partdb" "Part-DB/Part-DB-server"; then msg_info "Stopping Service" systemctl stop apache2 msg_ok "Stopped Service" - msg_info "Updating $APP to v${RELEASE}" - cd /opt mv /opt/partdb/ /opt/partdb-backup - curl -fsSL "https://github.com/Part-DB/Part-DB-server/archive/refs/tags/v${RELEASE}.zip" -o "/opt/v${RELEASE}.zip" - $STD unzip "v${RELEASE}.zip" - mv /opt/Part-DB-server-${RELEASE}/ /opt/partdb + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "partdb" "Part-DB/Part-DB-server" "prebuild" "latest" "/opt/partdb" "partdb_with_assets.zip" + msg_info "Updating Part-DB" cd /opt/partdb/ - cp -r "/opt/partdb-backup/.env.local" /opt/partdb/ - cp -r "/opt/partdb-backup/public/media" /opt/partdb/public/ - cp -r "/opt/partdb-backup/config/banner.md" /opt/partdb/config/ - + cp -r /opt/partdb-backup/.env.local /opt/partdb/ + cp -r /opt/partdb-backup/public/media /opt/partdb/public/ + cp -r /opt/partdb-backup/config/banner.md /opt/partdb/config/ export COMPOSER_ALLOW_SUPERUSER=1 $STD composer install --no-dev -o --no-interaction - $STD yarn install - $STD yarn build $STD php bin/console cache:clear $STD php bin/console doctrine:migrations:migrate -n chown -R www-data:www-data /opt/partdb - rm -r "/opt/v${RELEASE}.zip" rm -r /opt/partdb-backup - echo "${RELEASE}" >~/.partdb - msg_ok "Updated $APP to v${RELEASE}" + msg_ok "Updated Part-DB" msg_info "Starting Service" systemctl start apache2 diff --git a/ct/patchmon.sh b/ct/patchmon.sh index 4684d3383..a8141d07e 100644 --- a/ct/patchmon.sh +++ b/ct/patchmon.sh @@ -3,7 +3,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV # Copyright (c) 2021-2026 community-scripts ORG # Author: vhsdream # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://github.com/PatcMmon/PatchMon +# Source: https://github.com/PatchMon/PatchMon APP="PatchMon" 
var_tags="${var_tags:-monitoring}" @@ -29,62 +29,75 @@ function update_script() { exit fi - if ! grep -q "PORT=3001" /opt/patchmon/backend/.env; then - msg_warn "⚠️ The next PatchMon update will include breaking changes (port changes)." - msg_warn "See details here: https://github.com/community-scripts/ProxmoxVE/pull/11888" - msg_warn "Press Enter to continue with the update, or Ctrl+C to abort..." - read -r - fi - - NODE_VERSION="24" setup_nodejs if check_for_gh_release "PatchMon" "PatchMon/PatchMon"; then msg_info "Stopping Service" systemctl stop patchmon-server msg_ok "Stopped Service" - msg_info "Creating Backup" - cp /opt/patchmon/backend/.env /opt/backend.env - cp /opt/patchmon/frontend/.env /opt/frontend.env - msg_ok "Backup Created" - - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "PatchMon" "PatchMon/PatchMon" "tarball" "latest" "/opt/patchmon" - - msg_info "Updating PatchMon" - VERSION=$(get_latest_github_release "PatchMon/PatchMon") - SERVER_PORT="$(sed -n '/SERVER_PORT/s/[^=]*=//p' /opt/backend.env)" - sed -i 's/PORT=3399/PORT=3001/' /opt/backend.env - sed -i -e "s/VERSION=.*/VERSION=$VERSION/" \ - -e '/^VITE_API_URL/d' /opt/frontend.env - export NODE_ENV=production - cd /opt/patchmon - $STD npm install --no-audit --no-fund --no-save --ignore-scripts - cd /opt/patchmon/frontend - mv /opt/frontend.env /opt/patchmon/frontend/.env - $STD npm install --no-audit --no-fund --no-save --ignore-scripts --include=dev - $STD npm run build - cd /opt/patchmon/backend - mv /opt/backend.env /opt/patchmon/backend/.env - $STD npm run db:generate - $STD npx prisma migrate deploy - cp /opt/patchmon/docker/nginx.conf.template /etc/nginx/sites-available/patchmon.conf - sed -i -e 's|proxy_pass .*|proxy_pass http://127.0.0.1:3001;|' \ - -e '\|try_files |i\ root /opt/patchmon/frontend/dist;' \ - -e 's|alias.*|alias /opt/patchmon/frontend/dist/assets;|' \ - -e '\|expires 1y|i\ root /opt/patchmon/frontend/dist;' /etc/nginx/sites-available/patchmon.conf - if [[ -n "$SERVER_PORT" 
]] && [[ "$SERVER_PORT" != "443" ]]; then - sed -i "s/listen [[:digit:]].*/listen ${SERVER_PORT};/" /etc/nginx/sites-available/patchmon.conf + if [[ -d /opt/patchmon/backend ]]; then + msg_info "Legacy install detected - creating full backup, please wait..." + $STD tar czf ~/patchmon_legacy.tar.gz /opt/patchmon + cp /opt/patchmon/backend/.env /opt/legacy.env + msg_ok "Full backup saved in /root" + msg_info "Starting migration to PatchMon v2.x.x" + systemctl disable -q --now nginx + $STD npm cache clean --force + $STD apt autoremove --purge -y {nginx,nodejs} + if [[ -f /etc/apt/sources.list.d/nodesource.sources ]]; then + cp /etc/apt/sources.list.d/nodesource.sources /etc/apt/sources.list.d/nodesource.sources.bak + rm -f /etc/apt/sources.list.d/nodesource.sources + elif [[ -f /etc/apt/sources.list.d/nodesource.list ]]; then + cp /etc/apt/sources.list.d/nodesource.list /etc/apt/sources.list.d/nodesource.list.bak + rm -f /etc/apt/sources.list.d/nodesource.list + fi + rm -rf /opt/patchmon + mkdir -p /opt/patchmon/agents + cp /opt/legacy.env /opt/patchmon/.env + sed -i -e 's/^PORT=.*/PORT=3000/' \ + -e 's/^NODE_/APP_/' \ + -e '/^SERVER_*/d' \ + -e '/^# API*/,+2d' /opt/patchmon/.env + { + echo "" + echo "SESSION_SECRET=$(openssl rand -hex 64)" + echo "AI_ENCRYPTION_KEY=$(openssl rand -hex 64)" + echo "AGENT_BINARIES_DIR=/opt/patchmon/agents" + } >>/opt/patchmon/.env + sed -i -e '\|Directory|s|/backend||' \ + -e 's|^ExecStart=.*|ExecStart=/opt/patchmon/patchmon-server|' \ + -e 's|^Environment=NODE_.*|EnvironmentFile=/opt/patchmon/.env|' \ + /etc/systemd/system/patchmon-server.service + systemctl daemon-reload + rm /opt/legacy.env + msg_ok "Migration complete!" 
fi - ln -sf /etc/nginx/sites-available/patchmon.conf /etc/nginx/sites-enabled/ - rm -f /etc/nginx/sites-enabled/default - $STD nginx -t - systemctl restart nginx - msg_ok "Updated PatchMon" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "PatchMon" "PatchMon/PatchMon" "singlefile" "latest" "/opt/patchmon" "patchmon-server-linux-amd64" + mv /opt/patchmon/PatchMon /opt/patchmon/patchmon-server + + msg_info "Fetching PatchMon agent binaries" + RELEASE=$(get_latest_github_release "PatchMon/PatchMon") + [[ ! -d /opt/patchmon/agents ]] && mkdir -p /opt/patchmon/agents + FILE_URL="https://github.com/PatchMon/PatchMon/releases/download/v${RELEASE}/patchmon-agent-" + AGENT_NAME=( + "linux-amd64" + "linux-arm64" + "linux-arm" + "linux-386" + "freebsd-amd64" + "freebsd-arm64" + "freebsd-arm" + "freebsd-386" + "windows-amd64.exe" + "windows-arm64.exe" + ) + for arch in "${AGENT_NAME[@]}"; do + curl_with_retry "${FILE_URL}${arch}" "/opt/patchmon/agents/patchmon-agent-${arch}" + [[ "${arch}" != *.exe ]] && chmod 755 "/opt/patchmon/agents/patchmon-agent-${arch}" + done + msg_ok "Fetched PatchMon agent binaries" msg_info "Starting Service" - if grep -q '/usr/bin/node' /etc/systemd/system/patchmon-server.service; then - sed -i 's|ExecStart=.*|ExecStart=/usr/bin/npm run start|' /etc/systemd/system/patchmon-server.service - systemctl daemon-reload - fi systemctl start patchmon-server msg_ok "Started Service" msg_ok "Updated successfully!" diff --git a/ct/peanut.sh b/ct/peanut.sh index 38647c141..c90b27968 100644 --- a/ct/peanut.sh +++ b/ct/peanut.sh @@ -45,6 +45,33 @@ function update_script() { msg_ok "Fixed entrypoint" fi + if [[ ! 
-f /etc/peanut/peanut.env ]]; then + msg_info "Migrating service to EnvironmentFile" + mkdir -p /etc/peanut + cat <<EOF >/etc/peanut/peanut.env +NODE_ENV=production + +#WEB_HOST=0.0.0.0 +#WEB_PORT=8080 +#NUT_HOST=localhost +#NUT_PORT=3493 + +# Disable auth entirely: +#AUTH_DISABLED=true + +# Bootstrap initial account on first start (ignored afterwards): +#WEB_USERNAME=admin +#WEB_PASSWORD=changeme +EOF + chmod 600 /etc/peanut/peanut.env + sed -i '/^Environment=/d' /etc/systemd/system/peanut.service + if ! grep -q '^EnvironmentFile=/etc/peanut/peanut.env' /etc/systemd/system/peanut.service; then + sed -i '/^Type=simple/a EnvironmentFile=/etc/peanut/peanut.env' /etc/systemd/system/peanut.service + fi + systemctl daemon-reload + msg_ok "Migrated to /etc/peanut/peanut.env" + fi + msg_info "Updating PeaNUT" cd /opt/peanut $STD pnpm i diff --git a/ct/pelican-panel.sh b/ct/pelican-panel.sh index 31f8ee92e..c577e2c88 100644 --- a/ct/pelican-panel.sh +++ b/ct/pelican-panel.sh @@ -45,15 +45,23 @@ function update_script() { $STD php artisan down msg_ok "Stopped Service" - cp -r /opt/pelican-panel/.env /opt/ + mkdir -p /opt/backup + cp -a /opt/pelican-panel/.env /opt/backup + mkdir -p /opt/backup/storage/app/ + cp -a /opt/pelican-panel/storage/app/public /opt/backup/storage/app/ + SQLITE_INSTALL=$(ls /opt/pelican-panel/database/*.sqlite 1>/dev/null 2>&1 && echo "true" || echo "false") - $SQLITE_INSTALL && cp -r /opt/pelican-panel/database/*.sqlite /opt/ - rm -rf * .* + $SQLITE_INSTALL && cp -r /opt/pelican-panel/database/*.sqlite /opt/backup + + find /opt/pelican-panel -mindepth 1 -maxdepth 1 ! -name 'backup' ! 
-name 'plugins' -exec rm -rf {} + + fetch_and_deploy_gh_release "pelican-panel" "pelican-dev/panel" "prebuild" "latest" "/opt/pelican-panel" "panel.tar.gz" msg_info "Updating Pelican Panel" - mv /opt/.env /opt/pelican-panel/ - $SQLITE_INSTALL && mv /opt/*.sqlite /opt/pelican-panel/database/ + cp -a /opt/backup/.env /opt/pelican-panel/ + $SQLITE_INSTALL && mv /opt/backup/*.sqlite /opt/pelican-panel/database/ + cp -a /opt/backup/storage/app/public /opt/pelican-panel/storage/app/ + $STD composer install --no-dev --optimize-autoloader --no-interaction $STD php artisan p:environment:setup $STD php artisan view:clear diff --git a/ct/plant-it.sh b/ct/plant-it.sh index 445d84f96..130ca22fb 100644 --- a/ct/plant-it.sh +++ b/ct/plant-it.sh @@ -23,18 +23,19 @@ function update_script() { header_info check_container_storage check_container_resources + RELEASE="0.10.0" if [[ ! -d /opt/plant-it ]]; then msg_error "No ${APP} Installation Found!" exit fi setup_mariadb - if check_for_gh_release "plant-it" "MDeLuise/plant-it"; then + if check_for_gh_release "plant-it" "MDeLuise/plant-it" "${RELEASE}" "last version that includes the web frontend"; then msg_info "Stopping Service" systemctl stop plant-it msg_info "Stopped Service" - USE_ORIGINAL_FILENAME="true" fetch_and_deploy_gh_release "plant-it" "MDeLuise/plant-it" "singlefile" "0.10.0" "/opt/plant-it/backend" "server.jar" - fetch_and_deploy_gh_release "plant-it-front" "MDeLuise/plant-it" "prebuild" "0.10.0" "/opt/plant-it/frontend" "client.tar.gz" + USE_ORIGINAL_FILENAME="true" fetch_and_deploy_gh_release "plant-it" "MDeLuise/plant-it" "singlefile" "${RELEASE}" "/opt/plant-it/backend" "server.jar" + fetch_and_deploy_gh_release "plant-it-front" "MDeLuise/plant-it" "prebuild" "${RELEASE}" "/opt/plant-it/frontend" "client.tar.gz" msg_warn "Application is updated to latest Web version (v0.10.0). There will be no more updates available." 
msg_warn "Please read: https://github.com/MDeLuise/plant-it/releases/tag/1.0.0" diff --git a/ct/plex.sh b/ct/plex.sh index 656f44bb7..734860c23 100644 --- a/ct/plex.sh +++ b/ct/plex.sh @@ -46,11 +46,11 @@ function update_script() { "main" msg_ok "Migrated to new Plex repository" fi - elif [[ -f /etc/apt/sources.list.d/plexmediaserver.list ]]; then + elif compgen -G "/etc/apt/sources.list.d/plex*.list" >/dev/null; then msg_info "Migrating to new Plex repository (deb822)" - rm -f /etc/apt/sources.list.d/plexmediaserver.list - rm -f /etc/apt/sources.list.d/plex* + rm -f /etc/apt/sources.list.d/plex*.list rm -f /usr/share/keyrings/PlexSign.asc + rm -f /usr/share/keyrings/plexmediaserver.v2.gpg setup_deb822_repo \ "plexmediaserver" \ "https://downloads.plex.tv/plex-keys/PlexSign.v2.key" \ @@ -58,6 +58,15 @@ function update_script() { "public" \ "main" msg_ok "Migrated to new Plex repository (deb822)" + elif [[ ! -f /etc/apt/sources.list.d/plexmediaserver.sources ]]; then + msg_info "Setting up Plex repository" + setup_deb822_repo \ + "plexmediaserver" \ + "https://downloads.plex.tv/plex-keys/PlexSign.v2.key" \ + "https://repo.plex.tv/deb/" \ + "public" \ + "main" + msg_ok "Set up Plex repository" fi if [[ -f /usr/local/bin/plexupdate ]] || [[ -d /opt/plexupdate ]]; then msg_info "Removing legacy plexupdate" @@ -70,6 +79,11 @@ function update_script() { $STD apt update $STD apt install -y plexmediaserver msg_ok "Updated Plex Media Server" + + msg_info "Restarting Plex Media Server" + systemctl restart plexmediaserver + msg_ok "Restarted Plex Media Server" + msg_ok "Updated successfully!" exit } diff --git a/ct/podman-homeassistant.sh b/ct/podman-homeassistant.sh index 96c50e16d..3ebd798be 100644 --- a/ct/podman-homeassistant.sh +++ b/ct/podman-homeassistant.sh @@ -23,7 +23,7 @@ function update_script() { header_info check_container_storage check_container_resources - if [[ ! -f /etc/systemd/system/homeassistant.service ]]; then + if [[ ! 
-f /etc/containers/systemd/homeassistant.container ]]; then msg_error "No ${APP} Installation Found!" exit fi @@ -68,7 +68,7 @@ function update_script() { $STD curl -fsSL https://raw.githubusercontent.com/filebrowser/get/master/get.sh | bash $STD filebrowser config init -a '0.0.0.0' $STD filebrowser config set -a '0.0.0.0' - $STD filebrowser users add admin helper-scripts.com --perm.admin + $STD filebrowser users add admin community-scripts.org --perm.admin msg_ok "Installed FileBrowser" msg_info "Creating Service" @@ -90,7 +90,7 @@ EOF msg_ok "Completed successfully!\n" echo -e "FileBrowser should be reachable by going to the following URL. - ${BL}http://$LOCAL_IP:8080${CL} admin|helper-scripts.com\n" + ${BL}http://$LOCAL_IP:8080${CL} admin|community-scripts.org\n" exit fi if [ "$UPD" == "4" ]; then diff --git a/ct/profilarr.sh b/ct/profilarr.sh index 8d1ce0a96..7bf0582b2 100644 --- a/ct/profilarr.sh +++ b/ct/profilarr.sh @@ -44,7 +44,7 @@ function update_script() { msg_info "Installing Python Dependencies" cd /opt/profilarr/backend - $STD uv venv /opt/profilarr/backend/.venv + $STD uv venv --clear /opt/profilarr/backend/.venv sed 's/==/>=/g' requirements.txt >requirements-relaxed.txt $STD uv pip install --python /opt/profilarr/backend/.venv/bin/python -r requirements-relaxed.txt rm -f requirements-relaxed.txt diff --git a/ct/projectsend.sh b/ct/projectsend.sh index bcec0e43e..78c221f76 100644 --- a/ct/projectsend.sh +++ b/ct/projectsend.sh @@ -60,4 +60,4 @@ description msg_ok "Completed successfully!\n" echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}/install for the initial setup${CL}" diff --git a/ct/protonmail-bridge.sh b/ct/protonmail-bridge.sh new file mode 100644 index 000000000..48d16fecf --- /dev/null +++ b/ct/protonmail-bridge.sh @@ -0,0 +1,79 @@ +#!/usr/bin/env bash 
+source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: Stephen Chin (steveonjava) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/ProtonMail/proton-bridge + +APP="ProtonMail-Bridge" +var_tags="${var_tags:-mail;proton}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-1024}" +var_disk="${var_disk:-8}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -x /usr/bin/protonmail-bridge ]]; then + msg_error "No ${APP} Installation Found!" + exit 1 + fi + + if check_for_gh_release "protonmail-bridge" "ProtonMail/proton-bridge"; then + local -a bridge_units=( + protonmail-bridge + protonmail-bridge-imap.socket + protonmail-bridge-smtp.socket + protonmail-bridge-imap-proxy + protonmail-bridge-smtp-proxy + ) + local unit + declare -A was_active + for unit in "${bridge_units[@]}"; do + if systemctl is-active --quiet "$unit" 2>/dev/null; then + was_active["$unit"]=1 + else + was_active["$unit"]=0 + fi + done + + msg_info "Stopping Services" + systemctl stop protonmail-bridge-imap.socket protonmail-bridge-smtp.socket protonmail-bridge-imap-proxy protonmail-bridge-smtp-proxy protonmail-bridge + msg_ok "Stopped Services" + + fetch_and_deploy_gh_release "protonmail-bridge" "ProtonMail/proton-bridge" "binary" + + if [[ -f /home/protonbridge/.protonmailbridge-initialized ]]; then + msg_info "Starting Services" + for unit in "${bridge_units[@]}"; do + if [[ "${was_active[$unit]:-0}" == "1" ]]; then + systemctl start "$unit" + fi + done + msg_ok "Started Services" + else + msg_ok "Initialization not completed. Services remain disabled." + fi + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW}One-time configuration is required before Bridge services are enabled.${CL}" +echo -e "${INFO}${YW}Run this command in the container: protonmailbridge-configure${CL}" diff --git a/ct/pve-scripts-local.sh b/ct/pve-scripts-local.sh index 9e5b3cbc5..09ecb91f6 100644 --- a/ct/pve-scripts-local.sh +++ b/ct/pve-scripts-local.sh @@ -9,7 +9,7 @@ APP="PVE-Scripts-Local" var_tags="${var_tags:-pve-scripts-local}" var_cpu="${var_cpu:-2}" var_ram="${var_ram:-4096}" -var_disk="${var_disk:-4}" +var_disk="${var_disk:-10}" var_os="${var_os:-debian}" var_version="${var_version:-13}" var_unprivileged="${var_unprivileged:-1}" diff --git a/ct/reactive-resume.sh b/ct/reactive-resume.sh index fbde8ba69..e67ef7564 100644 --- a/ct/reactive-resume.sh +++ b/ct/reactive-resume.sh @@ -33,11 +33,17 @@ function update_script() { systemctl stop reactive-resume msg_ok "Stopped services" + ensure_dependencies git + cp /opt/reactive-resume/.env /opt/reactive-resume.env.bak + NODE_VERSION="24" setup_nodejs CLEAN_INSTALL=1 fetch_and_deploy_gh_release "reactive-resume" "amruthpillai/reactive-resume" "tarball" "latest" "/opt/reactive-resume" msg_info "Updating Reactive Resume (Patience)" cd /opt/reactive-resume + export COREPACK_ENABLE_DOWNLOAD_PROMPT=0 + corepack enable + corepack prepare --activate export CI="true" export NODE_ENV="production" $STD pnpm install --frozen-lockfile diff --git a/ct/reitti.sh b/ct/reitti.sh index 3b52d99e4..26d3f5a38 100644 --- a/ct/reitti.sh +++ b/ct/reitti.sh @@ -37,39 +37,136 @@ function update_script() { fi fi - if [ ! 
-d /var/cache/nginx/tiles ]; then - msg_info "Installing Nginx Tile Cache" - mkdir -p /var/cache/nginx/tiles - $STD apt install -y nginx - cat </etc/nginx/nginx.conf -user www-data; - -events { - worker_connections 1024; -} -http { - proxy_cache_path /var/cache/nginx/tiles levels=1:2 keys_zone=tiles:10m max_size=1g inactive=30d use_temp_path=off; - server { - listen 80; - location / { - proxy_pass https://tile.openstreetmap.org/; - proxy_set_header Host tile.openstreetmap.org; - proxy_set_header User-Agent "Reitti/1.0"; - proxy_cache tiles; - proxy_cache_valid 200 30d; - proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504; - } - } -} -EOF - chown -R www-data:www-data /var/cache/nginx - chmod -R 750 /var/cache/nginx - systemctl restart nginx - echo "reitti.ui.tiles.cache.url=http://127.0.0.1" >> /opt/reitti/application.properties - systemctl restart reitti - msg_info "Installed Nginx Tile Cache" + # Migrate v3 -> v4: Remove RabbitMQ (no longer required) / Photon / Spring Settings + if systemctl is-enabled --quiet rabbitmq-server 2>/dev/null; then + msg_info "Migrating to v4: Removing RabbitMQ" + systemctl stop rabbitmq-server + systemctl disable rabbitmq-server + $STD apt-get purge -y rabbitmq-server erlang-base + $STD apt-get autoremove -y + msg_ok "Removed RabbitMQ" fi - + + if systemctl is-enabled --quiet photon 2>/dev/null; then + msg_info "Migrating to v4: Removing Photon service" + systemctl stop photon + systemctl disable photon + rm -f /etc/systemd/system/photon.service + systemctl daemon-reload + msg_ok "Removed Photon service" + fi + + if grep -q "spring.rabbitmq\|PHOTON_BASE_URL\|PROCESSING_WAIT_TIME\|DANGEROUS_LIFE" /opt/reitti/application.properties 2>/dev/null; then + msg_info "Migrating to v4: Rewriting application.properties" + local DB_URL DB_USER DB_PASS + DB_URL=$(grep '^spring.datasource.url=' /opt/reitti/application.properties | cut -d'=' -f2-) + DB_USER=$(grep '^spring.datasource.username=' 
/opt/reitti/application.properties | cut -d'=' -f2-) + DB_PASS=$(grep '^spring.datasource.password=' /opt/reitti/application.properties | cut -d'=' -f2-) + cp /opt/reitti/application.properties /opt/reitti/application.properties.bak + cat <<PROPEOF >/opt/reitti/application.properties +# Server configuration +server.port=8080 +server.servlet.context-path=/ +server.forward-headers-strategy=framework +server.compression.enabled=true +server.compression.min-response-size=1024 +server.compression.mime-types=text/plain,application/json + +# Logging configuration +logging.level.root=INFO +logging.level.org.hibernate.engine.jdbc.spi.SqlExceptionHelper=FATAL +logging.level.com.dedicatedcode.reitti=INFO + +# Internationalization +spring.messages.basename=messages +spring.messages.encoding=UTF-8 +spring.messages.cache-duration=3600 +spring.messages.fallback-to-system-locale=false + +# PostgreSQL configuration +spring.datasource.url=${DB_URL} +spring.datasource.username=${DB_USER} +spring.datasource.password=${DB_PASS} +spring.datasource.hikari.maximum-pool-size=20 + +# Redis configuration +spring.data.redis.host=127.0.0.1 +spring.data.redis.port=6379 +spring.data.redis.username= +spring.data.redis.password= +spring.data.redis.database=0 +spring.cache.redis.key-prefix= + +spring.cache.cache-names=processed-visits,significant-places,users,magic-links,configurations,transport-mode-configs,avatarThumbnails,avatarData,user-settings +spring.cache.redis.time-to-live=1d + +# Upload configuration +spring.servlet.multipart.max-file-size=5GB +spring.servlet.multipart.max-request-size=5GB +server.tomcat.max-part-count=100 + +# Rqueue configuration +rqueue.web.enable=false +rqueue.job.enabled=false +rqueue.message.durability.in-terminal-state=0 +rqueue.key.prefix=\${spring.cache.redis.key-prefix} +rqueue.message.converter.provider.class=com.dedicatedcode.reitti.config.RQueueCustomMessageConverter + +# Application-specific settings +reitti.server.advertise-uri= 
+reitti.security.local-login.disable=false + +# OIDC / Security Settings +reitti.security.oidc.enabled=false +reitti.security.oidc.registration.enabled=false + +reitti.import.batch-size=10000 +reitti.import.processing-idle-start-time=10 + +reitti.geo-point-filter.max-speed-kmh=1000 +reitti.geo-point-filter.max-accuracy-meters=100 +reitti.geo-point-filter.history-lookback-hours=24 +reitti.geo-point-filter.window-size=50 + +reitti.process-data.schedule=0 */10 * * * * +reitti.process-data.refresh-views.schedule=0 0 4 * * * +reitti.imports.schedule=0 5/10 * * * * +reitti.imports.owntracks-recorder.schedule=\${reitti.imports.schedule} + +# Geocoding service configuration +reitti.geocoding.max-errors=10 +reitti.geocoding.photon.base-url= + +# Tiles Configuration +reitti.ui.tiles.cache.url=http://127.0.0.1 +reitti.ui.tiles.default.service=https://tile.openstreetmap.org/{z}/{x}/{y}.png +reitti.ui.tiles.default.attribution=© OpenStreetMap contributors + +# Data management configuration +reitti.data-management.enabled=false +reitti.data-management.preview-cleanup.cron=0 0 4 * * * + +reitti.storage.path=data/ +reitti.storage.cleanup.cron=0 0 4 * * * + +# Location data density normalization +reitti.location.density.target-points-per-minute=4 + +# Logging buffer +reitti.logging.buffer-size=1000 +reitti.logging.max-buffer-size=10000 + +spring.config.import=optional:oidc.properties +PROPEOF + # Update reitti.service dependencies + if [[ -f /etc/systemd/system/reitti.service ]]; then + sed -i 's/ rabbitmq-server\.service//g; s/ photon\.service//g' /etc/systemd/system/reitti.service + systemctl daemon-reload + fi + msg_ok "Rewrote application.properties (backup: application.properties.bak)" + fi + if check_for_gh_release "reitti" "dedicatedcode/reitti"; then msg_info "Stopping Service" systemctl stop reitti @@ -83,23 +180,6 @@ EOF msg_info "Starting Service" systemctl start reitti - chown -R www-data:www-data /var/cache/nginx - chmod -R 750 /var/cache/nginx - systemctl restart 
nginx - msg_ok "Started Service" - msg_ok "Updated successfully!" - fi - if check_for_gh_release "photon" "komoot/photon"; then - msg_info "Stopping Service" - systemctl stop photon - msg_ok "Stopped Service" - - rm -f /opt/photon/photon.jar - USE_ORIGINAL_FILENAME="true" fetch_and_deploy_gh_release "photon" "komoot/photon" "singlefile" "latest" "/opt/photon" "photon-0*.jar" - mv /opt/photon/photon-*.jar /opt/photon/photon.jar - - msg_info "Starting Service" - systemctl start photon msg_ok "Started Service" msg_ok "Updated successfully!" fi diff --git a/ct/revealjs.sh b/ct/revealjs.sh index 462434c6a..bc9a237a1 100644 --- a/ct/revealjs.sh +++ b/ct/revealjs.sh @@ -40,7 +40,7 @@ function update_script() { cd /opt/revealjs $STD npm install cp -f /opt/index.html /opt/revealjs - sed -i '25s/localhost/0.0.0.0/g' /opt/revealjs/gulpfile.js + sed -i 's/"vite"/"vite --host"/g' package.json rm -f /opt/index.html msg_ok "Updated RevealJS" diff --git a/ct/romm.sh b/ct/romm.sh index a1010c98e..c74394e5b 100644 --- a/ct/romm.sh +++ b/ct/romm.sh @@ -54,8 +54,12 @@ function update_script() { # Merge static assets into dist folder cp -rf /opt/romm/frontend/assets/* /opt/romm/frontend/dist/assets/ mkdir -p /opt/romm/frontend/dist/assets/romm - ln -sfn /var/lib/romm/resources /opt/romm/frontend/dist/assets/romm/resources - ln -sfn /var/lib/romm/assets /opt/romm/frontend/dist/assets/romm/assets + ROMM_BASE=$(grep '^ROMM_BASE_PATH=' /opt/romm/.env | cut -d'=' -f2) + ROMM_BASE=${ROMM_BASE:-/var/lib/romm} + ln -sfn "$ROMM_BASE"/resources /opt/romm/frontend/dist/assets/romm/resources + ln -sfn "$ROMM_BASE"/assets /opt/romm/frontend/dist/assets/romm/assets + sed -i "s|alias .*/library/;|alias ${ROMM_BASE}/library/;|" /etc/nginx/sites-available/romm + systemctl reload nginx msg_ok "Updated ROMM" msg_info "Starting Services" diff --git a/ct/rustdeskserver.sh b/ct/rustdeskserver.sh index adfb42e23..5109c5a35 100644 --- a/ct/rustdeskserver.sh +++ b/ct/rustdeskserver.sh @@ -48,8 +48,6 @@ 
function update_script() { msg_ok "Services started" msg_ok "Updated successfully!" - else - msg_ok "No update required. ${APP} is already at v${RELEASE}" fi exit } diff --git a/ct/scanopy.sh b/ct/scanopy.sh index e06de906e..b9f59b23e 100644 --- a/ct/scanopy.sh +++ b/ct/scanopy.sh @@ -7,9 +7,9 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV APP="Scanopy" var_tags="${var_tags:-analytics}" -var_cpu="${var_cpu:-2}" -var_ram="${var_ram:-3072}" -var_disk="${var_disk:-6}" +var_cpu="${var_cpu:-4}" +var_ram="${var_ram:-4096}" +var_disk="${var_disk:-8}" var_os="${var_os:-debian}" var_version="${var_version:-13}" var_unprivileged="${var_unprivileged:-1}" diff --git a/ct/seerr.sh b/ct/seerr.sh index f1052be36..a518b5205 100644 --- a/ct/seerr.sh +++ b/ct/seerr.sh @@ -128,6 +128,8 @@ EOF CLEAN_INSTALL=1 fetch_and_deploy_gh_release "seerr" "seerr-team/seerr" "tarball" + ensure_dependencies build-essential python3-setuptools + msg_info "Updating PNPM Version" pnpm_desired=$(grep -Po '"pnpm":\s*"\K[^"]+' /opt/seerr/package.json) NODE_VERSION="22" NODE_MODULE="pnpm@$pnpm_desired" setup_nodejs diff --git a/ct/semaphore.sh b/ct/semaphore.sh index d2d18a207..f1d1cb594 100644 --- a/ct/semaphore.sh +++ b/ct/semaphore.sh @@ -29,40 +29,38 @@ function update_script() { exit fi - if [[ -f /opt/semaphore/semaphore_db.bolt ]]; then - msg_warn "WARNING: Due to bugs with BoltDB database, update script will move your application" - msg_warn "to use SQLite database instead. Unfortunately, this will reset your application and make it a fresh" - msg_warn "installation. All your data will be lost!" - echo "" - read -r -p "${TAB3}Do you want to continue? (y/N): " CONFIRM - if [[ ! 
"$CONFIRM" =~ ^[Yy]$ ]]; then - exit 0 - else - msg_info "Moving from BoltDB to SQLite" - systemctl stop semaphore - rm -rf /opt/semaphore/semaphore_db.bolt - sed -i \ - -e 's|"bolt": {|"sqlite": {|' \ - -e 's|/semaphore_db.bolt"|/database.sqlite"|' \ - -e '/semaphore_db.bolt/d' \ - -e '/"dialect"/d' \ - -e '/^ },$/a\ "dialect": "sqlite",' \ - /opt/semaphore/config.json - SEM_PW=$(cat ~/semaphore.creds) - systemctl start semaphore - $STD semaphore user add --admin --login admin --email admin@helper-scripts.com --name Administrator --password "${SEM_PW}" --config /opt/semaphore/config.json - - msg_ok "Moved from BoltDB to SQLite" - fi - fi - if check_for_gh_release "semaphore" "semaphoreui/semaphore"; then + if [[ -f /opt/semaphore/semaphore_db.bolt ]]; then + msg_warn "WARNING: Due to bugs with BoltDB database, update script will move your application" + msg_warn "to use SQLite database instead. Make sure you have a backup of your data!" + echo "" + read -r -p "${TAB3}Do you want to continue? (y/N): " CONFIRM + if [[ ! 
"$CONFIRM" =~ ^[Yy]$ ]]; then + exit 0 + else + msg_info "Moving from BoltDB to SQLite" + sed -i \ + -e 's|"bolt": {|"sqlite": {|' \ + -e 's|/semaphore_db.bolt"|/database.sqlite"|' \ + -e '/semaphore_db.bolt/d' \ + -e '/"dialect"/d' \ + -e '/^ },$/a\ "dialect": "sqlite",' \ + /opt/semaphore/config.json + msg_ok "Moved from BoltDB to SQLite" + fi + fi + msg_info "Stopping Service" systemctl stop semaphore msg_ok "Stopped Service" fetch_and_deploy_gh_release "semaphore" "semaphoreui/semaphore" "binary" "latest" "/opt/semaphore" "semaphore_*_linux_amd64.deb" + if [[ -f /opt/semaphore/semaphore_db.bolt ]]; then + $STD semaphore migrate --from-boltdb /opt/semaphore/semaphore_db.bolt --config /opt/semaphore/config.json + rm -f /opt/semaphore/semaphore_db.bolt + fi + msg_info "Starting Service" systemctl start semaphore msg_ok "Started Service" diff --git a/ct/shelfmark.sh b/ct/shelfmark.sh index 04a4fa4bd..1fac461cc 100644 --- a/ct/shelfmark.sh +++ b/ct/shelfmark.sh @@ -29,8 +29,8 @@ function update_script() { exit fi - NODE_VERSION="22" setup_nodejs - PYTHON_VERSION="3.12" setup_uv + NODE_VERSION="24" setup_nodejs + PYTHON_VERSION="3.14" setup_uv if check_for_gh_release "shelfmark" "calibrain/shelfmark"; then msg_info "Stopping Service(s)" @@ -59,6 +59,7 @@ function update_script() { RELEASE_VERSION=$(cat "$HOME/.shelfmark") msg_info "Updating Shelfmark" + export VIRTUAL_ENV=/opt/shelfmark/venv sed -i "s/^RELEASE_VERSION=.*/RELEASE_VERSION=$RELEASE_VERSION/" /etc/shelfmark/.env cd /opt/shelfmark/src/frontend $STD npm ci @@ -67,9 +68,10 @@ function update_script() { cd /opt/shelfmark $STD uv venv -c ./venv $STD source ./venv/bin/activate - $STD uv pip install -r ./requirements-base.txt if [[ $(sed -n '/_BYPASS=/s/[^=]*=//p' /etc/shelfmark/.env) == "true" ]] && [[ $(sed -n '/BYPASSER=/s/[^=]*=//p' /etc/shelfmark/.env) == "false" ]]; then - $STD uv pip install -r ./requirements-shelfmark.txt + $STD uv sync --active --locked --no-default-groups --extra browser + else + $STD 
uv sync --active --locked --no-default-groups fi mv /opt/start.sh.bak /opt/shelfmark/start.sh msg_ok "Updated Shelfmark" diff --git a/ct/shlink.sh b/ct/shlink.sh new file mode 100644 index 000000000..e7fbef3a9 --- /dev/null +++ b/ct/shlink.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://shlink.io/ + +APP="Shlink" +var_tags="${var_tags:-url-shortener;analytics;php}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-4}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/shlink ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_release "shlink" "shlinkio/shlink"; then + msg_info "Stopping Service" + systemctl stop shlink + msg_ok "Stopped Service" + + msg_info "Backing up Data" + cp /opt/shlink/.env /opt/shlink.env.bak + cp -r /opt/shlink/data /opt/shlink_data_backup + msg_ok "Backed up Data" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "shlink" "shlinkio/shlink" "prebuild" "latest" "/opt/shlink" "shlink*_php8.5_dist.zip" + + msg_info "Restoring Data" + cp /opt/shlink.env.bak /opt/shlink/.env + rm -f /opt/shlink.env.bak + cp -r /opt/shlink_data_backup/. 
/opt/shlink/data + rm -rf /opt/shlink_data_backup + msg_ok "Restored Data" + + msg_info "Updating Application" + cd /opt/shlink + $STD php ./vendor/bin/rr get --no-interaction --location bin/ + chmod +x bin/rr + set -a + source /opt/shlink/.env + set +a + $STD php vendor/bin/shlink-installer init --no-interaction --clear-db-cache --skip-download-geolite + msg_ok "Updated Application" + + msg_info "Starting Service" + systemctl start shlink + msg_ok "Started Service" + msg_ok "Updated successfully!" + fi + + if [[ -d /opt/shlink-web-client ]]; then + if check_for_gh_release "shlink-web-client" "shlinkio/shlink-web-client"; then + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "shlink-web-client" "shlinkio/shlink-web-client" "prebuild" "latest" "/opt/shlink-web-client" "shlink-web-client_*_dist.zip" + msg_ok "Updated Web Client" + fi + fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access Shlink Web Client using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}" +echo -e "${INFO}${YW} Shlink HTTP API:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8080${CL}" diff --git a/ct/slskd.sh b/ct/slskd.sh index 89c1bf36f..39d7d5352 100644 --- a/ct/slskd.sh +++ b/ct/slskd.sh @@ -43,6 +43,10 @@ function update_script() { msg_info "Restoring config" mv /opt/slskd.yml.bak /opt/slskd/config/slskd.yml + + # Migrate 0.25.0 breaking config key renames + sed -i 's/^global:/transfers:/' /opt/slskd/config/slskd.yml + sed -i 's/^integration:/integrations:/' /opt/slskd/config/slskd.yml msg_ok "Restored config" msg_info "Starting Service(s)" diff --git a/ct/solidtime.sh b/ct/solidtime.sh new file mode 100644 index 000000000..9b8ba0de9 --- /dev/null +++ b/ct/solidtime.sh @@ -0,0 +1,77 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# 
Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://www.solidtime.io/ + +APP="SolidTime" +var_tags="${var_tags:-time-tracking;productivity;business}" +var_cpu="${var_cpu:-4}" +var_ram="${var_ram:-4096}" +var_disk="${var_disk:-8}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/solidtime ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_release "solidtime" "solidtime-io/solidtime"; then + msg_info "Stopping Services" + systemctl stop caddy + msg_ok "Stopped Services" + + msg_info "Backing up Data" + cp /opt/solidtime/.env /opt/solidtime.env.bak + cp -r /opt/solidtime/storage /opt/solidtime_storage_backup + msg_ok "Backed up Data" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "solidtime" "solidtime-io/solidtime" "tarball" + + msg_info "Restoring Data" + cp /opt/solidtime.env.bak /opt/solidtime/.env + rm -f /opt/solidtime.env.bak + cp -r /opt/solidtime_storage_backup/. /opt/solidtime/storage + rm -rf /opt/solidtime_storage_backup + msg_ok "Restored Data" + + msg_info "Updating Application" + cd /opt/solidtime + $STD composer install --no-dev --optimize-autoloader + $STD npm install + $STD npm run build + $STD php artisan migrate --force + $STD php artisan optimize:clear + chown -R www-data:www-data /opt/solidtime + msg_ok "Updated Application" + + msg_info "Starting Services" + systemctl start caddy + msg_ok "Started Services" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}" +echo -e "${INFO}${YW}HTTPS is not enabled by default (use domain + reverse proxy/TLS if needed).${CL}" diff --git a/ct/sonarqube.sh b/ct/sonarqube.sh index 0ac979445..ab7cd3c47 100644 --- a/ct/sonarqube.sh +++ b/ct/sonarqube.sh @@ -43,6 +43,7 @@ function update_script() { RELEASE=$(get_latest_github_release "SonarSource/sonarqube") curl -fsSL "https://binaries.sonarsource.com/Distribution/sonarqube/sonarqube-${RELEASE}.zip" -o $temp_file unzip -q "$temp_file" -d /opt + rm -f "$temp_file" mv /opt/sonarqube-${RELEASE} /opt/sonarqube echo "${RELEASE}" > ~/.sonarqube msg_ok "Updated SonarQube" diff --git a/ct/sonarr.sh b/ct/sonarr.sh index 7ee458645..a7f31f8b7 100644 --- a/ct/sonarr.sh +++ b/ct/sonarr.sh @@ -23,21 +23,24 @@ function update_script() { header_info check_container_storage check_container_resources + if [[ ! -d /var/lib/sonarr/ ]]; then msg_error "No ${APP} Installation Found!" exit fi - msg_info "Stopping Service" - systemctl stop sonarr - msg_ok "Stopped Service" + if check_for_gh_release "Sonarr" "Sonarr/Sonarr"; then + msg_info "Stopping Service" + systemctl stop sonarr + msg_ok "Stopped Service" - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "Sonarr" "Sonarr/Sonarr" "prebuild" "latest" "/opt/Sonarr" "Sonarr.main.*.linux-x64.tar.gz" + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "Sonarr" "Sonarr/Sonarr" "prebuild" "latest" "/opt/Sonarr" "Sonarr.main.*.linux-x64.tar.gz" - msg_info "Starting Service" - systemctl start sonarr - msg_ok "Started Service" - msg_ok "Updated successfully!" + msg_info "Starting Service" + systemctl start sonarr + msg_ok "Started Service" + msg_ok "Updated successfully!" 
+ fi exit } diff --git a/ct/soulsync.sh b/ct/soulsync.sh new file mode 100644 index 000000000..d714a5e4b --- /dev/null +++ b/ct/soulsync.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/Nezreka/SoulSync + +APP="SoulSync" +var_tags="${var_tags:-music;automation;media}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-8}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -f ~/.soulsync ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_release "soulsync" "Nezreka/SoulSync"; then + msg_info "Stopping Service" + systemctl stop soulsync + msg_ok "Stopped Service" + + msg_info "Backing up Data" + mv /opt/soulsync/config /opt/soulsync-config.bak + mv /opt/soulsync/data /opt/soulsync-data.bak + msg_ok "Backed up Data" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "soulsync" "Nezreka/SoulSync" "tarball" + + msg_info "Updating Python Dependencies" + cd /opt/soulsync + $STD uv venv --clear /opt/soulsync/.venv --python 3.11 + $STD /opt/soulsync/.venv/bin/pip install -r requirements.txt + msg_ok "Updated Python Dependencies" + + mv /opt/soulsync-config.bak /opt/soulsync/config + mv /opt/soulsync-data.bak /opt/soulsync/data + + msg_info "Starting Service" + systemctl start soulsync + msg_ok "Started Service" + msg_ok "Updated ${APP}" + fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e 
"${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8008${CL}" diff --git a/ct/sparkyfitness.sh b/ct/sparkyfitness.sh index 5ef6c7609..394091ffd 100644 --- a/ct/sparkyfitness.sh +++ b/ct/sparkyfitness.sh @@ -51,16 +51,38 @@ function update_script() { msg_info "Updating Sparky Fitness Backend" cd /opt/sparkyfitness/SparkyFitnessServer - $STD npm install + $STD pnpm install msg_ok "Updated Sparky Fitness Backend" msg_info "Updating Sparky Fitness Frontend (Patience)" - cd /opt/sparkyfitness/SparkyFitnessFrontend + cd /opt/sparkyfitness $STD pnpm install + cd /opt/sparkyfitness/SparkyFitnessFrontend $STD pnpm run build cp -a /opt/sparkyfitness/SparkyFitnessFrontend/dist/. /var/www/sparkyfitness/ msg_ok "Updated Sparky Fitness Frontend" + msg_info "Refreshing SparkyFitness Service" + cat <<EOF >/etc/systemd/system/sparkyfitness-server.service + [Unit] + Description=SparkyFitness Backend Service + After=network.target postgresql.service + Requires=postgresql.service + + [Service] + Type=simple + WorkingDirectory=/opt/sparkyfitness/SparkyFitnessServer + EnvironmentFile=/etc/sparkyfitness/.env + ExecStart=/opt/sparkyfitness/SparkyFitnessServer/node_modules/.bin/tsx SparkyFitnessServer.js + Restart=always + RestartSec=5 + + [Install] + WantedBy=multi-user.target +EOF + systemctl daemon-reload + msg_ok "Refreshed SparkyFitness Service" + msg_info "Restoring data" cp -r /opt/sparkyfitness_backup/. 
/opt/sparkyfitness/SparkyFitnessServer/ rm -rf /opt/sparkyfitness_backup diff --git a/ct/split-pro.sh b/ct/split-pro.sh new file mode 100644 index 000000000..148eeff52 --- /dev/null +++ b/ct/split-pro.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: johanngrobe +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/oss-apps/split-pro + +APP="Split-Pro" +var_tags="${var_tags:-finance;expense-sharing}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-4096}" +var_disk="${var_disk:-6}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/split-pro ]]; then + msg_error "No Split Pro Installation Found!" + exit + fi + + if check_for_gh_release "split-pro" "oss-apps/split-pro"; then + msg_info "Stopping Service" + systemctl stop split-pro + msg_ok "Stopped Service" + + msg_info "Backing up Data" + cp /opt/split-pro/.env /opt/split-pro.env + msg_ok "Backed up Data" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "split-pro" "oss-apps/split-pro" "tarball" + + msg_info "Building Application" + cd /opt/split-pro + $STD pnpm install --frozen-lockfile + $STD pnpm build + cp /opt/split-pro.env /opt/split-pro/.env + rm -f /opt/split-pro.env + ln -sf /opt/split-pro_data/uploads /opt/split-pro/uploads + $STD pnpm exec prisma migrate deploy + msg_ok "Built Application" + + msg_info "Starting Service" + systemctl start split-pro + msg_ok "Started Service" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}" diff --git a/ct/step-ca.sh b/ct/step-ca.sh new file mode 100644 index 000000000..082222282 --- /dev/null +++ b/ct/step-ca.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: Joerg Heinemann (heinemannj) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/smallstep/certificates + +APP="step-ca" +var_tags="${var_tags:-certificate-authority;pki;acme-server}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-512}" +var_disk="${var_disk:-2}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + if [[ ! -f /etc/apt/sources.list.d/smallstep.sources ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + msg_info "Updating step-ca and step-cli" + $STD apt update + $STD apt upgrade -y step-ca step-cli + + # Patch for making $STD happy (/usr/bin/step is a symlink to /usr/bin/step-cli) + STEPBIN="$(which step)" + rm -f "$STEPBIN" + cp -f "$(which step-cli)" "$STEPBIN" + + $STD systemctl restart step-ca + msg_ok "Updated step-ca and step-cli" + + if check_for_gh_release "step-badger" "lukasz-lobocki/step-badger"; then + fetch_and_deploy_gh_release "step-badger" "lukasz-lobocki/step-badger" "prebuild" "latest" "/opt/step-badger" "step-badger_Linux_x86_64.tar.gz" + msg_ok "Updated step-badger" + fi + exit +} + +start +build_container +description + +msg_ok "Completed successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}https://${IP}/provisioners${CL}" diff --git a/ct/verdaccio.sh b/ct/storybook.sh similarity index 59% rename from ct/verdaccio.sh rename to ct/storybook.sh index c8a181c62..06e89cf3d 100644 --- a/ct/verdaccio.sh +++ b/ct/storybook.sh @@ -1,12 +1,13 @@ #!/usr/bin/env bash source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) -# Copyright (c) 2021-2026 community-scripts ORG -# Author: BrynnJKnight -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://verdaccio.org/ | Github: https://github.com/verdaccio/verdaccio -APP="Verdaccio" -var_tags="${var_tags:-dev-tools;npm;registry}" +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/storybookjs/storybook + +APP="Storybook" +var_tags="${var_tags:-dev-tools;frontend;ui}" var_cpu="${var_cpu:-2}" var_ram="${var_ram:-2048}" var_disk="${var_disk:-8}" @@ -23,19 +24,23 @@ function update_script() { header_info check_container_storage 
check_container_resources - if [[ ! -f /etc/systemd/system/verdaccio.service ]]; then + + if [[ ! -f /opt/storybook/.projectpath ]]; then msg_error "No ${APP} Installation Found!" exit fi - msg_info "Updating LXC Container" - $STD apt update - $STD apt upgrade -y - msg_ok "Updated LXC Container" + PROJECT_PATH=$(cat /opt/storybook/.projectpath) - NODE_VERSION="24" NODE_MODULE="verdaccio" setup_nodejs - systemctl restart verdaccio - msg_ok "Updated successfully!" + if [[ ! -d "$PROJECT_PATH" ]]; then + msg_error "Project directory not found: $PROJECT_PATH" + exit + fi + + msg_info "Updating Storybook" + cd "$PROJECT_PATH" + $STD npx storybook@latest upgrade --yes + msg_ok "Updated Storybook" exit } @@ -43,7 +48,7 @@ start build_container description -msg_ok "Completed successfully!\n" +msg_ok "Completed Successfully!\n" echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:4873${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:6006${CL}" diff --git a/ct/storyteller.sh b/ct/storyteller.sh new file mode 100644 index 000000000..5f6d6c189 --- /dev/null +++ b/ct/storyteller.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://gitlab.com/storyteller-platform/storyteller + +APP="Storyteller" +var_tags="${var_tags:-media;ebook;audiobook}" +var_cpu="${var_cpu:-4}" +var_ram="${var_ram:-10240}" +var_disk="${var_disk:-20}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + 
if [[ ! -d /opt/storyteller ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gl_release "storyteller" "storyteller-platform/storyteller"; then + msg_info "Stopping Service" + systemctl stop storyteller + msg_ok "Stopped Service" + + msg_info "Backing up Data" + cp /opt/storyteller/.env /opt/storyteller_env.bak + msg_ok "Backed up Data" + + CLEAN_INSTALL=1 fetch_and_deploy_gl_release "storyteller" "storyteller-platform/storyteller" "tarball" "latest" "/opt/storyteller" + + msg_info "Restoring Configuration" + mv /opt/storyteller_env.bak /opt/storyteller/.env + msg_ok "Restored Configuration" + + msg_info "Rebuilding Storyteller" + cd /opt/storyteller + export NODE_OPTIONS="--max-old-space-size=4096" + $STD yarn install --network-timeout 600000 + $STD gcc -g -fPIC -rdynamic -shared web/sqlite/uuid.c -o web/sqlite/uuid.c.so + export CI=1 + export NODE_ENV=production + export NEXT_TELEMETRY_DISABLED=1 + export SQLITE_NATIVE_BINDING=/opt/storyteller/node_modules/better-sqlite3/build/Release/better_sqlite3.node + $STD yarn workspaces foreach -Rpt --from @storyteller-platform/web --exclude @storyteller-platform/eslint run build + mkdir -p /opt/storyteller/web/.next/standalone/web/.next/static + cp -rT /opt/storyteller/web/.next/static /opt/storyteller/web/.next/standalone/web/.next/static + if [[ -d /opt/storyteller/web/public ]]; then + mkdir -p /opt/storyteller/web/.next/standalone/web/public + cp -rT /opt/storyteller/web/public /opt/storyteller/web/.next/standalone/web/public + fi + mkdir -p /opt/storyteller/web/.next/standalone/web/migrations + cp -rT /opt/storyteller/web/migrations /opt/storyteller/web/.next/standalone/web/migrations + mkdir -p /opt/storyteller/web/.next/standalone/web/sqlite + cp -rT /opt/storyteller/web/sqlite /opt/storyteller/web/.next/standalone/web/sqlite + ln -sf /opt/storyteller/.env /opt/storyteller/web/.next/standalone/web/.env + msg_ok "Rebuilt Storyteller" + + msg_info "Starting Service" + systemctl start 
storyteller + msg_ok "Started Service" + msg_ok "Updated successfully!" + fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8001${CL}" diff --git a/ct/tandoor.sh b/ct/tandoor.sh index 4a1ce1739..451ed639e 100644 --- a/ct/tandoor.sh +++ b/ct/tandoor.sh @@ -33,6 +33,10 @@ function update_script() { exit fi + if ! grep -q "^ALLOWED_HOSTS=" /opt/tandoor/.env; then + echo "ALLOWED_HOSTS=${LOCAL_IP}" >>/opt/tandoor/.env + fi + if check_for_gh_release "tandoor" "TandoorRecipes/recipes"; then msg_info "Stopping Service" systemctl stop tandoor diff --git a/ct/tautulli.sh b/ct/tautulli.sh index 42add27fe..690320d7c 100644 --- a/ct/tautulli.sh +++ b/ct/tautulli.sh @@ -51,6 +51,7 @@ function update_script() { $STD source /opt/Tautulli/.venv/bin/activate $STD uv pip install -r requirements.txt $STD uv pip install pyopenssl + $STD uv pip install "setuptools<81" msg_ok "Updated Tautulli" msg_info "Restoring config and database" diff --git a/ct/tdarr.sh b/ct/tdarr.sh index fc983befb..d8aaedaf0 100644 --- a/ct/tdarr.sh +++ b/ct/tdarr.sh @@ -33,12 +33,16 @@ function update_script() { $STD apt upgrade -y rm -rf /opt/tdarr/Tdarr_Updater cd /opt/tdarr - RELEASE=$(curl -fsSL https://f000.backblazeb2.com/file/tdarrs/versions.json | grep -oP '(?<="Tdarr_Updater": ")[^"]+' | grep linux_x64 | head -n 1) - curl -fsSL "$RELEASE" -o Tdarr_Updater.zip + RELEASE=$(curl_with_retry "https://f000.backblazeb2.com/file/tdarrs/versions.json" "-" | grep -oP '(?<="Tdarr_Updater": ")[^"]+' | grep linux_x64 | head -n 1) + curl_with_retry "$RELEASE" "Tdarr_Updater.zip" $STD unzip Tdarr_Updater.zip chmod +x Tdarr_Updater $STD ./Tdarr_Updater rm -rf /opt/tdarr/Tdarr_Updater.zip + [[ -f /opt/tdarr/Tdarr_Server/Tdarr_Server ]] || { + msg_error "Tdarr_Updater failed — tdarr.io may be 
blocked by local DNS" + exit 250 + } msg_ok "Updated Tdarr" msg_ok "Updated successfully!" exit diff --git a/ct/teable.sh b/ct/teable.sh new file mode 100644 index 000000000..0dcd93b69 --- /dev/null +++ b/ct/teable.sh @@ -0,0 +1,82 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/teableio/teable + +APP="Teable" +var_tags="${var_tags:-database;no-code;spreadsheet}" +var_cpu="${var_cpu:-4}" +var_ram="${var_ram:-10240}" +var_disk="${var_disk:-25}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/teable ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + if check_for_gh_release "teable" "teableio/teable"; then + msg_info "Stopping Service" + systemctl stop teable + msg_ok "Stopped Service" + + msg_info "Backing up Configuration" + cp /opt/teable/.env /opt/teable.env.bak + msg_ok "Backed up Configuration" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "teable" "teableio/teable" "tarball" + + msg_info "Restoring Configuration" + mv /opt/teable.env.bak /opt/teable/.env + msg_ok "Restored Configuration" + + msg_info "Rebuilding Teable" + cd /opt/teable + TEABLE_VERSION=$(cat ~/.teable) + echo "NEXT_PUBLIC_BUILD_VERSION=\"${TEABLE_VERSION}\"" >>apps/nextjs-app/.env + export HUSKY=0 + export NODE_OPTIONS="--max-old-space-size=8192" + $STD pnpm install --frozen-lockfile + $STD pnpm -F @teable/db-main-prisma prisma-generate --schema ./prisma/postgres/schema.prisma + NODE_ENV=production NEXT_BUILD_ENV_TYPECHECK=false \ + $STD pnpm -r --filter '!playground' run build + msg_ok "Rebuilt Teable" + + msg_info "Running Database Migrations" + source /opt/teable/.env + $STD pnpm -F @teable/db-main-prisma prisma-migrate deploy --schema ./prisma/postgres/schema.prisma + msg_ok "Ran Database Migrations" + + msg_info "Starting Service" + systemctl start teable + msg_ok "Started Service" + msg_ok "Updated successfully!" + else + msg_ok "No update available." + fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}" diff --git a/ct/technitiumdns.sh b/ct/technitiumdns.sh index 21a13d306..6375c8462 100644 --- a/ct/technitiumdns.sh +++ b/ct/technitiumdns.sh @@ -32,8 +32,8 @@ function update_script() { systemctl daemon-reload systemctl enable -q --now technitium fi - if is_package_installed "aspnetcore-runtime-8.0"; then - $STD apt remove -y aspnetcore-runtime-8.0 + if ! 
is_package_installed "aspnetcore-runtime-10.0"; then + $STD apt remove -y aspnetcore-runtime-8.0 aspnetcore-runtime-9.0 2>/dev/null || true [ -f /etc/apt/sources.list.d/microsoft-prod.list ] && rm -f /etc/apt/sources.list.d/microsoft-prod.list [ -f /usr/share/keyrings/microsoft-prod.gpg ] && rm -f /usr/share/keyrings/microsoft-prod.gpg setup_deb822_repo \ @@ -42,18 +42,15 @@ function update_script() { "https://packages.microsoft.com/debian/13/prod/" \ "trixie" \ "main" - $STD apt install -y aspnetcore-runtime-9.0 + $STD apt install -y aspnetcore-runtime-10.0 fi RELEASE=$(curl -fsSL https://technitium.com/dns/ | grep -oP 'Version \K[\d.]+') - if [[ ! -f ~/.technitium || ${RELEASE} != "$(cat ~/.technitium)" ]]; then - msg_info "Updating Technitium DNS" - curl -fsSL "https://download.technitium.com/dns/DnsServerPortable.tar.gz" -o /opt/DnsServerPortable.tar.gz - $STD tar zxvf /opt/DnsServerPortable.tar.gz -C /opt/technitium/dns/ - rm -f /opt/DnsServerPortable.tar.gz + if [[ ! -f ~/.technitium || ${RELEASE} != "$(cat ~/.technitium 2>/dev/null)" ]]; then + systemctl stop technitium + fetch_and_deploy_from_url "https://download.technitium.com/dns/DnsServerPortable.tar.gz" /opt/technitium/dns echo "${RELEASE}" >~/.technitium - systemctl restart technitium - msg_ok "Updated Technitium DNS" + systemctl start technitium msg_ok "Updated successfully!" else msg_ok "No update required. Technitium DNS is already at v${RELEASE}." 
diff --git a/ct/daemonsync.sh b/ct/teleport.sh similarity index 68% rename from ct/daemonsync.sh rename to ct/teleport.sh index 23344e210..fe5e1ebbb 100644 --- a/ct/daemonsync.sh +++ b/ct/teleport.sh @@ -1,15 +1,16 @@ #!/usr/bin/env bash source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) -# Copyright (c) 2021-2026 tteck -# Author: tteck (tteckster) -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://daemonsync.me/ -APP="Daemon Sync" -var_tags="${var_tags:-sync}" +# Copyright (c) 2021-2026 community-scripts ORG +# Author: Slaviša Arežina (tremor021) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://goteleport.com/ + +APP="Teleport" +var_tags="${var_tags:-zero-trust}" var_cpu="${var_cpu:-1}" -var_ram="${var_ram:-512}" -var_disk="${var_disk:-8}" +var_ram="${var_ram:-1024}" +var_disk="${var_disk:-4}" var_os="${var_os:-debian}" var_version="${var_version:-13}" var_unprivileged="${var_unprivileged:-1}" @@ -23,14 +24,14 @@ function update_script() { header_info check_container_storage check_container_resources - if [[ ! -d /var ]]; then + if [[ ! -f /etc/teleport.yaml ]]; then msg_error "No ${APP} Installation Found!" exit fi - msg_info "Updating LXC" + + msg_info "Updating Teleport" $STD apt update - $STD apt -y upgrade - msg_ok "Updated LXC" + $STD apt upgrade -y msg_ok "Updated successfully!" 
exit } @@ -42,4 +43,4 @@ description msg_ok "Completed successfully!\n" echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8084${CL}" +echo -e "${TAB}${GATEWAY}${BGN}https://${IP}:3080${CL}" diff --git a/ct/termix.sh b/ct/termix.sh index de97d2a44..ec410befb 100644 --- a/ct/termix.sh +++ b/ct/termix.sh @@ -29,17 +29,123 @@ function update_script() { exit fi + if check_for_gh_tag "guacd" "apache/guacamole-server"; then + msg_info "Stopping guacd" + systemctl stop guacd 2>/dev/null || true + msg_ok "Stopped guacd" + + ensure_dependencies \ + libcairo2-dev \ + libjpeg62-turbo-dev \ + libpng-dev \ + libtool-bin \ + uuid-dev \ + libvncserver-dev \ + freerdp3-dev \ + libssh2-1-dev \ + libtelnet-dev \ + libwebsockets-dev \ + libpulse-dev \ + libvorbis-dev \ + libwebp-dev \ + libssl-dev \ + libpango1.0-dev \ + libswscale-dev \ + libavcodec-dev \ + libavutil-dev \ + libavformat-dev + + msg_info "Updating Guacamole Server (guacd)" + fetch_and_deploy_gh_tag "guacd" "apache/guacamole-server" "${CHECK_UPDATE_RELEASE}" "/opt/guacamole-server" + cd /opt/guacamole-server + export CPPFLAGS="-Wno-error=deprecated-declarations" + $STD autoreconf -fi + $STD ./configure --with-init-dir=/etc/init.d --enable-allow-freerdp-snapshots + $STD make + $STD make install + $STD ldconfig + cd /opt + rm -rf /opt/guacamole-server + msg_ok "Updated Guacamole Server (guacd) to ${CHECK_UPDATE_RELEASE}" + + if [[ ! -f /etc/guacamole/guacd.conf ]]; then + mkdir -p /etc/guacamole + cat <<EOF >/etc/guacamole/guacd.conf +[server] +bind_host = 127.0.0.1 +bind_port = 4822 +EOF + fi + + if [[ ! 
-f /etc/systemd/system/guacd.service ]] || grep -q "Type=forking" /etc/systemd/system/guacd.service 2>/dev/null; then + cat <<EOF >/etc/systemd/system/guacd.service +[Unit] +Description=Guacamole Proxy Daemon (guacd) +After=network.target + +[Service] +Type=simple +ExecStart=/usr/local/sbin/guacd -f -b 127.0.0.1 -l 4822 +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF + fi + + if ! grep -q "guacd.service" /etc/systemd/system/termix.service 2>/dev/null; then + sed -i '/^After=network.target/s/$/ guacd.service/' /etc/systemd/system/termix.service + sed -i '/^\[Unit\]/a Wants=guacd.service' /etc/systemd/system/termix.service + fi + + systemctl daemon-reload + systemctl enable -q --now guacd + fi + if check_for_gh_release "termix" "Termix-SSH/Termix"; then - msg_info "Stopping Service" + msg_info "Stopping Termix" systemctl stop termix - msg_ok "Stopped Service" + msg_ok "Stopped Termix" + + msg_info "Migrating Configuration" + if [[ ! -f /opt/termix/.env ]]; then + cat <<EOF >/opt/termix/.env +NODE_ENV=production +DATA_DIR=/opt/termix/data +GUACD_HOST=127.0.0.1 +GUACD_PORT=4822 +EOF + fi + if ! 
grep -q "EnvironmentFile" /etc/systemd/system/termix.service 2>/dev/null; then + cat <<EOF >/etc/systemd/system/termix.service +[Unit] +Description=Termix Backend +After=network.target guacd.service +Wants=guacd.service + +[Service] +Type=simple +User=root +WorkingDirectory=/opt/termix +EnvironmentFile=/opt/termix/.env +ExecStart=/usr/bin/node /opt/termix/dist/backend/backend/starter.js +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF + systemctl daemon-reload + fi + msg_ok "Migrated Configuration" msg_info "Backing up Data" cp -r /opt/termix/data /opt/termix_data_backup cp -r /opt/termix/uploads /opt/termix_uploads_backup msg_ok "Backed up Data" - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "termix" "Termix-SSH/Termix" + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "termix" "Termix-SSH/Termix" "tarball" msg_info "Recreating Directories" mkdir -p /opt/termix/html \ @@ -49,6 +155,8 @@ function update_script() { /opt/termix/nginx/client_body msg_ok "Recreated Directories" + NODE_VERSION="24" setup_nodejs + msg_info "Building Frontend" cd /opt/termix export COREPACK_ENABLE_DOWNLOAD_PROMPT=0 @@ -91,20 +199,22 @@ function update_script() { cp /etc/nginx/nginx.conf /etc/nginx/nginx.conf.bak curl -fsSL "https://raw.githubusercontent.com/Termix-SSH/Termix/main/docker/nginx.conf" -o /etc/nginx/nginx.conf sed -i '/^master_process/d' /etc/nginx/nginx.conf - sed -i '/^pid \/app\/nginx/d' /etc/nginx/nginx.conf + sed -i 's|pid /tmp/nginx/nginx.pid;|pid /run/nginx.pid;|' /etc/nginx/nginx.conf + sed -i 's|error_log /tmp/nginx/error.log|error_log /var/log/nginx/error.log|' /etc/nginx/nginx.conf + sed -i 's|access_log /tmp/nginx/access.log|access_log /var/log/nginx/access.log|' /etc/nginx/nginx.conf sed -i 's|/app/html|/opt/termix/html|g' /etc/nginx/nginx.conf sed -i 's|/app/nginx|/opt/termix/nginx|g' /etc/nginx/nginx.conf sed -i 's|listen ${PORT};|listen 80;|g' /etc/nginx/nginx.conf - + nginx -t && systemctl reload nginx msg_ok "Updated Nginx Configuration" 
else msg_warn "Nginx configuration not updated. If Termix doesn't work, restore from backup or update manually." fi - msg_info "Starting Service" + msg_info "Starting Termix" systemctl start termix - msg_ok "Started Service" + msg_ok "Started Termix" msg_ok "Updated successfully!" fi exit diff --git a/ct/threadfin.sh b/ct/threadfin.sh index 49334dcdf..38d798512 100644 --- a/ct/threadfin.sh +++ b/ct/threadfin.sh @@ -29,12 +29,13 @@ function update_script() { exit fi - if check_for_gh_release "threadfin" "threadfin/threadfin"; then + if check_for_gh_release "threadfin-app" "threadfin/threadfin"; then msg_info "Stopping Service" systemctl stop threadfin msg_ok "Stopped Service" - fetch_and_deploy_gh_release "threadfin" "threadfin/threadfin" "singlefile" "latest" "/opt/threadfin" "Threadfin_linux_amd64" + fetch_and_deploy_gh_release "threadfin-app" "threadfin/threadfin" "singlefile" "latest" "/opt/threadfin" "Threadfin_linux_amd64" + mv /opt/threadfin/threadfin-app /opt/threadfin/threadfin msg_info "Starting Service" systemctl start threadfin diff --git a/ct/tracearr.sh b/ct/tracearr.sh index e79736932..5a1596fcb 100644 --- a/ct/tracearr.sh +++ b/ct/tracearr.sh @@ -8,7 +8,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV APP="Tracearr" var_tags="${var_tags:-media}" var_cpu="${var_cpu:-2}" -var_ram="${var_ram:-2048}" +var_ram="${var_ram:-8192}" var_disk="${var_disk:-10}" var_os="${var_os:-debian}" var_version="${var_version:-13}" @@ -102,7 +102,7 @@ EOF if check_for_gh_release "tracearr" "connorgallopo/Tracearr"; then msg_info "Stopping Services" - systemctl stop tracearr postgresql redis + systemctl stop tracearr postgresql redis-server msg_ok "Stopped Services" msg_info "Updating pnpm" @@ -115,6 +115,7 @@ EOF msg_info "Building Tracearr" export TZ=$(cat /etc/timezone) + export NODE_OPTIONS="--max-old-space-size=4096" cd /opt/tracearr.build $STD pnpm install --frozen-lockfile --force $STD pnpm turbo telemetry disable @@ -140,7 +141,7 
@@ EOF msg_ok "Built Tracearr" msg_info "Configuring Tracearr" - sed -i "s/^APP_VERSION=.*/APP_VERSION=$(cat /root/.tracearr)/" /data/tracearr/.env + sed -i "s|^APP_VERSION=.*|APP_VERSION=${CHECK_UPDATE_RELEASE#v}|" /data/tracearr/.env chmod 600 /data/tracearr/.env chown -R tracearr:tracearr /data/tracearr mkdir -p /data/backup @@ -148,7 +149,7 @@ EOF msg_ok "Configured Tracearr" msg_info "Starting services" - systemctl start postgresql redis tracearr + systemctl start postgresql redis-server tracearr msg_ok "Started services" msg_ok "Updated successfully!" else diff --git a/ct/transmute.sh b/ct/transmute.sh new file mode 100644 index 000000000..9d80279a7 --- /dev/null +++ b/ct/transmute.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/transmute-app/transmute + +APP="Transmute" +var_tags="${var_tags:-files;converter}" +var_cpu="${var_cpu:-4}" +var_ram="${var_ram:-4096}" +var_disk="${var_disk:-16}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/transmute ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + fetch_and_deploy_gh_release "calibre" "kovidgoyal/calibre" "prebuild" "latest" "/opt/calibre" "calibre-*-x86_64.txz" + ln -sf /opt/calibre/ebook-convert /usr/bin/ebook-convert + fetch_and_deploy_gh_release "drawio" "jgraph/drawio-desktop" "binary" "latest" "" "drawio-amd64-*.deb" + fetch_and_deploy_gh_release "pandoc" "jgm/pandoc" "binary" "latest" "" "pandoc-*-amd64.deb" + + if check_for_gh_release "transmute" "transmute-app/transmute"; then + msg_info "Stopping Service" + systemctl stop transmute + msg_ok "Stopped Service" + + msg_info "Backing up Data" + cp /opt/transmute/backend/.env /opt/transmute.env.bak + cp -r /opt/transmute/data /opt/transmute_data_bak + msg_ok "Backed up Data" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "transmute" "transmute-app/transmute" "tarball" + + msg_info "Updating Python Dependencies" + cd /opt/transmute + $STD uv venv --clear /opt/transmute/.venv + $STD uv pip install --python /opt/transmute/.venv/bin/python -r requirements.txt + msg_ok "Updated Python Dependencies" + + msg_info "Rebuilding Frontend" + cd /opt/transmute/frontend + $STD npm ci + $STD npm run build + msg_ok "Rebuilt Frontend" + + msg_info "Restoring Data" + cp /opt/transmute.env.bak /opt/transmute/backend/.env + cp -r /opt/transmute_data_bak/. /opt/transmute/data/ + rm -f /opt/transmute.env.bak + rm -rf /opt/transmute_data_bak + msg_ok "Restored Data" + + msg_info "Starting Service" + systemctl start transmute + msg_ok "Started Service" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3313${CL}" diff --git a/ct/trek.sh b/ct/trek.sh new file mode 100644 index 000000000..0e3f679a9 --- /dev/null +++ b/ct/trek.sh @@ -0,0 +1,84 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/mauriceboe/TREK + +APP="TREK" +var_tags="${var_tags:-travel;planning;collaboration}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-8}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/trek ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + if check_for_gh_release "trek" "mauriceboe/TREK"; then + msg_info "Stopping Service" + systemctl stop trek + msg_ok "Stopped Service" + + msg_info "Backing up Data" + cp /opt/trek/server/.env /opt/trek.env.bak + mv /opt/trek/data /opt/trek-data.bak + mv /opt/trek/uploads /opt/trek-uploads.bak + msg_ok "Backed up Data" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "trek" "mauriceboe/TREK" "tarball" + + msg_info "Building Client" + cd /opt/trek/client + $STD npm ci + $STD npm run build + mkdir -p /opt/trek/server/public + cp -r /opt/trek/client/dist/* /opt/trek/server/public/ + cp -r /opt/trek/client/public/fonts /opt/trek/server/public/fonts 2>/dev/null || true + msg_ok "Built Client" + + msg_info "Installing Server Dependencies" + cd /opt/trek/server + $STD npm ci + msg_ok "Installed Server Dependencies" + + msg_info "Restoring Data" + mv /opt/trek-data.bak /opt/trek/data + mv /opt/trek-uploads.bak /opt/trek/uploads + rm -rf /opt/trek/server/data /opt/trek/server/uploads + ln -s /opt/trek/data /opt/trek/server/data + ln -s /opt/trek/uploads /opt/trek/server/uploads + cp /opt/trek.env.bak /opt/trek/server/.env + rm -f /opt/trek.env.bak + msg_ok "Restored Data" + + msg_info "Starting Service" + systemctl start trek + msg_ok "Started Service" + msg_ok "Updated Successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}" diff --git a/ct/tubearchivist.sh b/ct/tubearchivist.sh new file mode 100644 index 000000000..d7e6f316f --- /dev/null +++ b/ct/tubearchivist.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/tubearchivist/tubearchivist + +APP="Tube Archivist" +var_tags="${var_tags:-media;youtube;archiving}" +var_cpu="${var_cpu:-4}" +var_ram="${var_ram:-6144}" +var_disk="${var_disk:-30}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/tubearchivist ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + if check_for_gh_release "tubearchivist" "tubearchivist/tubearchivist"; then + msg_info "Stopping Services" + systemctl stop tubearchivist tubearchivist-celery tubearchivist-beat + msg_ok "Stopped Services" + + msg_info "Backing up Data" + cp /opt/tubearchivist/.env /opt/tubearchivist_env.bak + msg_ok "Backed up Data" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "tubearchivist" "tubearchivist/tubearchivist" "tarball" + + msg_info "Rebuilding Tube Archivist" + cd /opt/tubearchivist/frontend + $STD npm install + $STD npm run build:deploy + mkdir -p /opt/tubearchivist/backend/static + cp -r /opt/tubearchivist/frontend/dist/* /opt/tubearchivist/backend/static/ + cp /opt/tubearchivist/docker_assets/backend_start.py /opt/tubearchivist/backend/ + $STD uv pip install --python /opt/tubearchivist/.venv/bin/python -r /opt/tubearchivist/backend/requirements.txt + if [[ -f /opt/tubearchivist/backend/requirements.plugins.txt ]]; then + mkdir -p /opt/yt_plugins/bgutil + $STD uv pip install --python /opt/tubearchivist/.venv/bin/python --target /opt/yt_plugins/bgutil -r /opt/tubearchivist/backend/requirements.plugins.txt + fi + msg_ok "Rebuilt Tube Archivist" + + msg_info "Restoring Configuration" + mv /opt/tubearchivist_env.bak /opt/tubearchivist/.env + sed -i 's|^TA_APP_DIR=/opt/tubearchivist$|TA_APP_DIR=/opt/tubearchivist/backend|' /opt/tubearchivist/.env + sed -i 's|^TA_CACHE_DIR=/opt/tubearchivist/cache$|TA_CACHE_DIR=/cache|' /opt/tubearchivist/.env + sed -i 's|^TA_MEDIA_DIR=/opt/tubearchivist/media$|TA_MEDIA_DIR=/youtube|' /opt/tubearchivist/.env + ln -sf /opt/tubearchivist/cache /cache + ln -sf /opt/tubearchivist/media /youtube + ln -sf /opt/tubearchivist/.env /opt/tubearchivist/backend/.env + msg_ok "Restored Configuration" + + msg_info "Starting Services" + systemctl start tubearchivist tubearchivist-celery tubearchivist-beat + systemctl reload nginx + msg_ok "Started Services" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8000${CL}" diff --git a/ct/twingate-connector.sh b/ct/twingate-connector.sh index 1c662686b..0f97f1877 100644 --- a/ct/twingate-connector.sh +++ b/ct/twingate-connector.sh @@ -29,8 +29,9 @@ function update_script() { exit fi - msg_info "Updating ${APP}" - ensure_dependencies twingate-connector + msg_info "Updating Twingate Connector" + $STD apt update + $STD apt install -y --only-upgrade twingate-connector $STD systemctl restart twingate-connector msg_ok "Updated successfully!" exit diff --git a/ct/uhf.sh b/ct/uhf.sh index 2b19fb5d1..7e55ac5e8 100644 --- a/ct/uhf.sh +++ b/ct/uhf.sh @@ -38,8 +38,14 @@ function update_script() { $STD apt -y upgrade msg_ok "Updated LXC" + msg_info "Updating UHF Server" + if dpkg -l ffmpeg 2>&1 | grep -q "ii"; then + apt remove ffmpeg -y && apt autoremove -y + fi + setup_ffmpeg fetch_and_deploy_gh_release "comskip" "swapplications/comskip" "prebuild" "latest" "/opt/comskip" "comskip-x64-*.zip" fetch_and_deploy_gh_release "uhf-server" "swapplications/uhf-server-dist" "prebuild" "latest" "/opt/uhf-server" "UHF.Server-linux-x64-*.zip" + msg_ok "Updated UHF Server" msg_info "Starting Service" systemctl start uhf-server diff --git a/ct/umami.sh b/ct/umami.sh index aefc74a36..6ac28415d 100644 --- a/ct/umami.sh +++ b/ct/umami.sh @@ -33,7 +33,9 @@ function update_script() { systemctl stop umami msg_ok "Stopped Service" - fetch_and_deploy_gh_release "umami" "umami-software/umami" "tarball" + mv /opt/umami/.env /opt/.env.bak + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "umami" "umami-software/umami" "tarball" + mv /opt/.env.bak /opt/umami/.env msg_info "Updating Umami" cd /opt/umami diff --git a/ct/versitygw.sh b/ct/versitygw.sh new file mode 100644 index 
000000000..07e2411bd --- /dev/null +++ b/ct/versitygw.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/versity/versitygw + +APP="VersityGW" +var_tags="${var_tags:-s3;storage;gateway}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-8}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -f /usr/bin/versitygw ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_release "versitygw" "versity/versitygw"; then + msg_info "Stopping Service" + systemctl stop versitygw@gateway + msg_ok "Stopped Service" + + fetch_and_deploy_gh_release "versitygw" "versity/versitygw" "binary" + + msg_info "Starting Service" + systemctl start versitygw@gateway + msg_ok "Started Service" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:7070 (Gateway) or http://${IP}:7070 (WebUI)${CL}" \ No newline at end of file diff --git a/ct/victoriametrics.sh b/ct/victoriametrics.sh index f181104f1..e3790fc21 100644 --- a/ct/victoriametrics.sh +++ b/ct/victoriametrics.sh @@ -32,26 +32,30 @@ function update_script() { msg_info "Stopping Service" systemctl stop victoriametrics [[ -f /etc/systemd/system/victoriametrics-logs.service ]] && systemctl stop victoriametrics-logs + [[ -f /etc/systemd/system/vmagent.service ]] && systemctl stop vmagent + [[ -f /etc/systemd/system/vmalert.service ]] && systemctl stop vmalert msg_ok "Stopped Service" - victoriametrics_filename=$(curl -fsSL "https://api.github.com/repos/VictoriaMetrics/VictoriaMetrics/releases/latest" | - jq -r '.assets[].name' | - grep -E '^victoria-metrics-linux-amd64-v[0-9.]+\.tar\.gz$') - vmutils_filename=$(curl -fsSL "https://api.github.com/repos/VictoriaMetrics/VictoriaMetrics/releases/latest" | - jq -r '.assets[].name' | - grep -E '^vmutils-linux-amd64-v[0-9.]+\.tar\.gz$') + victoriametrics_release=$(curl -fsSL "https://api.github.com/repos/VictoriaMetrics/VictoriaMetrics/releases" | + jq -r '.[] | select(.assets[].name | match("^victoria-metrics-linux-amd64-v[0-9.]+.tar.gz$")) | .tag_name' | + head -n 1) - fetch_and_deploy_gh_release "victoriametrics" "VictoriaMetrics/VictoriaMetrics" "prebuild" "latest" "/opt/victoriametrics" "$victoriametrics_filename" - fetch_and_deploy_gh_release "vmutils" "VictoriaMetrics/VictoriaMetrics" "prebuild" "latest" "/opt/victoriametrics" "$vmutils_filename" + msg_debug "Using release $victoriametrics_release" + + victoriametrics_filename="victoria-metrics-linux-amd64-${victoriametrics_release}.tar.gz" + 
vmutils_filename="vmutils-linux-amd64-${victoriametrics_release}.tar.gz" + + fetch_and_deploy_gh_release "victoriametrics" "VictoriaMetrics/VictoriaMetrics" "prebuild" "$victoriametrics_release" "/opt/victoriametrics" "$victoriametrics_filename" + fetch_and_deploy_gh_release "vmutils" "VictoriaMetrics/VictoriaMetrics" "prebuild" "$victoriametrics_release" "/opt/victoriametrics" "$vmutils_filename" if [[ -f /etc/systemd/system/victoriametrics-logs.service ]]; then vmlogs_filename=$(curl -fsSL "https://api.github.com/repos/VictoriaMetrics/VictoriaLogs/releases/latest" | jq -r '.assets[].name' | - grep -E '^victoria-logs-linux-amd64-v[0-9.]+\.tar\.gz$') + grep -E '^victoria-logs-linux-amd64-v[0-9.]+\.tar\.gz$') vlutils_filename=$(curl -fsSL "https://api.github.com/repos/VictoriaMetrics/VictoriaLogs/releases/latest" | jq -r '.assets[].name' | grep -E '^vlutils-linux-amd64-v[0-9.]+\.tar\.gz$') - + fetch_and_deploy_gh_release "victorialogs" "VictoriaMetrics/VictoriaLogs" "prebuild" "latest" "/opt/victoriametrics" "$vmlogs_filename" fetch_and_deploy_gh_release "vlutils" "VictoriaMetrics/VictoriaLogs" "prebuild" "latest" "/opt/victoriametrics" "$vlutils_filename" fi @@ -60,6 +64,8 @@ function update_script() { msg_info "Starting Service" systemctl start victoriametrics [[ -f /etc/systemd/system/victoriametrics-logs.service ]] && systemctl start victoriametrics-logs + [[ -f /etc/systemd/system/vmagent.service ]] && systemctl start vmagent + [[ -f /etc/systemd/system/vmalert.service ]] && systemctl start vmalert msg_ok "Started Service" msg_ok "Updated successfully!" 
fi diff --git a/ct/wealthfolio.sh b/ct/wealthfolio.sh index 09652ff23..9f98cc6a0 100644 --- a/ct/wealthfolio.sh +++ b/ct/wealthfolio.sh @@ -29,6 +29,8 @@ function update_script() { exit fi + NODE_VERSION="24" NODE_MODULE="pnpm" setup_nodejs + if grep -q '^WF_CORS_ALLOW_ORIGINS=\*$' /opt/wealthfolio/.env; then sed -i "s|^WF_CORS_ALLOW_ORIGINS=\*$|WF_CORS_ALLOW_ORIGINS=http://${LOCAL_IP}:8080|" /opt/wealthfolio/.env fi diff --git a/ct/web-check.sh b/ct/web-check.sh index 16dd05cc1..008ca76ad 100644 --- a/ct/web-check.sh +++ b/ct/web-check.sh @@ -28,7 +28,7 @@ function update_script() { exit fi - if check_for_gh_release "web-check" "CrazyWolf13/web-check"; then + if check_for_gh_release "web-check" "Lissy93/web-check"; then msg_info "Stopping Service" systemctl stop web-check msg_ok "Stopped Service" @@ -38,7 +38,7 @@ function update_script() { msg_ok "Created backup" NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "web-check" "CrazyWolf13/web-check" "tarball" + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "web-check" "Lissy93/web-check" "tarball" msg_info "Restoring backup" mv /opt/.env /opt/web-check diff --git a/ct/wger.sh b/ct/wger.sh index 7417e941e..85637beb8 100644 --- a/ct/wger.sh +++ b/ct/wger.sh @@ -53,6 +53,8 @@ function update_script() { set -a && source /opt/wger/.env && set +a export DJANGO_SETTINGS_MODULE=settings.main $STD uv pip install . 
+ $STD npm install + $STD npm run build:css:sass $STD uv run python manage.py migrate $STD uv run python manage.py collectstatic --no-input msg_ok "Updated wger" diff --git a/ct/whodb.sh b/ct/whodb.sh new file mode 100644 index 000000000..dc1770f37 --- /dev/null +++ b/ct/whodb.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://whodb.com/ + +APP="WhoDB" +var_tags="${var_tags:-database;management;gui}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-512}" +var_disk="${var_disk:-2}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -f /opt/whodb/whodb ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_release "whodb" "clidey/whodb"; then + msg_info "Stopping Service" + systemctl stop whodb + msg_ok "Stopped Service" + + fetch_and_deploy_gh_release "whodb" "clidey/whodb" "singlefile" "latest" "/opt/whodb" "whodb-*-linux-amd64" + + msg_info "Starting Service" + systemctl start whodb + msg_ok "Started Service" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8080${CL}" diff --git a/ct/wishlist.sh b/ct/wishlist.sh index 80ec6fdb4..220423e91 100644 --- a/ct/wishlist.sh +++ b/ct/wishlist.sh @@ -46,7 +46,7 @@ function update_script() { msg_info "Updating Wishlist" cd /opt/wishlist - $STD pnpm install + $STD pnpm install --frozen-lockfile $STD pnpm svelte-kit sync $STD pnpm prisma generate sed -i 's|/usr/src/app/|/opt/wishlist/|g' $(grep -rl '/usr/src/app/' /opt/wishlist) diff --git a/ct/yamtrack.sh b/ct/yamtrack.sh new file mode 100644 index 000000000..714125fea --- /dev/null +++ b/ct/yamtrack.sh @@ -0,0 +1,86 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/FuzzyGrim/Yamtrack + +APP="Yamtrack" +var_tags="${var_tags:-media;tracker;movies;anime}" +var_cpu="${var_cpu:-2}" +var_ram="${var_ram:-2048}" +var_disk="${var_disk:-8}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -d /opt/yamtrack ]]; then + msg_error "No ${APP} Installation Found!" 
+ exit + fi + + if check_for_gh_release "yamtrack" "FuzzyGrim/Yamtrack"; then + msg_info "Stopping Services" + systemctl stop yamtrack yamtrack-celery + msg_ok "Stopped Services" + + msg_info "Backing up Data" + cp /opt/yamtrack/src/.env /opt/yamtrack_env.bak + msg_ok "Backed up Data" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "yamtrack" "FuzzyGrim/Yamtrack" "tarball" + + msg_info "Installing Python Dependencies" + cd /opt/yamtrack + $STD uv venv --clear .venv + $STD uv pip install --no-cache-dir -r requirements.txt + msg_ok "Installed Python Dependencies" + + msg_info "Restoring Data" + cp /opt/yamtrack_env.bak /opt/yamtrack/src/.env + rm -f /opt/yamtrack_env.bak + msg_ok "Restored Data" + + msg_info "Updating Yamtrack" + cd /opt/yamtrack/src + $STD /opt/yamtrack/.venv/bin/python manage.py migrate + $STD /opt/yamtrack/.venv/bin/python manage.py collectstatic --noinput + msg_ok "Updated Yamtrack" + + msg_info "Updating Nginx Configuration" + cp /opt/yamtrack/nginx.conf /etc/nginx/nginx.conf + sed -i 's|user abc;|user www-data;|' /etc/nginx/nginx.conf + sed -i 's|pid /tmp/nginx.pid;|pid /run/nginx.pid;|' /etc/nginx/nginx.conf + sed -i 's|/yamtrack/staticfiles/|/opt/yamtrack/src/staticfiles/|' /etc/nginx/nginx.conf + sed -i 's|error_log /dev/stderr|error_log /var/log/nginx/error.log|' /etc/nginx/nginx.conf + sed -i 's|access_log /dev/stdout|access_log /var/log/nginx/access.log|' /etc/nginx/nginx.conf + $STD systemctl reload nginx + msg_ok "Updated Nginx Configuration" + + msg_info "Starting Services" + systemctl start yamtrack yamtrack-celery + msg_ok "Started Services" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} Access it using the following URL:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8000${CL}" diff --git a/ct/yourls.sh b/ct/yourls.sh new file mode 100644 index 000000000..a723a506e --- /dev/null +++ b/ct/yourls.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://yourls.org/ + +APP="YOURLS" +var_tags="${var_tags:-url-shortener;php}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-512}" +var_disk="${var_disk:-4}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + + if [[ ! -f /opt/yourls/yourls-loader.php ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + + if check_for_gh_release "yourls" "YOURLS/YOURLS"; then + msg_info "Stopping Service" + systemctl stop nginx + msg_ok "Stopped Service" + + msg_info "Backing up Configuration" + cp -r /opt/yourls/user /opt/yourls_user.bak + msg_ok "Backed up Configuration" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "yourls" "YOURLS/YOURLS" "tarball" + chown -R www-data:www-data /opt/yourls + + msg_info "Restoring Configuration" + cp -r /opt/yourls_user.bak/. /opt/yourls/user/ + rm -rf /opt/yourls_user.bak + msg_ok "Restored Configuration" + + msg_info "Starting Service" + systemctl start nginx + msg_ok "Started Service" + msg_ok "Updated successfully!" 
+ fi + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" +echo -e "${INFO}${YW} First, complete the database setup at:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}http://${IP}/admin/install.php${CL}" +echo -e "${INFO}${YW} Admin credentials are in the install log:${CL}" +echo -e "${TAB}${GATEWAY}${BGN}grep -A2 'admin' /opt/yourls/user/config.php${CL}" diff --git a/ct/yubal.sh b/ct/yubal.sh index ae307efe0..c36fd96b6 100644 --- a/ct/yubal.sh +++ b/ct/yubal.sh @@ -47,7 +47,7 @@ function update_script() { msg_info "Installing Python Dependencies" cd /opt/yubal - $STD uv sync --no-dev --frozen + $STD uv sync --package yubal-api --no-dev --frozen msg_ok "Installed Python Dependencies" msg_info "Starting Services" diff --git a/ct/zerobyte.sh b/ct/zerobyte.sh index 53b4b6a20..ab001130a 100644 --- a/ct/zerobyte.sh +++ b/ct/zerobyte.sh @@ -38,6 +38,7 @@ function update_script() { cp /opt/zerobyte/.env /opt/zerobyte.env.bak msg_ok "Backed up Configuration" + ensure_dependencies git NODE_VERSION="24" setup_nodejs CLEAN_INSTALL=1 fetch_and_deploy_gh_release "zerobyte" "nicotsx/zerobyte" "tarball" diff --git a/ct/zigbee2mqtt.sh b/ct/zigbee2mqtt.sh index e61e21e57..05a141dbb 100644 --- a/ct/zigbee2mqtt.sh +++ b/ct/zigbee2mqtt.sh @@ -50,7 +50,7 @@ function update_script() { rm -rf /opt/zigbee2mqtt/data mv /opt/z2m_backup/data /opt/zigbee2mqtt cd /opt/zigbee2mqtt - grep -q "^packageImportMethod" ./pnpm-workspace.yaml || echo "packageImportMethod: hardlink" >>./pnpm-workspace.yaml + grep -q "^packageImportMethod" ./pnpm-workspace.yaml 2>/dev/null || echo "packageImportMethod: hardlink" >>./pnpm-workspace.yaml $STD pnpm install --frozen-lockfile $STD pnpm build rm -rf /opt/z2m_backup diff --git a/docs/DEV_MODE.md b/docs/DEV_MODE.md deleted file mode 100644 index 5994068f1..000000000 --- a/docs/DEV_MODE.md +++ /dev/null @@ -1,532 +0,0 @@ -# Dev Mode - Debugging & Development 
Guide - -Development modes provide powerful debugging and testing capabilities for container creation and installation processes. - -## Quick Start - -```bash -# Single mode -export dev_mode="motd" -bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/wallabag.sh)" - -# Multiple modes (comma-separated) -export dev_mode="motd,keep,trace" -bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/wallabag.sh)" - -# Combine with verbose output -export var_verbose="yes" -export dev_mode="pause,logs" -bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/wallabag.sh)" -``` - -## Available Modes - -### 1. **motd** - Early SSH/MOTD Setup - -Sets up SSH access and MOTD **before** the main application installation. - -**Use Case**: - -- Quick access to container for manual debugging -- Continue installation manually if something goes wrong -- Verify container networking before main install - -**Behavior**: - -``` -✔ Container created -✔ Network configured -[DEV] Setting up MOTD and SSH before installation -✔ [DEV] MOTD/SSH ready - container accessible -# Container is now accessible via SSH while installation proceeds -``` - -**Combined with**: `keep`, `breakpoint`, `logs` - ---- - -### 2. **keep** - Preserve Container on Failure - -Never delete the container when installation fails. Skips cleanup prompt. - -**Use Case**: - -- Repeated tests of the same installation -- Debugging failed installations -- Manual fix attempts - -**Behavior**: - -``` -✖ Installation failed in container 107 (exit code: 1) -✔ Container creation log: /tmp/create-lxc-107-abc12345.log -✔ Installation log: /tmp/install-lxc-107-abc12345.log - -🔧 [DEV] Keep mode active - container 107 preserved -root@proxmox:~# -``` - -**Container remains**: `pct enter 107` to access and debug - -**Combined with**: `motd`, `trace`, `logs` - ---- - -### 3. 
**trace** - Bash Command Tracing - -Enables `set -x` for complete command-line tracing. Shows every command before execution. - -**Use Case**: - -- Deep debugging of installation logic -- Understanding script flow -- Identifying where errors occur exactly - -**Behavior**: - -``` -+(/opt/wallabag/bin/console): /opt/wallabag/bin/console cache:warmup -+(/opt/wallabag/bin/console): env APP_ENV=prod /opt/wallabag/bin/console cache:warmup -+(/opt/wallabag/bin/console): [[ -d /opt/wallabag/app/cache ]] -+(/opt/wallabag/bin/console): rm -rf /opt/wallabag/app/cache/* -``` - -**⚠️ Warning**: Exposes passwords and secrets in log output! Only use in isolated environments. - -**Log Output**: All trace output saved to logs (see `logs` mode) - -**Combined with**: `keep`, `pause`, `logs` - ---- - -### 4. **pause** - Step-by-Step Execution - -Pauses after each major step (`msg_info`). Requires manual Enter press to continue. - -**Use Case**: - -- Inspect container state between steps -- Understand what each step does -- Identify which step causes problems - -**Behavior**: - -``` -⏳ Setting up Container OS -[PAUSE] Press Enter to continue... -⏳ Updating Container OS -[PAUSE] Press Enter to continue... -⏳ Installing Dependencies -[PAUSE] Press Enter to continue... -``` - -**Between pauses**: You can open another terminal and inspect the container - -```bash -# In another terminal while paused -pct enter 107 -root@container:~# df -h # Check disk usage -root@container:~# ps aux # Check running processes -``` - -**Combined with**: `motd`, `keep`, `logs` - ---- - -### 5. **breakpoint** - Interactive Shell on Error - -Opens interactive shell inside the container when an error occurs instead of cleanup prompt. 
- -**Use Case**: - -- Live debugging in the actual container -- Manual command testing -- Inspect container state at point of failure - -**Behavior**: - -``` -✖ Installation failed in container 107 (exit code: 1) -✔ Container creation log: /tmp/create-lxc-107-abc12345.log -✔ Installation log: /tmp/install-lxc-107-abc12345.log - -🐛 [DEV] Breakpoint mode - opening shell in container 107 -Type 'exit' to return to host -root@wallabag:~# - -# Now you can debug: -root@wallabag:~# tail -f /root/.install-abc12345.log -root@wallabag:~# mysql -u root -p$PASSWORD wallabag -root@wallabag:~# apt-get install -y strace -root@wallabag:~# exit - -Container 107 still running. Remove now? (y/N): n -🔧 Container 107 kept for debugging -``` - -**Combined with**: `keep`, `logs`, `trace` - ---- - -### 6. **logs** - Persistent Logging - -Saves all logs to `/var/log/community-scripts/` with timestamps. Logs persist even on successful installation. - -**Use Case**: - -- Post-mortem analysis -- Performance analysis -- Automated testing with log collection -- CI/CD integration - -**Behavior**: - -``` -Logs location: /var/log/community-scripts/ - -create-lxc-abc12345-20251117_143022.log (host-side creation) -install-abc12345-20251117_143022.log (container-side installation) -``` - -**Access logs**: - -```bash -# View creation log -tail -f /var/log/community-scripts/create-lxc-*.log - -# Search for errors -grep ERROR /var/log/community-scripts/*.log - -# Analyze performance -grep "msg_info\|msg_ok" /var/log/community-scripts/create-*.log -``` - -**With trace mode**: Creates detailed trace of all commands - -```bash -grep "^+" /var/log/community-scripts/install-*.log -``` - -**Combined with**: All other modes (recommended for CI/CD) - ---- - -### 7. **dryrun** - Simulation Mode - -Shows all commands that would be executed without actually running them. 
- -**Use Case**: - -- Test script logic without making changes -- Verify command syntax -- Understand what will happen -- Pre-flight checks - -**Behavior**: - -``` -[DRYRUN] apt-get update -[DRYRUN] apt-get install -y curl -[DRYRUN] mkdir -p /opt/wallabag -[DRYRUN] cd /opt/wallabag -[DRYRUN] git clone https://github.com/wallabag/wallabag.git . -``` - -**No actual changes made**: Container/system remains unchanged - -**Combined with**: `trace` (shows dryrun trace), `logs` (shows what would run) - ---- - -## Mode Combinations - -### Development Workflow - -```bash -# First test: See what would happen -export dev_mode="dryrun,logs" -bash -c "$(curl ...)" - -# Then test with tracing and pauses -export dev_mode="pause,trace,logs" -bash -c "$(curl ...)" - -# Finally full debug with early SSH access -export dev_mode="motd,keep,breakpoint,logs" -bash -c "$(curl ...)" -``` - -### CI/CD Integration - -```bash -# Automated testing with full logging -export dev_mode="logs" -export var_verbose="yes" -bash -c "$(curl ...)" - -# Capture logs for analysis -tar czf installation-logs-$(date +%s).tar.gz /var/log/community-scripts/ -``` - -### Production-like Testing - -```bash -# Keep containers for manual verification -export dev_mode="keep,logs" -for i in {1..5}; do - bash -c "$(curl ...)" -done - -# Inspect all created containers -pct list -pct enter 100 -``` - -### Live Debugging - -```bash -# SSH in early, step through installation, debug on error -export dev_mode="motd,pause,breakpoint,keep" -bash -c "$(curl ...)" -``` - ---- - -## Environment Variables Reference - -### Dev Mode Variables - -- `dev_mode` (string): Comma-separated list of modes - - Format: `"motd,keep,trace"` - - Default: Empty (no dev modes) - -### Output Control - -- `var_verbose="yes"`: Show all command output (disables silent mode) - - Pairs well with: `trace`, `pause`, `logs` - -### Examples with vars - -```bash -# Maximum verbosity and debugging -export var_verbose="yes" -export 
dev_mode="motd,trace,pause,logs" -bash -c "$(curl ...)" - -# Silent debug (logs only) -export dev_mode="keep,logs" -bash -c "$(curl ...)" - -# Interactive debugging -export var_verbose="yes" -export dev_mode="motd,breakpoint" -bash -c "$(curl ...)" -``` - ---- - -## Troubleshooting with Dev Mode - -### "Installation failed at step X" - -```bash -export dev_mode="pause,logs" -# Step through until the failure point -# Check container state between pauses -pct enter 107 -``` - -### "Password/credentials not working" - -```bash -export dev_mode="motd,keep,trace" -# With trace mode, see exact password handling (be careful with logs!) -# Use motd to SSH in and test manually -ssh root@container-ip -``` - -### "Permission denied errors" - -```bash -export dev_mode="breakpoint,keep" -# Get shell at failure point -# Check file permissions, user context, SELinux status -ls -la /path/to/file -whoami -``` - -### "Networking issues" - -```bash -export dev_mode="motd" -# SSH in with motd mode before main install -ssh root@container-ip -ping 8.8.8.8 -nslookup example.com -``` - -### "Need to manually complete installation" - -```bash -export dev_mode="motd,keep" -# Container accessible via SSH while installation runs -# After failure, SSH in and manually continue -ssh root@container-ip -# ... manual commands ... 
-exit -# Then use 'keep' mode to preserve container for inspection -``` - ---- - -## Log Files Locations - -### Default (without `logs` mode) - -- Host creation: `/tmp/create-lxc-.log` -- Container install: Copied to `/tmp/install-lxc--.log` on failure - -### With `logs` mode - -- Host creation: `/var/log/community-scripts/create-lxc--.log` -- Container install: `/var/log/community-scripts/install--.log` - -### View logs - -```bash -# Tail in real-time -tail -f /var/log/community-scripts/*.log - -# Search for errors -grep -r "exit code [1-9]" /var/log/community-scripts/ - -# Filter by session -grep "ed563b19" /var/log/community-scripts/*.log -``` - ---- - -## Best Practices - -### ✅ DO - -- Use `logs` mode for CI/CD and automated testing -- Use `motd` for early SSH access during long installations -- Use `pause` when learning the installation flow -- Use `trace` when debugging logic issues (watch for secrets!) -- Combine modes for comprehensive debugging -- Archive logs after successful tests - -### ❌ DON'T - -- Use `trace` in production or with untrusted networks (exposes secrets) -- Leave `keep` mode enabled for unattended scripts (containers accumulate) -- Use `dryrun` and expect actual changes -- Commit `dev_mode` exports to production deployment scripts -- Use `breakpoint` in non-interactive environments (will hang) - ---- - -## Examples - -### Example 1: Debug a Failed Installation - -```bash -# Initial test to see the failure -export dev_mode="keep,logs" -bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/wallabag.sh)" - -# Container 107 kept, check logs -tail /var/log/community-scripts/install-*.log - -# SSH in to debug -pct enter 107 -root@wallabag:~# cat /root/.install-*.log | tail -100 -root@wallabag:~# apt-get update # Retry the failing command -root@wallabag:~# exit - -# Re-run with manual step-through -export dev_mode="motd,pause,keep" -bash -c "$(curl ...)" -``` - -### Example 2: Verify Installation Steps - 
-```bash -export dev_mode="pause,logs" -export var_verbose="yes" -bash -c "$(curl ...)" - -# Press Enter through each step -# Monitor container in another terminal -# pct enter 107 -# Review logs in real-time -``` - -### Example 3: CI/CD Pipeline Integration - -```bash -#!/bin/bash -export dev_mode="logs" -export var_verbose="no" - -for app in wallabag nextcloud wordpress; do - echo "Testing $app installation..." - APP="$app" bash -c "$(curl ...)" || { - echo "FAILED: $app" - tar czf logs-$app.tar.gz /var/log/community-scripts/ - exit 1 - } - echo "SUCCESS: $app" -done - -echo "All installations successful" -tar czf all-logs.tar.gz /var/log/community-scripts/ -``` - ---- - -## Advanced Usage - -### Custom Log Analysis - -```bash -# Extract all errors -grep "ERROR\|exit code [1-9]" /var/log/community-scripts/*.log - -# Performance timeline -grep "^$(date +%Y-%m-%d)" /var/log/community-scripts/*.log | grep "msg_" - -# Memory usage during install -grep "free\|available" /var/log/community-scripts/*.log -``` - -### Integration with External Tools - -```bash -# Send logs to Elasticsearch -curl -X POST "localhost:9200/installation-logs/_doc" \ - -H 'Content-Type: application/json' \ - -d @/var/log/community-scripts/install-*.log - -# Archive for compliance -tar czf installation-records-$(date +%Y%m).tar.gz \ - /var/log/community-scripts/ -gpg --encrypt installation-records-*.tar.gz -``` - ---- - -## Support & Issues - -When reporting installation issues, always include: - -```bash -# Collect all relevant information -export dev_mode="logs" -# Run the failing installation -# Then provide: -tar czf debug-logs.tar.gz /var/log/community-scripts/ -``` - -Include the `debug-logs.tar.gz` when reporting issues for better diagnostics. 
diff --git a/docs/EXIT_CODES.md b/docs/EXIT_CODES.md deleted file mode 100644 index 091f92664..000000000 --- a/docs/EXIT_CODES.md +++ /dev/null @@ -1,298 +0,0 @@ -# Exit Code Reference - -Comprehensive documentation of all exit codes used in ProxmoxVE scripts. - -## Table of Contents - -- [Generic/Shell Errors (1-255)](#genericshell-errors) -- [Package Manager Errors (100-101, 255)](#package-manager-errors) -- [Node.js/npm Errors (243-254)](#nodejsnpm-errors) -- [Python/pip Errors (210-212)](#pythonpip-errors) -- [Database Errors (231-254)](#database-errors) -- [Proxmox Custom Codes (200-231)](#proxmox-custom-codes) - ---- - -## Generic/Shell Errors - -Standard Unix/Linux exit codes used across all scripts. - -| Code | Description | Common Causes | Solutions | -| ------- | --------------------------------------- | ----------------------------------------- | ---------------------------------------------- | -| **1** | General error / Operation not permitted | Permission denied, general failure | Check user permissions, run as root if needed | -| **2** | Misuse of shell builtins | Syntax error in script | Review script syntax, check bash version | -| **126** | Command cannot execute | Permission problem, not executable | `chmod +x script.sh` or check file permissions | -| **127** | Command not found | Missing binary, wrong PATH | Install required package, check PATH variable | -| **128** | Invalid argument to exit | Invalid exit code passed | Use exit codes 0-255 only | -| **130** | Terminated by Ctrl+C (SIGINT) | User interrupted script | Expected behavior, no action needed | -| **137** | Killed (SIGKILL) | Out of memory, forced termination | Check memory usage, increase RAM allocation | -| **139** | Segmentation fault | Memory access violation, corrupted binary | Reinstall package, check system stability | -| **143** | Terminated (SIGTERM) | Graceful shutdown signal | Expected during container stops | - ---- - -## Package Manager Errors - -APT, DPKG, and package 
installation errors. - -| Code | Description | Common Causes | Solutions | -| ------- | -------------------------- | --------------------------------------- | ------------------------------------------------- | -| **100** | APT: Package manager error | Broken packages, dependency conflicts | `apt --fix-broken install`, `dpkg --configure -a` | -| **101** | APT: Configuration error | Malformed sources.list, bad repo config | Check `/etc/apt/sources.list`, run `apt update` | -| **255** | DPKG: Fatal internal error | Corrupted package database | `dpkg --configure -a`, restore from backup | - ---- - -## Node.js/npm Errors - -Node.js runtime and package manager errors. - -| Code | Description | Common Causes | Solutions | -| ------- | ------------------------------------------ | ------------------------------ | ---------------------------------------------- | -| **243** | Node.js: Out of memory | JavaScript heap exhausted | Increase `--max-old-space-size`, optimize code | -| **245** | Node.js: Invalid command-line option | Wrong Node.js flags | Check Node.js version, verify CLI options | -| **246** | Node.js: Internal JavaScript Parse Error | Syntax error in JS code | Review JavaScript syntax, check dependencies | -| **247** | Node.js: Fatal internal error | Node.js runtime crash | Update Node.js, check for known bugs | -| **248** | Node.js: Invalid C++ addon / N-API failure | Native module incompatibility | Rebuild native modules, update packages | -| **249** | Node.js: Inspector error | Debug/inspect protocol failure | Disable inspector, check port conflicts | -| **254** | npm/pnpm/yarn: Unknown fatal error | Package manager crash | Clear cache, reinstall package manager | - ---- - -## Python/pip Errors - -Python runtime and package installation errors. 
- -| Code | Description | Common Causes | Solutions | -| ------- | ------------------------------------ | --------------------------------------- | -------------------------------------------------------- | -| **210** | Python: Virtualenv missing or broken | venv not created, corrupted environment | `python3 -m venv venv`, recreate virtualenv | -| **211** | Python: Dependency resolution failed | Conflicting package versions | Use `pip install --upgrade`, check requirements.txt | -| **212** | Python: Installation aborted | EXTERNALLY-MANAGED, permission denied | Use `--break-system-packages` or venv, check permissions | - ---- - -## Database Errors - -### PostgreSQL (231-234) - -| Code | Description | Common Causes | Solutions | -| ------- | ----------------------- | ---------------------------------- | ----------------------------------------------------- | -| **231** | Connection failed | Server not running, wrong socket | `systemctl start postgresql`, check connection string | -| **232** | Authentication failed | Wrong credentials | Verify username/password, check `pg_hba.conf` | -| **233** | Database does not exist | Database not created | `CREATE DATABASE`, restore from backup | -| **234** | Fatal error in query | Syntax error, constraint violation | Review SQL syntax, check constraints | - -### MySQL/MariaDB (241-244) - -| Code | Description | Common Causes | Solutions | -| ------- | ----------------------- | ---------------------------------- | ---------------------------------------------------- | -| **241** | Connection failed | Server not running, wrong socket | `systemctl start mysql`, check connection parameters | -| **242** | Authentication failed | Wrong credentials | Verify username/password, grant privileges | -| **243** | Database does not exist | Database not created | `CREATE DATABASE`, restore from backup | -| **244** | Fatal error in query | Syntax error, constraint violation | Review SQL syntax, check constraints | - -### MongoDB (251-254) - -| 
Code | Description | Common Causes | Solutions | -| ------- | --------------------- | -------------------- | ------------------------------------------ | -| **251** | Connection failed | Server not running | `systemctl start mongod`, check port 27017 | -| **252** | Authentication failed | Wrong credentials | Verify username/password, create user | -| **253** | Database not found | Database not created | Database auto-created on first write | -| **254** | Fatal query error | Invalid query syntax | Review MongoDB query syntax | - ---- - -## Proxmox Custom Codes - -Custom exit codes specific to ProxmoxVE scripts. - -### Container Creation Errors (200-209) - -| Code | Description | Common Causes | Solutions | -| ------- | ---------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | -| **200** | Failed to create lock file | Permission denied, disk full | Check `/tmp` permissions, free disk space | -| **203** | Missing CTID variable | Script configuration error | Set CTID in script or via prompt | -| **204** | Missing PCT_OSTYPE variable | Template selection failed | Verify template availability | -| **205** | Invalid CTID (<100) | CTID below minimum value | Use CTID ≥ 100 (1-99 reserved for Proxmox) | -| **206** | CTID already in use | Container/VM with same ID exists | Check `pct list` and `/etc/pve/lxc/`, use different ID | -| **207** | Password contains unescaped special characters | Special chars like `-`, `/`, `\`, `*` at start/end | Avoid leading special chars, use alphanumeric passwords | -| **208** | Invalid configuration | DNS format (`.home` vs `home`), MAC format (`-` vs `:`) | Remove leading dots from DNS, use `:` in MAC addresses | -| **209** | Container creation failed | Multiple possible causes | Check logs in `/tmp/pct_create_*.log`, verify template | - -### Cluster & Storage Errors (210, 214, 217) - -| Code | Description | Common Causes | Solutions | 
-| ------- | --------------------------------- | ---------------------------------- | ----------------------------------------------------------- | -| **210** | Cluster not quorate | Cluster nodes down, network issues | Check cluster status: `pvecm status`, fix node connectivity | -| **211** | Timeout waiting for template lock | Concurrent download in progress | Wait for other download to complete (60s timeout) | -| **214** | Not enough storage space | Disk full, quota exceeded | Free disk space, increase storage allocation | -| **217** | Storage does not support rootdir | Wrong storage type selected | Use storage supporting containers (dir, zfspool, lvm-thin) | - -### Container Verification Errors (215-216) - -| Code | Description | Common Causes | Solutions | -| ------- | -------------------------------- | -------------------------------- | --------------------------------------------------------- | -| **215** | Container created but not listed | Ghost state, incomplete creation | Check `/etc/pve/lxc/CTID.conf`, remove manually if needed | -| **216** | RootFS entry missing in config | Incomplete container creation | Delete container, retry creation | - -### Template Errors (218, 220-223, 225) - -| Code | Description | Common Causes | Solutions | -| ------- | ----------------------------------------- | ------------------------------------------------ | ----------------------------------------------------------- | -| **218** | Template file corrupted or incomplete | Download interrupted, file <1MB, invalid archive | Delete template, run `pveam update && pveam download` | -| **220** | Unable to resolve template path | Template storage not accessible | Check storage availability, verify permissions | -| **221** | Template file exists but not readable | Permission denied | `chmod 644 template.tar.zst`, check storage permissions | -| **222** | Template download failed after 3 attempts | Network issues, storage problems | Check internet connectivity, verify storage 
space | -| **223** | Template not available after download | Storage sync issue, I/O delay | Wait a few seconds, verify storage is mounted | -| **225** | No template available for OS/Version | Unsupported OS version, catalog outdated | Run `pveam update`, check `pveam available -section system` | - -### LXC Stack Errors (231) - -| Code | Description | Common Causes | Solutions | -| ------- | ------------------------------ | ------------------------------------------- | -------------------------------------------- | -| **231** | LXC stack upgrade/retry failed | Outdated `pve-container`, Debian 13.1 issue | See [Debian 13.1 Fix Guide](#debian-131-fix) | - ---- - -## Special Case: Debian 13.1 "unsupported version" Error - -### Problem - -``` -TASK ERROR: unable to create CT 129 - unsupported debian version '13.1' -``` - -### Root Cause - -Outdated `pve-container` package doesn't recognize Debian 13 (Trixie). - -### Solutions - -#### Option 1: Full System Upgrade (Recommended) - -```bash -apt update -apt full-upgrade -y -reboot -``` - -Verify fix: - -```bash -dpkg -l pve-container -# PVE 8: Should show 5.3.3+ -# PVE 9: Should show 6.0.13+ -``` - -#### Option 2: Update Only pve-container - -```bash -apt update -apt install --only-upgrade pve-container -y -``` - -**Warning:** If Proxmox fails to boot after this, your system was inconsistent. Perform Option 1 instead. - -#### Option 3: Verify Repository Configuration - -Many users disable Enterprise repos but forget to add no-subscription repos. 
- -**For PVE 9 (Trixie):** - -```bash -cat /etc/apt/sources.list.d/pve-no-subscription.list -``` - -Should contain: - -``` -deb http://download.proxmox.com/debian/pve trixie pve-no-subscription -deb http://download.proxmox.com/debian/ceph-squid trixie no-subscription -``` - -**For PVE 8 (Bookworm):** - -``` -deb http://download.proxmox.com/debian/pve bookworm pve-no-subscription -deb http://download.proxmox.com/debian/ceph-quincy bookworm no-subscription -``` - -Then: - -```bash -apt update -apt full-upgrade -y -``` - -### Reference - -Official discussion: [GitHub #8126](https://github.com/community-scripts/ProxmoxVE/discussions/8126) - ---- - -## Troubleshooting Tips - -### Finding Error Details - -1. **Check logs:** - - ```bash - tail -n 50 /tmp/pct_create_*.log - ``` - -2. **Enable verbose mode:** - - ```bash - bash -x script.sh # Shows every command executed - ``` - -3. **Check container status:** - - ```bash - pct list - pct status CTID - ``` - -4. **Verify storage:** - ```bash - pvesm status - df -h - ``` - -### Common Patterns - -- **Exit 0 with error message:** Configuration validation failed (check DNS, MAC, password format) -- **Exit 206 but container not visible:** Ghost container state - check `/etc/pve/lxc/` manually -- **Exit 209 generic error:** Check `/tmp/pct_create_*.log` for specific `pct create` failure reason -- **Exit 218 or 222:** Template issues - delete and re-download template - ---- - -## Quick Reference Chart - -| Exit Code Range | Category | Typical Issue | -| --------------- | ------------------ | ------------------------------------------- | -| 1-2, 126-143 | Shell/System | Permissions, signals, missing commands | -| 100-101, 255 | Package Manager | APT/DPKG errors, broken packages | -| 200-209 | Container Creation | CTID, password, configuration | -| 210-217 | Storage/Cluster | Disk space, quorum, storage type | -| 218-225 | Templates | Download, corruption, availability | -| 231-254 | Databases/Runtime | PostgreSQL, MySQL, MongoDB, 
Node.js, Python | - ---- - -## Contributing - -Found an undocumented exit code or have a solution to share? Please: - -1. Open an issue on [GitHub](https://github.com/community-scripts/ProxmoxVE/issues) -2. Include: - - Exit code number - - Error message - - Steps to reproduce - - Solution that worked for you - ---- - -_Last updated: November 2025_ -_ProxmoxVE Version: 2.x_ diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index 44f7b9464..000000000 --- a/docs/README.md +++ /dev/null @@ -1,298 +0,0 @@ -# 📚 ProxmoxVE Documentation - -Complete guide to all ProxmoxVE documentation - quickly find what you need. - ---- - -## 🎯 **Quick Navigation by Goal** - -### 👤 **I want to...** - -**Contribute a new application** -→ Start with: [contribution/README.md](contribution/README.md) -→ Then: [ct/DETAILED_GUIDE.md](ct/DETAILED_GUIDE.md) + [install/DETAILED_GUIDE.md](install/DETAILED_GUIDE.md) - -**Understand the architecture** -→ Read: [TECHNICAL_REFERENCE.md](TECHNICAL_REFERENCE.md) -→ Then: [misc/README.md](misc/README.md) - -**Debug a failed installation** -→ Check: [EXIT_CODES.md](EXIT_CODES.md) -→ Then: [DEV_MODE.md](DEV_MODE.md) -→ See also: [misc/error_handler.func/](misc/error_handler.func/) - -**Configure system defaults** -→ Read: [guides/DEFAULTS_SYSTEM_GUIDE.md](guides/DEFAULTS_SYSTEM_GUIDE.md) - -**Deploy containers automatically** -→ Read: [guides/UNATTENDED_DEPLOYMENTS.md](guides/UNATTENDED_DEPLOYMENTS.md) - -**Develop a function library** -→ Study: [misc/](misc/) documentation - ---- - -## 👤 **Quick Start by Role** - -### **I'm a...** - -**New Contributor** -→ Start: [contribution/README.md](contribution/README.md) -→ Then: Choose your path below - -**Container Creator** -→ Read: [ct/README.md](ct/README.md) -→ Deep Dive: [ct/DETAILED_GUIDE.md](ct/DETAILED_GUIDE.md) -→ Reference: [misc/build.func/](misc/build.func/) - -**Installation Script Developer** -→ Read: [install/README.md](install/README.md) -→ Deep Dive: 
[install/DETAILED_GUIDE.md](install/DETAILED_GUIDE.md) -→ Reference: [misc/tools.func/](misc/tools.func/) - -**VM Provisioner** -→ Read: [vm/README.md](vm/README.md) -→ Reference: [misc/cloud-init.func/](misc/cloud-init.func/) - -**Tools Developer** -→ Read: [tools/README.md](tools/README.md) -→ Reference: [misc/build.func/](misc/build.func/) - -**API Integrator** -→ Read: [api/README.md](api/README.md) -→ Reference: [misc/api.func/](misc/api.func/) - -**System Operator** -→ Start: [EXIT_CODES.md](EXIT_CODES.md) -→ Then: [guides/DEFAULTS_SYSTEM_GUIDE.md](guides/DEFAULTS_SYSTEM_GUIDE.md) -→ Automate: [guides/UNATTENDED_DEPLOYMENTS.md](guides/UNATTENDED_DEPLOYMENTS.md) -→ Debug: [DEV_MODE.md](DEV_MODE.md) - -**Architect** -→ Read: [TECHNICAL_REFERENCE.md](TECHNICAL_REFERENCE.md) -→ Deep Dive: [misc/README.md](misc/README.md) - ---- - -## 📂 **Documentation Structure** - -### Project-Mirrored Directories - -Each major project directory has documentation: - -``` -ProxmoxVE/ -├─ ct/ ↔ docs/ct/ (README.md + DETAILED_GUIDE.md) -├─ install/ ↔ docs/install/ (README.md + DETAILED_GUIDE.md) -├─ vm/ ↔ docs/vm/ (README.md) -├─ tools/ ↔ docs/tools/ (README.md) -├─ api/ ↔ docs/api/ (README.md) -├─ misc/ ↔ docs/misc/ (9 function libraries) -└─ [system-wide] ↔ docs/guides/ (configuration & deployment guides) -``` - -### Core Documentation - -| Document | Purpose | Audience | -|----------|---------|----------| -| [contribution/README.md](contribution/README.md) | How to contribute | Contributors | -| [ct/DETAILED_GUIDE.md](ct/DETAILED_GUIDE.md) | Create ct scripts | Container developers | -| [install/DETAILED_GUIDE.md](install/DETAILED_GUIDE.md) | Create install scripts | Installation developers | -| [TECHNICAL_REFERENCE.md](TECHNICAL_REFERENCE.md) | Architecture deep-dive | Architects, advanced users | -| [guides/DEFAULTS_SYSTEM_GUIDE.md](guides/DEFAULTS_SYSTEM_GUIDE.md) | Configuration system | Operators, power users | -| 
[guides/CONFIGURATION_REFERENCE.md](guides/CONFIGURATION_REFERENCE.md) | Configuration options reference | Advanced users | -| [guides/UNATTENDED_DEPLOYMENTS.md](guides/UNATTENDED_DEPLOYMENTS.md) | Automated deployments | DevOps, automation | -| [EXIT_CODES.md](EXIT_CODES.md) | Exit code reference | Troubleshooters | -| [DEV_MODE.md](DEV_MODE.md) | Debugging tools | Developers | - ---- - -## 📂 **Directory Guide** - -### [ct/](ct/) - Container Scripts -Documentation for `/ct` - Container creation scripts that run on the Proxmox host. - -**Includes**: -- Overview of container creation process -- Deep dive: [DETAILED_GUIDE.md](ct/DETAILED_GUIDE.md) - Complete reference with examples -- Reference to [misc/build.func/](misc/build.func/) -- Quick start for creating new containers - -### [install/](install/) - Installation Scripts -Documentation for `/install` - Scripts that run inside containers to install applications. - -**Includes**: -- Overview of 10-phase installation pattern -- Deep dive: [DETAILED_GUIDE.md](install/DETAILED_GUIDE.md) - Complete reference with examples -- Reference to [misc/tools.func/](misc/tools.func/) -- Alpine vs Debian differences - -### [vm/](vm/) - Virtual Machine Scripts -Documentation for `/vm` - VM creation scripts using cloud-init provisioning. - -**Includes**: -- Overview of VM provisioning -- Link to [misc/cloud-init.func/](misc/cloud-init.func/) -- VM vs Container comparison -- Cloud-init examples - -### [tools/](tools/) - Tools & Utilities -Documentation for `/tools` - Management tools and add-ons. - -**Includes**: -- Overview of tools structure -- Integration points -- Contributing new tools -- Common operations - -### [api/](api/) - API Integration -Documentation for `/api` - Telemetry and API backend. - -**Includes**: -- API overview -- Integration methods -- API endpoints -- Privacy information - -### [misc/](misc/) - Function Libraries -Documentation for `/misc` - 9 core function libraries with complete references. 
- -**Contains**: -- **build.func/** - Container orchestration (7 files) -- **core.func/** - Utilities and messaging (5 files) -- **error_handler.func/** - Error handling (5 files) -- **api.func/** - API integration (5 files) -- **install.func/** - Container setup (5 files) -- **tools.func/** - Package installation (6 files) -- **alpine-install.func/** - Alpine setup (5 files) -- **alpine-tools.func/** - Alpine tools (5 files) -- **cloud-init.func/** - VM provisioning (5 files) - ---- - -## 🎓 **Learning Paths** - -### Path 1: First-Time Contributor (2-3 hours) - -1. [contribution/README.md](contribution/README.md) - Quick Start -2. Pick your area: - - Containers → [ct/README.md](ct/README.md) + [ct/DETAILED_GUIDE.md](ct/DETAILED_GUIDE.md) - - Installation → [install/README.md](install/README.md) + [install/DETAILED_GUIDE.md](install/DETAILED_GUIDE.md) - - VMs → [vm/README.md](vm/README.md) -3. Study existing similar script -4. Create your contribution -5. Submit PR - -### Path 2: Intermediate Developer (4-6 hours) - -1. [TECHNICAL_REFERENCE.md](TECHNICAL_REFERENCE.md) -2. Dive into function libraries: - - [misc/build.func/README.md](misc/build.func/README.md) - - [misc/tools.func/README.md](misc/tools.func/README.md) - - [misc/install.func/README.md](misc/install.func/README.md) -3. Study advanced examples -4. Create complex applications - -### Path 3: Advanced Architect (8+ hours) - -1. All of Intermediate Path -2. Study all 9 function libraries in depth -3. [guides/DEFAULTS_SYSTEM_GUIDE.md](guides/DEFAULTS_SYSTEM_GUIDE.md) - Configuration system -4. [DEV_MODE.md](DEV_MODE.md) - Debugging and development -5. Design new features or function libraries - -### Path 4: Troubleshooter (30 minutes - 1 hour) - -1. [EXIT_CODES.md](EXIT_CODES.md) - Find error code -2. [DEV_MODE.md](DEV_MODE.md) - Run with debugging -3. Check relevant function library docs -4. 
Review logs and fix - ---- - -## 📊 **By the Numbers** - -| Metric | Count | -|--------|:---:| -| **Documentation Files** | 63 | -| **Total Lines** | 15,000+ | -| **Function Libraries** | 9 | -| **Functions Documented** | 150+ | -| **Code Examples** | 50+ | -| **Flowcharts** | 15+ | -| **Do/Don't Sections** | 20+ | -| **Real-World Examples** | 30+ | - ---- - -## 🔍 **Find It Fast** - -### By Feature -- **How do I create a container?** → [ct/DETAILED_GUIDE.md](ct/DETAILED_GUIDE.md) -- **How do I create an install script?** → [install/DETAILED_GUIDE.md](install/DETAILED_GUIDE.md) -- **How do I create a VM?** → [vm/README.md](vm/README.md) -- **How do I install Node.js?** → [misc/tools.func/](misc/tools.func/) -- **How do I debug?** → [DEV_MODE.md](DEV_MODE.md) - -### By Error -- **Exit code 206?** → [EXIT_CODES.md](EXIT_CODES.md) -- **Network failed?** → [misc/install.func/](misc/install.func/) -- **Package error?** → [misc/tools.func/](misc/tools.func/) - -### By Role -- **Contributor** → [contribution/README.md](contribution/README.md) -- **Operator** → [guides/DEFAULTS_SYSTEM_GUIDE.md](guides/DEFAULTS_SYSTEM_GUIDE.md) -- **Automation** → [guides/UNATTENDED_DEPLOYMENTS.md](guides/UNATTENDED_DEPLOYMENTS.md) -- **Developer** → [TECHNICAL_REFERENCE.md](TECHNICAL_REFERENCE.md) -- **Architect** → [misc/README.md](misc/README.md) - ---- - -## ✅ **Documentation Features** - -- ✅ **Project-mirrored structure** - Organized like the actual project -- ✅ **Complete function references** - Every function documented -- ✅ **Real-world examples** - Copy-paste ready code -- ✅ **Visual flowcharts** - ASCII diagrams of workflows -- ✅ **Integration guides** - How components connect -- ✅ **Troubleshooting** - Common issues and solutions -- ✅ **Best practices** - DO/DON'T sections throughout -- ✅ **Learning paths** - Structured curriculum by role -- ✅ **Quick references** - Fast lookup by error code -- ✅ **Comprehensive navigation** - This page - ---- - -## 🚀 **Start Here** - -**New to 
ProxmoxVE?** → [contribution/README.md](contribution/README.md) - -**Looking for something specific?** → Choose your role above or browse by directory - -**Need to debug?** → [EXIT_CODES.md](EXIT_CODES.md) - -**Want to understand architecture?** → [TECHNICAL_REFERENCE.md](TECHNICAL_REFERENCE.md) - ---- - -## 🤝 **Contributing Documentation** - -Found an error? Want to improve docs? - -1. See: [contribution/README.md](contribution/README.md) for full contribution guide -2. Open issue: [GitHub Issues](https://github.com/community-scripts/ProxmoxVE/issues) -3. Or submit PR with improvements - ---- - -## 📝 **Status** - -- **Last Updated**: December 2025 -- **Version**: 2.3 (Consolidated & Reorganized) -- **Completeness**: ✅ 100% - All components documented -- **Quality**: ✅ Production-ready -- **Structure**: ✅ Clean and organized - ---- - -**Welcome to ProxmoxVE! Start with [CONTRIBUTION_GUIDE.md](CONTRIBUTION_GUIDE.md) or choose your role above.** 🚀 diff --git a/docs/TECHNICAL_REFERENCE.md b/docs/TECHNICAL_REFERENCE.md deleted file mode 100644 index ec4f21eb3..000000000 --- a/docs/TECHNICAL_REFERENCE.md +++ /dev/null @@ -1,897 +0,0 @@ -# Technical Reference: Configuration System Architecture - -> **For Developers and Advanced Users** -> -> _Deep dive into how the defaults and configuration system works_ - ---- - -## Table of Contents - -1. [System Architecture](#system-architecture) -2. [File Format Specifications](#file-format-specifications) -3. [Function Reference](#function-reference) -4. [Variable Precedence](#variable-precedence) -5. [Data Flow Diagrams](#data-flow-diagrams) -6. [Security Model](#security-model) -7. [Implementation Details](#implementation-details) - ---- - -## System Architecture - -### Component Overview - -``` -┌─────────────────────────────────────────────────────────────┐ -│ Installation Script │ -│ (pihole-install.sh, docker-install.sh, etc.) 
│ -└────────────────────┬────────────────────────────────────────┘ - │ - v -┌─────────────────────────────────────────────────────────────┐ -│ build.func Library │ -│ ┌──────────────────────────────────────────────────────┐ │ -│ │ variables() │ │ -│ │ - Initialize NSAPP, var_install, etc. │ │ -│ └──────────────────────────────────────────────────────┘ │ -│ ┌──────────────────────────────────────────────────────┐ │ -│ │ install_script() │ │ -│ │ - Display mode menu │ │ -│ │ - Route to appropriate workflow │ │ -│ └──────────────────────────────────────────────────────┘ │ -│ ┌──────────────────────────────────────────────────────┐ │ -│ │ base_settings() │ │ -│ │ - Apply built-in defaults │ │ -│ │ - Read environment variables (var_*) │ │ -│ └──────────────────────────────────────────────────────┘ │ -│ ┌──────────────────────────────────────────────────────┐ │ -│ │ load_vars_file() │ │ -│ │ - Safe file parsing (NO source/eval) │ │ -│ │ - Whitelist validation │ │ -│ │ - Value sanitization │ │ -│ └──────────────────────────────────────────────────────┘ │ -│ ┌──────────────────────────────────────────────────────┐ │ -│ │ default_var_settings() │ │ -│ │ - Load user defaults │ │ -│ │ - Display summary │ │ -│ └──────────────────────────────────────────────────────┘ │ -│ ┌──────────────────────────────────────────────────────┐ │ -│ │ maybe_offer_save_app_defaults() │ │ -│ │ - Offer to save current settings │ │ -│ │ - Handle updates vs. 
new saves │ │ -│ └──────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────┘ - │ - v -┌─────────────────────────────────────────────────────────────┐ -│ Configuration Files (on Disk) │ -│ ┌──────────────────────────────────────────────────────┐ │ -│ │ /usr/local/community-scripts/default.vars │ │ -│ │ (User global defaults) │ │ -│ └──────────────────────────────────────────────────────┘ │ -│ ┌──────────────────────────────────────────────────────┐ │ -│ │ /usr/local/community-scripts/defaults/*.vars │ │ -│ │ (App-specific defaults) │ │ -│ └──────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────┘ -``` - ---- - -## File Format Specifications - -### User Defaults: `default.vars` - -**Location**: `/usr/local/community-scripts/default.vars` - -**MIME Type**: `text/plain` - -**Encoding**: UTF-8 (no BOM) - -**Format Specification**: - -``` -# File Format: Simple key=value pairs -# Purpose: Store global user defaults -# Security: Sanitized values, whitelist validation - -# Comments and blank lines are ignored -# Line format: var_name=value -# No spaces around the equals sign -# String values do not need quoting (but may be quoted) - -[CONTENT] -var_cpu=4 -var_ram=2048 -var_disk=20 -var_hostname=mydefault -var_brg=vmbr0 -var_gateway=192.168.1.1 -``` - -**Formal Grammar**: - -``` -FILE := (BLANK_LINE | COMMENT_LINE | VAR_LINE)* -BLANK_LINE := \n -COMMENT_LINE := '#' [^\n]* \n -VAR_LINE := VAR_NAME '=' VAR_VALUE \n -VAR_NAME := 'var_' [a-z_]+ -VAR_VALUE := [^\n]* # Any printable characters except newline -``` - -**Constraints**: - -| Constraint | Value | -| ----------------- | ------------------------ | -| Max file size | 64 KB | -| Max line length | 1024 bytes | -| Max variables | 100 | -| Allowed var names | `var_[a-z_]+` | -| Value validation | Whitelist + Sanitization | - -**Example Valid File**: - -```bash -# Global User Defaults -# Created: 
2024-11-28 - -# Resource defaults -var_cpu=4 -var_ram=2048 -var_disk=20 - -# Network defaults -var_brg=vmbr0 -var_gateway=192.168.1.1 -var_mtu=1500 -var_vlan=100 - -# System defaults -var_timezone=Europe/Berlin -var_hostname=default-container - -# Storage -var_container_storage=local -var_template_storage=local - -# Security -var_ssh=yes -var_protection=0 -var_unprivileged=1 -``` - -### App Defaults: `<nsapp>.vars` - -**Location**: `/usr/local/community-scripts/defaults/<nsapp>.vars` - -**Format**: Identical to `default.vars` - -**Naming Convention**: `<nsapp>.vars` - -- `nsapp` = lowercase app name with spaces removed -- Examples: - - `pihole` → `pihole.vars` - - `opnsense` → `opnsense.vars` - - `docker compose` → `dockercompose.vars` - -**Example App Defaults**: - -```bash -# App-specific defaults for PiHole (pihole) -# Generated on 2024-11-28T15:32:00Z -# These override user defaults when installing pihole - -var_unprivileged=1 -var_cpu=2 -var_ram=1024 -var_disk=10 -var_brg=vmbr0 -var_net=veth -var_gateway=192.168.1.1 -var_hostname=pihole -var_timezone=Europe/Berlin -var_container_storage=local -var_template_storage=local -var_tags=dns,pihole -``` - ---- - -## Function Reference - -### `load_vars_file()` - -**Purpose**: Safely load variables from .vars files without using `source` or `eval` - -**Signature**: - -```bash -load_vars_file(filepath) -``` - -**Parameters**: - -| Param | Type | Required | Example | -| -------- | ------ | -------- | ------------------------------------------- | -| filepath | String | Yes | `/usr/local/community-scripts/default.vars` | - -**Returns**: - -- `0` on success -- `1` on error (file missing, parse error, etc.) 
- -**Environment Side Effects**: - -- Sets all parsed `var_*` variables as shell variables -- Does NOT unset variables if file missing (safe) -- Does NOT affect other variables - -**Implementation Pattern**: - -```bash -load_vars_file() { - local file="$1" - - # File must exist - [ -f "$file" ] || return 0 - - # Parse line by line (not with source/eval) - local line key val - while IFS='=' read -r key val || [ -n "$key" ]; do - # Skip comments and empty lines - [[ "$key" =~ ^[[:space:]]*# ]] && continue - [[ -z "$key" ]] && continue - - # Validate key is in whitelist - _is_whitelisted_key "$key" || continue - - # Sanitize and export value - val="$(_sanitize_value "$val")" - [ $? -eq 0 ] && export "$key=$val" - done < "$file" - - return 0 -} -``` - -**Usage Examples**: - -```bash -# Load user defaults -load_vars_file "/usr/local/community-scripts/default.vars" - -# Load app-specific defaults -load_vars_file "$(get_app_defaults_path)" - -# Check if successful -if load_vars_file "$vars_path"; then - echo "Settings loaded successfully" -else - echo "Failed to load settings" -fi - -# Values are now available as variables -echo "Using $var_cpu cores" -echo "Allocating ${var_ram} MB RAM" -``` - ---- - -### `get_app_defaults_path()` - -**Purpose**: Get the full path for app-specific defaults file - -**Signature**: - -```bash -get_app_defaults_path() -``` - -**Parameters**: None - -**Returns**: - -- String: Full path to app defaults file - -**Implementation**: - -```bash -get_app_defaults_path() { - local n="${NSAPP:-${APP,,}}" - echo "/usr/local/community-scripts/defaults/${n}.vars" -} -``` - -**Usage Examples**: - -```bash -# Get app defaults path -app_defaults="$(get_app_defaults_path)" -echo "App defaults at: $app_defaults" - -# Check if app defaults exist -if [ -f "$(get_app_defaults_path)" ]; then - echo "App defaults available" -fi - -# Load app defaults -load_vars_file "$(get_app_defaults_path)" -``` - ---- - -### `default_var_settings()` - -**Purpose**: Load and 
display user global defaults - -**Signature**: - -```bash -default_var_settings() -``` - -**Parameters**: None - -**Returns**: - -- `0` on success -- `1` on error - -**Workflow**: - -``` -1. Find default.vars location - (usually /usr/local/community-scripts/default.vars) - -2. Create if missing - -3. Load variables from file - -4. Map var_verbose → VERBOSE variable - -5. Call base_settings (apply to container config) - -6. Call echo_default (display summary) -``` - -**Implementation Pattern**: - -```bash -default_var_settings() { - local VAR_WHITELIST=( - var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_disk var_fuse var_gpu - var_gateway var_hostname var_ipv6_method var_mac var_mtu - var_net var_ns var_pw var_ram var_tags var_tun var_unprivileged - var_verbose var_vlan var_ssh var_ssh_authorized_key - var_container_storage var_template_storage - ) - - # Ensure file exists - _ensure_default_vars - - # Find and load - local dv="$(_find_default_vars)" - load_vars_file "$dv" - - # Map verbose flag - if [[ -n "${var_verbose:-}" ]]; then - case "${var_verbose,,}" in - 1 | yes | true | on) VERBOSE="yes" ;; - *) VERBOSE="${var_verbose}" ;; - esac - fi - - # Apply and display - base_settings "$VERBOSE" - echo_default -} -``` - ---- - -### `maybe_offer_save_app_defaults()` - -**Purpose**: Offer to save current settings as app-specific defaults - -**Signature**: - -```bash -maybe_offer_save_app_defaults() -``` - -**Parameters**: None - -**Returns**: None (side effects only) - -**Behavior**: - -1. After advanced installation completes -2. Offers user: "Save as App Defaults for ?" -3. If yes: - - Saves to `/usr/local/community-scripts/defaults/.vars` - - Only whitelisted variables included - - Previous defaults backed up (if exists) -4. 
If no: - - No action taken - -**Flow**: - -```bash -maybe_offer_save_app_defaults() { - local app_vars_path="$(get_app_defaults_path)" - - # Build current settings from memory - local new_tmp="$(_build_current_app_vars_tmp)" - - # Check if already exists - if [ -f "$app_vars_path" ]; then - # Show diff and ask: Update? Keep? View Diff? - _show_app_defaults_diff_menu "$new_tmp" "$app_vars_path" - else - # New defaults - just save - if whiptail --yesno "Save as App Defaults for $APP?" 10 60; then - mv "$new_tmp" "$app_vars_path" - chmod 644 "$app_vars_path" - fi - fi -} -``` - ---- - -### `_sanitize_value()` - -**Purpose**: Remove dangerous characters/patterns from configuration values - -**Signature**: - -```bash -_sanitize_value(value) -``` - -**Parameters**: - -| Param | Type | Required | -| ----- | ------ | -------- | -| value | String | Yes | - -**Returns**: - -- `0` (success) + sanitized value on stdout -- `1` (failure) + nothing if dangerous - -**Dangerous Patterns**: - -| Pattern | Threat | Example | -| --------- | -------------------- | -------------------- | -| `$(...)` | Command substitution | `$(rm -rf /)` | -| `` ` ` `` | Command substitution | `` `whoami` `` | -| `;` | Command separator | `value; rm -rf /` | -| `&` | Background execution | `value & malicious` | -| `<(` | Process substitution | `<(cat /etc/passwd)` | - -**Implementation**: - -```bash -_sanitize_value() { - case "$1" in - *'$('* | *'`'* | *';'* | *'&'* | *'<('*) - echo "" - return 1 # Reject dangerous value - ;; - esac - echo "$1" - return 0 -} -``` - -**Usage Examples**: - -```bash -# Safe value -_sanitize_value "192.168.1.1" # Returns: 192.168.1.1 (status: 0) - -# Dangerous value -_sanitize_value "$(whoami)" # Returns: (empty) (status: 1) - -# Usage in code -if val="$(_sanitize_value "$user_input")"; then - export var_hostname="$val" -else - msg_error "Invalid value: contains dangerous characters" -fi -``` - ---- - -### `_is_whitelisted_key()` - -**Purpose**: Check if variable name is 
in allowed whitelist - -**Signature**: - -```bash -_is_whitelisted_key(key) -``` - -**Parameters**: - -| Param | Type | Required | Example | -| ----- | ------ | -------- | --------- | -| key | String | Yes | `var_cpu` | - -**Returns**: - -- `0` if key is whitelisted -- `1` if key is NOT whitelisted - -**Implementation**: - -```bash -_is_whitelisted_key() { - local k="$1" - local w - for w in "${VAR_WHITELIST[@]}"; do - [ "$k" = "$w" ] && return 0 - done - return 1 -} -``` - -**Usage Examples**: - -```bash -# Check if variable can be saved -if _is_whitelisted_key "var_cpu"; then - echo "var_cpu can be saved" -fi - -# Reject unknown variables -if ! _is_whitelisted_key "var_custom"; then - msg_error "var_custom is not supported" -fi -``` - ---- - -## Variable Precedence - -### Loading Order - -When a container is being created, variables are resolved in this order: - -``` -Step 1: Read ENVIRONMENT VARIABLES - ├─ Check if var_cpu is already set in shell environment - ├─ Check if var_ram is already set - └─ ...all var_* variables - -Step 2: Load APP-SPECIFIC DEFAULTS - ├─ Check if /usr/local/community-scripts/defaults/pihole.vars exists - ├─ Load all var_* from that file - └─ These override built-ins but NOT environment variables - -Step 3: Load USER GLOBAL DEFAULTS - ├─ Check if /usr/local/community-scripts/default.vars exists - ├─ Load all var_* from that file - └─ These override built-ins but NOT app-specific - -Step 4: Use BUILT-IN DEFAULTS - └─ Hardcoded in script (lowest priority) -``` - -### Precedence Examples - -**Example 1: Environment Variable Wins** - -```bash -# Shell environment has highest priority -$ export var_cpu=16 -$ bash pihole-install.sh - -# Result: Container gets 16 cores -# (ignores app defaults, user defaults, built-ins) -``` - -**Example 2: App Defaults Override User Defaults** - -```bash -# User Defaults: var_cpu=4 -# App Defaults: var_cpu=2 -$ bash pihole-install.sh - -# Result: Container gets 2 cores -# (app-specific setting takes 
precedence) -``` - -**Example 3: All Defaults Missing (Built-ins Used)** - -```bash -# No environment variables set -# No app defaults file -# No user defaults file -$ bash pihole-install.sh - -# Result: Uses built-in defaults -# (var_cpu might be 2 by default) -``` - -### Implementation in Code - -```bash -# Typical pattern in build.func - -base_settings() { - # Priority 1: Environment variables (already set if export used) - CT_TYPE=${var_unprivileged:-"1"} # Use existing or default - - # Priority 2: Load app defaults (may override above) - if [ -f "$(get_app_defaults_path)" ]; then - load_vars_file "$(get_app_defaults_path)" - fi - - # Priority 3: Load user defaults - if [ -f "/usr/local/community-scripts/default.vars" ]; then - load_vars_file "/usr/local/community-scripts/default.vars" - fi - - # Priority 4: Apply built-in defaults (lowest) - CORE_COUNT=${var_cpu:-"${APP_CPU_DEFAULT:-2}"} - RAM_SIZE=${var_ram:-"${APP_RAM_DEFAULT:-1024}"} - - # Result: var_cpu has been set through precedence chain -} -``` - ---- - -## Data Flow Diagrams - -### Installation Flow: Advanced Settings - -``` -┌──────────────┐ -│ Start Script│ -└──────┬───────┘ - │ - v -┌──────────────────────────────┐ -│ Display Installation Mode │ -│ Menu (5 options) │ -└──────┬───────────────────────┘ - │ User selects "Advanced Settings" - v -┌──────────────────────────────────┐ -│ Call: base_settings() │ -│ (Apply built-in defaults) │ -└──────┬───────────────────────────┘ - │ - v -┌──────────────────────────────────┐ -│ Call: advanced_settings() │ -│ (Show 19-step wizard) │ -│ - Ask CPU, RAM, Disk, Network... 
│ -└──────┬───────────────────────────┘ - │ - v -┌──────────────────────────────────┐ -│ Show Summary │ -│ Review all chosen values │ -└──────┬───────────────────────────┘ - │ User confirms - v -┌──────────────────────────────────┐ -│ Create Container │ -│ Using current variable values │ -└──────┬───────────────────────────┘ - │ - v -┌──────────────────────────────────┐ -│ Installation Complete │ -└──────┬───────────────────────────┘ - │ - v -┌──────────────────────────────────────┐ -│ Offer: Save as App Defaults? │ -│ (Save current settings) │ -└──────┬───────────────────────────────┘ - │ - ├─ YES → Save to defaults/.vars - │ - └─ NO → Exit -``` - -### Variable Resolution Flow - -``` -CONTAINER CREATION STARTED - │ - v - ┌─────────────────────┐ - │ Check ENVIRONMENT │ - │ for var_cpu, var_..│ - └──────┬──────────────┘ - │ Found? Use them (Priority 1) - │ Not found? Continue... - v - ┌──────────────────────────┐ - │ Load App Defaults │ - │ /defaults/.vars │ - └──────┬───────────────────┘ - │ File exists? Parse & load (Priority 2) - │ Not found? Continue... - v - ┌──────────────────────────┐ - │ Load User Defaults │ - │ /default.vars │ - └──────┬───────────────────┘ - │ File exists? Parse & load (Priority 3) - │ Not found? Continue... - v - ┌──────────────────────────┐ - │ Use Built-in Defaults │ - │ (Hardcoded values) │ - └──────┬───────────────────┘ - │ - v - ┌──────────────────────────┐ - │ All Variables Resolved │ - │ Ready for container │ - │ creation │ - └──────────────────────────┘ -``` - ---- - -## Security Model - -### Threat Model - -| Threat | Mitigation | -| ---------------------------- | ------------------------------------------------- | -| **Arbitrary Code Execution** | No `source` or `eval`; manual parsing only | -| **Variable Injection** | Whitelist of allowed variable names | -| **Command Substitution** | `_sanitize_value()` blocks `$()`, backticks, etc. 
| -| **Path Traversal** | Files locked to `/usr/local/community-scripts/` | -| **Permission Escalation** | Files created with restricted permissions | -| **Information Disclosure** | Sensitive variables not logged | - -### Security Controls - -#### 1. Input Validation - -```bash -# Only specific variables allowed -if ! _is_whitelisted_key "$key"; then - skip_this_variable -fi - -# Values sanitized -if ! val="$(_sanitize_value "$value")"; then - reject_entire_line -fi -``` - -#### 2. Safe File Parsing - -```bash -# ❌ DANGEROUS (OLD) -source /path/to/config.conf -# Could execute: rm -rf / or any code - -# ✅ SAFE (NEW) -load_vars_file "/path/to/config.conf" -# Only reads var_name=value pairs, no execution -``` - -#### 3. Whitelisting - -```bash -# Only these variables can be configured -var_cpu, var_ram, var_disk, var_brg, ... -var_hostname, var_pw, var_ssh, ... - -# NOT allowed: -var_malicious, var_hack, custom_var, ... -``` - -#### 4. Value Constraints - -```bash -# No command injection patterns -if [[ "$value" =~ ($|`|;|&|<\() ]]; then - reject_value -fi -``` - ---- - -## Implementation Details - -### Module: `build.func` - -**Load Order** (in actual scripts): - -1. `#!/usr/bin/env bash` - Shebang -2. `source /dev/stdin <<<$(curl ... api.func)` - API functions -3. `source /dev/stdin <<<$(curl ... build.func)` - Build functions -4. `variables()` - Initialize variables -5. `check_root()` - Security check -6. `install_script()` - Main flow - -**Key Sections**: - -```bash -# Section 1: Initialization & Variables -- variables() -- NSAPP, var_install, INTEGER pattern, etc. 
- -# Section 2: Storage Management -- storage_selector() -- ensure_storage_selection_for_vars_file() - -# Section 3: Base Settings -- base_settings() # Apply defaults to all var_* -- echo_default() # Display current settings - -# Section 4: Variable Loading -- load_vars_file() # Safe parsing -- _is_whitelisted_key() # Validation -- _sanitize_value() # Threat mitigation - -# Section 5: Defaults Management -- default_var_settings() # Load user defaults -- get_app_defaults_path() # Get app defaults path -- maybe_offer_save_app_defaults() # Save option - -# Section 6: Installation Flow -- install_script() # Main entry point -- advanced_settings() # 20-step wizard -``` - -### Regex Patterns Used - -| Pattern | Purpose | Example Match | -| ---------------------- | --------------------- | ----------------------- | -| `^[0-9]+([.][0-9]+)?$` | Integer validation | `4`, `192.168` | -| `^var_[a-z_]+$` | Variable name | `var_cpu`, `var_ssh` | -| `*'$('*` | Command substitution | `$(whoami)` | -| `*\`\*` | Backtick substitution | `` `cat /etc/passwd` `` | - ---- - -## Appendix: Migration Reference - -### Old Pattern (Deprecated) - -```bash -# ❌ OLD: config-file.func -source config-file.conf # Executes arbitrary code -if [ "$USE_DEFAULTS" = "yes" ]; then - apply_settings_directly -fi -``` - -### New Pattern (Current) - -```bash -# ✅ NEW: load_vars_file() -if load_vars_file "$(get_app_defaults_path)"; then - echo "Settings loaded securely" -fi -``` - -### Function Mapping - -| Old | New | Location | -| ---------------- | --------------------------------- | ---------- | -| `read_config()` | `load_vars_file()` | build.func | -| `write_config()` | `_build_current_app_vars_tmp()` | build.func | -| None | `maybe_offer_save_app_defaults()` | build.func | -| None | `get_app_defaults_path()` | build.func | - ---- - -**End of Technical Reference** diff --git a/docs/api/README.md b/docs/api/README.md deleted file mode 100644 index 4d0194fee..000000000 --- a/docs/api/README.md +++ /dev/null 
@@ -1,146 +0,0 @@ -# API Integration Documentation (/api) - -This directory contains comprehensive documentation for API integration and the `/api` directory. - -## Overview - -The `/api` directory contains the Proxmox Community Scripts API backend for diagnostic reporting, telemetry, and analytics integration. - -## Key Components - -### Main API Service -Located in `/api/main.go`: -- RESTful API for receiving telemetry data -- Installation statistics tracking -- Error reporting and analytics -- Performance monitoring - -### Integration with Scripts -The API is integrated into all installation scripts via `api.func`: -- Sends installation start/completion events -- Reports errors and exit codes -- Collects anonymous usage statistics -- Enables project analytics - -## Documentation Structure - -API documentation covers: -- API endpoint specifications -- Integration methods -- Data formats and schemas -- Error handling -- Privacy and data handling - -## Key Resources - -- **[misc/api.func/](../misc/api.func/)** - API function library documentation -- **[misc/api.func/README.md](../misc/api.func/README.md)** - Quick reference -- **[misc/api.func/API_FUNCTIONS_REFERENCE.md](../misc/api.func/API_FUNCTIONS_REFERENCE.md)** - Complete function reference - -## API Functions - -The `api.func` library provides: - -### `post_to_api()` -Send container installation data to API. - -**Usage**: -```bash -post_to_api CTID STATUS APP_NAME -``` - -### `post_update_to_api()` -Report application update status. - -**Usage**: -```bash -post_update_to_api CTID APP_NAME VERSION -``` - -### `get_error_description()` -Get human-readable error description from exit code. 
- -**Usage**: -```bash -ERROR_DESC=$(get_error_description EXIT_CODE) -``` - -## API Integration Points - -### In Container Creation (`ct/AppName.sh`) -- Called by build.func to report container creation -- Sends initial container setup data -- Reports success or failure - -### In Installation Scripts (`install/appname-install.sh`) -- Called at start of installation -- Called on installation completion -- Called on error conditions - -### Data Collected -- Container/VM ID -- Application name and version -- Installation duration -- Success/failure status -- Error codes (if failure) -- Anonymous usage metrics - -## Privacy - -All API data: -- ✅ Anonymous (no personal data) -- ✅ Aggregated for statistics -- ✅ Used only for project improvement -- ✅ No tracking of user identities -- ✅ Can be disabled if desired - -## API Architecture - -``` -Installation Scripts - │ - ├─ Call: api.func functions - │ - └─ POST to: https://api.community-scripts.org - │ - ├─ Receives data - ├─ Validates format - ├─ Stores metrics - └─ Aggregates statistics - │ - └─ Used for: - ├─ Download tracking - ├─ Error trending - ├─ Feature usage stats - └─ Project health monitoring -``` - -## Common API Tasks - -- **Enable API reporting** → Built-in by default, no configuration needed -- **Disable API** → Set `api_disable="yes"` before running -- **View API data** → Visit https://community-scripts.org/stats -- **Report API errors** → [GitHub Issues](https://github.com/community-scripts/ProxmoxVE/issues) - -## Debugging API Issues - -If API calls fail: -1. Check internet connectivity -2. Verify API endpoint availability -3. Review error codes in [EXIT_CODES.md](../EXIT_CODES.md) -4. Check API function logs -5. 
Report issues on GitHub - -## API Endpoint - -**Base URL**: `https://api.community-scripts.org` - -**Endpoints**: -- `POST /install` - Report container installation -- `POST /update` - Report application update -- `GET /stats` - Public statistics - ---- - -**Last Updated**: December 2025 -**Maintainers**: community-scripts team diff --git a/docs/contribution/AI.md b/docs/contribution/AI.md deleted file mode 100644 index 2da59ed8e..000000000 --- a/docs/contribution/AI.md +++ /dev/null @@ -1,868 +0,0 @@ -# 🤖 AI Contribution Guidelines for ProxmoxVE - -> **This documentation is intended for all AI assistants (GitHub Copilot, Claude, ChatGPT, etc.) contributing to this project.** - -## 🎯 Core Principles - -### 1. **Maximum Use of `tools.func` Functions** - -We have an extensive library of helper functions. **NEVER** implement your own solutions when a function already exists! - -### 2. **No Pointless Variables** - -Only create variables when they: - -- Are used multiple times -- Improve readability -- Are intended for configuration - -### 3. **Consistent Script Structure** - -All scripts follow an identical structure. Deviations are not acceptable. - -### 4. **Bare-Metal Installation** - -We do **NOT use Docker** for our installation scripts. All applications are installed directly on the system. 
- ---- - -## 📁 Script Types and Their Structure - -### CT Script (`ct/AppName.sh`) - -```bash -#!/usr/bin/env bash -source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) -# Copyright (c) 2021-2026 community-scripts ORG -# Author: AuthorName (GitHubUsername) -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://application-url.com - -APP="AppName" -var_tags="${var_tags:-tag1;tag2;tag3}" -var_cpu="${var_cpu:-2}" -var_ram="${var_ram:-2048}" -var_disk="${var_disk:-8}" -var_os="${var_os:-debian}" -var_version="${var_version:-13}" -var_unprivileged="${var_unprivileged:-1}" - -header_info "$APP" -variables -color -catch_errors - -function update_script() { - header_info - check_container_storage - check_container_resources - - if [[ ! -d /opt/appname ]]; then - msg_error "No ${APP} Installation Found!" - exit - fi - - if check_for_gh_release "appname" "YourUsername/YourRepo"; then - msg_info "Stopping Service" - systemctl stop appname - msg_ok "Stopped Service" - - msg_info "Backing up Data" - cp -r /opt/appname/data /opt/appname_data_backup - msg_ok "Backed up Data" - - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "appname" "owner/repo" "tarball" "latest" "/opt/appname" - - # Build steps... - - msg_info "Restoring Data" - cp -r /opt/appname_data_backup/. /opt/appname/data - rm -rf /opt/appname_data_backup - msg_ok "Restored Data" - - msg_info "Starting Service" - systemctl start appname - msg_ok "Started Service" - msg_ok "Updated successfully!" 
- fi - exit -} - -start -build_container -description - -msg_ok "Completed Successfully!\n" -echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" -echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:PORT${CL}" -``` - -### Install Script (`install/AppName-install.sh`) - -```bash -#!/usr/bin/env bash - -# Copyright (c) 2021-2026 community-scripts ORG -# Author: AuthorName (GitHubUsername) -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://application-url.com - -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -color -verb_ip6 -catch_errors -setting_up_container -network_check -update_os - -msg_info "Installing Dependencies" -$STD apt-get install -y \ - dependency1 \ - dependency2 -msg_ok "Installed Dependencies" - -# Runtime Setup (ALWAYS use our functions!) -NODE_VERSION="22" setup_nodejs -# or -PG_VERSION="16" setup_postgresql -# or -setup_uv -# etc. - -fetch_and_deploy_gh_release "appname" "owner/repo" "tarball" "latest" "/opt/appname" - -msg_info "Setting up Application" -cd /opt/appname -# Build/Setup Schritte... 
-msg_ok "Set up Application" - -msg_info "Creating Service" -cat <<EOF >/etc/systemd/system/appname.service -[Unit] -Description=AppName Service -After=network.target - -[Service] -Type=simple -User=root -WorkingDirectory=/opt/appname -ExecStart=/path/to/executable -Restart=on-failure -RestartSec=5 - -[Install] -WantedBy=multi-user.target -EOF -systemctl enable -q --now appname -msg_ok "Created Service" - -motd_ssh -customize -cleanup_lxc -``` - ---- - -## 🔧 Available Helper Functions - -### Release Management - -| Function | Description | Example | -| ----------------------------- | ----------------------------------- | ------------------------------------------------------------- | -| `fetch_and_deploy_gh_release` | Fetches and installs GitHub Release | `fetch_and_deploy_gh_release "app" "owner/repo"` | -| `check_for_gh_release` | Checks for new version | `if check_for_gh_release "app" "YourUsername/YourRepo"; then` | - -**Modes for `fetch_and_deploy_gh_release`:** - -```bash -# Tarball/Source (Standard) -fetch_and_deploy_gh_release "appname" "owner/repo" - -# Binary (.deb) -fetch_and_deploy_gh_release "appname" "owner/repo" "binary" - -# Prebuilt Archive -fetch_and_deploy_gh_release "appname" "owner/repo" "prebuild" "latest" "/opt/appname" "filename.tar.gz" - -# Single Binary -fetch_and_deploy_gh_release "appname" "owner/repo" "singlefile" "latest" "/opt/appname" "binary-linux-amd64" -``` - -**Clean Install Flag:** - -```bash -CLEAN_INSTALL=1 fetch_and_deploy_gh_release "appname" "owner/repo" -``` - -### Runtime/Language Setup - -| Function | Variable(s) | Example | -| -------------- | ----------------------------- | ---------------------------------------------------- | -| `setup_nodejs` | `NODE_VERSION`, `NODE_MODULE` | `NODE_VERSION="22" setup_nodejs` | -| `setup_uv` | `PYTHON_VERSION` | `PYTHON_VERSION="3.12" setup_uv` | -| `setup_go` | `GO_VERSION` | `GO_VERSION="1.22" setup_go` | -| `setup_rust` | `RUST_VERSION`, `RUST_CRATES` | `RUST_CRATES="monolith" 
setup_rust` | -| `setup_ruby` | `RUBY_VERSION` | `RUBY_VERSION="3.3" setup_ruby` | -| `setup_java` | `JAVA_VERSION` | `JAVA_VERSION="21" setup_java` | -| `setup_php` | `PHP_VERSION`, `PHP_MODULES` | `PHP_VERSION="8.3" PHP_MODULES="redis,gd" setup_php` | - -### Database Setup - -| Function | Variable(s) | Example | -| --------------------- | ------------------------------------ | ----------------------------------------------------------- | -| `setup_postgresql` | `PG_VERSION`, `PG_MODULES` | `PG_VERSION="16" setup_postgresql` | -| `setup_postgresql_db` | `PG_DB_NAME`, `PG_DB_USER` | `PG_DB_NAME="mydb" PG_DB_USER="myuser" setup_postgresql_db` | -| `setup_mariadb_db` | `MARIADB_DB_NAME`, `MARIADB_DB_USER` | `MARIADB_DB_NAME="mydb" setup_mariadb_db` | -| `setup_mysql` | `MYSQL_VERSION` | `setup_mysql` | -| `setup_mongodb` | `MONGO_VERSION` | `setup_mongodb` | -| `setup_clickhouse` | - | `setup_clickhouse` | - -### Tools & Utilities - -| Function | Description | -| ------------------- | ---------------------------------- | -| `setup_adminer` | Installs Adminer for DB management | -| `setup_composer` | Install PHP Composer | -| `setup_ffmpeg` | Install FFmpeg | -| `setup_imagemagick` | Install ImageMagick | -| `setup_gs` | Install Ghostscript | -| `setup_hwaccel` | Configure hardware acceleration | - -### Helper Utilities - -| Function | Description | Example | -| ----------------------------- | ---------------------------- | ----------------------------------------- | -| `import_local_ip` | Sets `$LOCAL_IP` variable | `import_local_ip` | -| `ensure_dependencies` | Checks/installs dependencies | `ensure_dependencies curl jq` | -| `install_packages_with_retry` | APT install with retry | `install_packages_with_retry nginx redis` | - ---- - -## ❌ Anti-Patterns (NEVER use!) - -### 1. 
Pointless Variables - -```bash -# ❌ WRONG - unnecessary variables -APP_NAME="myapp" -APP_DIR="/opt/${APP_NAME}" -APP_USER="root" -APP_PORT="3000" -cd $APP_DIR - -# ✅ CORRECT - use directly -cd /opt/myapp -``` - -### 2. Custom Download Logic - -```bash -# ❌ WRONG - custom wget/curl logic -RELEASE=$(curl -s https://api.github.com/repos/YourUsername/YourRepo/releases/latest | jq -r '.tag_name') -wget https://github.com/YourUsername/YourRepo/archive/${RELEASE}.tar.gz -tar -xzf ${RELEASE}.tar.gz -mv repo-${RELEASE} /opt/myapp - -# ✅ CORRECT - use our function -fetch_and_deploy_gh_release "myapp" "YourUsername/YourRepo" "tarball" "latest" "/opt/myapp" -``` - -### 3. Custom Version-Check Logic - -```bash -# ❌ WRONG - custom version check -CURRENT=$(cat /opt/myapp/version.txt) -LATEST=$(curl -s https://api.github.com/repos/YourUsername/YourRepo/releases/latest | jq -r '.tag_name') -if [[ "$CURRENT" != "$LATEST" ]]; then - # update... -fi - -# ✅ CORRECT - use our function -if check_for_gh_release "myapp" "YourUsername/YourRepo"; then - # update... -fi -``` - -### 4. Docker-based Installation - -```bash -# ❌ WRONG - using Docker -docker pull myapp/myapp:latest -docker run -d --name myapp myapp/myapp:latest - -# ✅ CORRECT - Bare-Metal Installation -fetch_and_deploy_gh_release "myapp" "YourUsername/YourRepo" -npm install && npm run build -``` - -### 5. Custom Runtime Installation - -```bash -# ❌ WRONG - custom Node.js installation -curl -fsSL https://deb.nodesource.com/setup_22.x | bash - -apt install -y nodejs - -# ✅ CORRECT - use our function -NODE_VERSION="22" setup_nodejs -``` - -### 6. Redundant echo Statements - -```bash -# ❌ WRONG - custom logging messages -echo "Installing dependencies..." -apt install -y curl -echo "Done!" - -# ✅ CORRECT - use msg_info/msg_ok -msg_info "Installing Dependencies" -$STD apt install -y curl -msg_ok "Installed Dependencies" -``` - -### 7. 
Missing $STD Usage - -```bash -# ❌ WRONG - apt without $STD -apt install -y nginx - -# ✅ CORRECT - with $STD for silent output -$STD apt install -y nginx -``` - -### 8. Wrapping `tools.func` Functions in msg Blocks - -```bash -# ❌ WRONG - tools.func functions have their own msg_info/msg_ok! -msg_info "Installing Node.js" -NODE_VERSION="22" setup_nodejs -msg_ok "Installed Node.js" - -msg_info "Updating Application" -CLEAN_INSTALL=1 fetch_and_deploy_gh_release "appname" "owner/repo" "tarball" "latest" "/opt/appname" -msg_ok "Updated Application" - -# ✅ CORRECT - call directly without msg wrapper -NODE_VERSION="22" setup_nodejs - -CLEAN_INSTALL=1 fetch_and_deploy_gh_release "appname" "owner/repo" "tarball" "latest" "/opt/appname" -``` - -**Functions with built-in messages (NEVER wrap in msg blocks):** - -- `fetch_and_deploy_gh_release` -- `check_for_gh_release` -- `setup_nodejs` -- `setup_postgresql` / `setup_postgresql_db` -- `setup_mariadb` / `setup_mariadb_db` -- `setup_mongodb` -- `setup_mysql` -- `setup_ruby` -- `setup_go` -- `setup_java` -- `setup_php` -- `setup_uv` -- `setup_rust` -- `setup_composer` -- `setup_ffmpeg` -- `setup_imagemagick` -- `setup_gs` -- `setup_adminer` -- `setup_hwaccel` - -### 9. Creating Unnecessary System Users - -```bash -# ❌ WRONG - LXC containers run as root, no separate user needed -useradd -m -s /usr/bin/bash appuser -chown -R appuser:appuser /opt/appname -sudo -u appuser npm install - -# ✅ CORRECT - run directly as root -cd /opt/appname -$STD npm install -``` - -### 10. Using `export` in .env Files - -```bash -# ❌ WRONG - export is unnecessary in .env files -cat </opt/appname/.env -export DATABASE_URL=postgres://... -export SECRET_KEY=abc123 -export NODE_ENV=production -EOF - -# ✅ CORRECT - simple KEY=VALUE format (files are sourced with set -a) -cat </opt/appname/.env -DATABASE_URL=postgres://... -SECRET_KEY=abc123 -NODE_ENV=production -EOF -``` - -### 11. 
Using External Shell Scripts - -```bash -# ❌ WRONG - external script that gets executed -cat <<'EOF' >/opt/appname/install_script.sh -#!/bin/bash -cd /opt/appname -npm install -npm run build -EOF -chmod +x /opt/appname/install_script.sh -$STD bash /opt/appname/install_script.sh -rm -f /opt/appname/install_script.sh - -# ✅ CORRECT - run commands directly -cd /opt/appname -$STD npm install -$STD npm run build -``` - -### 12. Using `sudo` in LXC Containers - -```bash -# ❌ WRONG - sudo is unnecessary in LXC (already root) -sudo -u postgres psql -c "CREATE DATABASE mydb;" -sudo -u appuser npm install - -# ✅ CORRECT - use functions or run directly as root -PG_DB_NAME="mydb" PG_DB_USER="myuser" setup_postgresql_db - -cd /opt/appname -$STD npm install -``` - -### 13. Unnecessary `systemctl daemon-reload` - -```bash -# ❌ WRONG - daemon-reload is only needed when MODIFYING existing services -cat </etc/systemd/system/appname.service -# ... service config ... -EOF -systemctl daemon-reload # Unnecessary for new services! -systemctl enable -q --now appname - -# ✅ CORRECT - new services don't need daemon-reload -cat </etc/systemd/system/appname.service -# ... service config ... -EOF -systemctl enable -q --now appname -``` - -### 14. Creating Custom Credentials Files - -```bash -# ❌ WRONG - custom credentials file is not part of the standard template -msg_info "Saving Credentials" -cat <~/appname.creds -Database User: ${DB_USER} -Database Pass: ${DB_PASS} -EOF -msg_ok "Saved Credentials" - -# ✅ CORRECT - credentials are stored in .env or shown in final message only -# If you use setup_postgresql_db / setup_mariadb_db, a standard ~/[appname].creds is created automatically -``` - -### 15. Wrong Footer Pattern - -```bash -# ❌ WRONG - old cleanup pattern with msg blocks -motd_ssh -customize - -msg_info "Cleaning up" -$STD apt-get -y autoremove -$STD apt-get -y autoclean -msg_ok "Cleaned" - -# ✅ CORRECT - use cleanup_lxc function -motd_ssh -customize -cleanup_lxc -``` - -### 16. 
Manual Database Creation Instead of Functions - -```bash -# ❌ WRONG - manual database creation -DB_USER="myuser" -DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | cut -c1-13) -$STD sudo -u postgres psql -c "CREATE ROLE $DB_USER WITH LOGIN PASSWORD '$DB_PASS';" -$STD sudo -u postgres psql -c "CREATE DATABASE mydb WITH OWNER $DB_USER;" -$STD sudo -u postgres psql -d mydb -c "CREATE EXTENSION IF NOT EXISTS postgis;" - -# ✅ CORRECT - use setup_postgresql_db function -# This sets PG_DB_USER, PG_DB_PASS, PG_DB_NAME automatically -PG_DB_NAME="mydb" PG_DB_USER="myuser" PG_DB_EXTENSIONS="postgis" setup_postgresql_db -``` - -### 17. Writing Files Without Heredocs - -```bash -# ❌ WRONG - echo / printf / tee -echo "# Config" > /opt/app/config.yml -echo "port: 3000" >> /opt/app/config.yml - -printf "# Config\nport: 3000\n" > /opt/app/config.yml -cat config.yml | tee /opt/app/config.yml -``` - -```bash -# ✅ CORRECT - always use a single heredoc -cat </opt/app/config.yml -# Config -port: 3000 -EOF -``` - ---- - -## 📝 Important Rules - -### Variable Declarations (CT Script) - -```bash -# Standard declarations (ALWAYS present) -APP="AppName" -var_tags="${var_tags:-tag1;tag2}" -var_cpu="${var_cpu:-2}" -var_ram="${var_ram:-2048}" -var_disk="${var_disk:-8}" -var_os="${var_os:-debian}" -var_version="${var_version:-13}" -var_unprivileged="${var_unprivileged:-1}" -``` - -### Update-Script Pattern - -```bash -function update_script() { - header_info - check_container_storage - check_container_resources - - # 1. Check if installation exists - if [[ ! -d /opt/appname ]]; then - msg_error "No ${APP} Installation Found!" - exit - fi - - # 2. Check for update - if check_for_gh_release "appname" "YourUsername/YourRepo"; then - # 3. Stop service - msg_info "Stopping Service" - systemctl stop appname - msg_ok "Stopped Service" - - # 4. Backup data (if present) - msg_info "Backing up Data" - cp -r /opt/appname/data /opt/appname_data_backup - msg_ok "Backed up Data" - - # 5. 
Perform clean install - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "appname" "owner/repo" "tarball" "latest" "/opt/appname" - - # 6. Rebuild (if needed) - cd /opt/appname - $STD npm install - $STD npm run build - - # 7. Restore data - msg_info "Restoring Data" - cp -r /opt/appname_data_backup/. /opt/appname/data - rm -rf /opt/appname_data_backup - msg_ok "Restored Data" - - # 8. Start service - msg_info "Starting Service" - systemctl start appname - msg_ok "Started Service" - msg_ok "Updated successfully!" - fi - exit # IMPORTANT: Always end with exit! -} -``` - -### Systemd Service Pattern - -```bash -msg_info "Creating Service" -cat </etc/systemd/system/appname.service -[Unit] -Description=AppName Service -After=network.target - -[Service] -Type=simple -User=root -WorkingDirectory=/opt/appname -Environment=NODE_ENV=production -ExecStart=/usr/bin/node /opt/appname/server.js -Restart=on-failure -RestartSec=5 - -[Install] -WantedBy=multi-user.target -EOF -systemctl enable -q --now appname -msg_ok "Created Service" -``` - -### Installation Script Footer - -```bash -# ALWAYS at the end of the install script: -motd_ssh -customize -cleanup_lxc -``` - ---- - -## 📖 Reference: Good Example Scripts - -Look at these recent well-implemented applications as reference: - -### Container Scripts (Latest 10) - -- [ct/thingsboard.sh](../ct/thingsboard.sh) - IoT platform with proper update_script -- [ct/unifi-os-server.sh](../ct/unifi-os-server.sh) - Complex setup with podman -- [ct/trip.sh](../ct/trip.sh) - Simple Ruby app -- [ct/fladder.sh](../ct/fladder.sh) - Media app with database -- [ct/qui.sh](../ct/qui.sh) - Lightweight utility -- [ct/kutt.sh](../ct/kutt.sh) - Node.js with PostgreSQL -- [ct/flatnotes.sh](../ct/flatnotes.sh) - Python notes app -- [ct/investbrain.sh](../ct/investbrain.sh) - Finance app -- [ct/gwn-manager.sh](../ct/gwn-manager.sh) - Network management -- [ct/sportarr.sh](../ct/sportarr.sh) - Specialized \*Arr variant - -### Install Scripts (Latest) - -- 
[install/unifi-os-server-install.sh](../install/unifi-os-server-install.sh) - Complex setup with API integration -- [install/trip-install.sh](../install/trip-install.sh) - Rails application setup -- [install/mail-archiver-install.sh](../install/mail-archiver-install.sh) - Email-related service - -**Key things to notice:** - -- Proper error handling with `catch_errors` -- Use of `check_for_gh_release` and `fetch_and_deploy_gh_release` -- Correct backup/restore patterns in `update_script` -- Footer always ends with `motd_ssh`, `customize`, `cleanup_lxc` -- JSON metadata files created for each app - ---- - -## � JSON Metadata Files - -Every application requires a JSON metadata file in `frontend/public/json/.json`. - -### JSON Structure - -```json -{ - "name": "AppName", - "slug": "appname", - "categories": [1], - "date_created": "2026-01-16", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://docs.appname.com/", - "website": "https://appname.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/appname.webp", - "config_path": "/opt/appname/.env", - "description": "Short description of the application and its purpose.", - "install_methods": [ - { - "type": "default", - "script": "ct/appname.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} -``` - -### Required Fields - -| Field | Type | Description | -| --------------------- | ------- | -------------------------------------------------- | -| `name` | string | Display name of the application | -| `slug` | string | Lowercase, no spaces, used for filenames | -| `categories` | array | Category ID(s) - see category list below | -| `date_created` | string | Creation date (YYYY-MM-DD) | -| `type` | string | `ct` for container, `vm` for virtual machine | -| `updateable` | boolean | Whether update_script 
is implemented | -| `privileged` | boolean | Whether container needs privileged mode | -| `interface_port` | number | Primary web interface port (or `null`) | -| `documentation` | string | Link to official docs | -| `website` | string | Link to official website | -| `logo` | string | URL to application logo (preferably selfhst icons) | -| `config_path` | string | Path to main config file (or empty string) | -| `description` | string | Brief description of the application | -| `install_methods` | array | Installation configurations | -| `default_credentials` | object | Default username/password (or null) | -| `notes` | array | Additional notes/warnings | - -### Categories - -| ID | Category | -| --- | ------------------------- | -| 0 | Miscellaneous | -| 1 | Proxmox & Virtualization | -| 2 | Operating Systems | -| 3 | Containers & Docker | -| 4 | Network & Firewall | -| 5 | Adblock & DNS | -| 6 | Authentication & Security | -| 7 | Backup & Recovery | -| 8 | Databases | -| 9 | Monitoring & Analytics | -| 10 | Dashboards & Frontends | -| 11 | Files & Downloads | -| 12 | Documents & Notes | -| 13 | Media & Streaming | -| 14 | \*Arr Suite | -| 15 | NVR & Cameras | -| 16 | IoT & Smart Home | -| 17 | ZigBee, Z-Wave & Matter | -| 18 | MQTT & Messaging | -| 19 | Automation & Scheduling | -| 20 | AI / Coding & Dev-Tools | -| 21 | Webservers & Proxies | -| 22 | Bots & ChatOps | -| 23 | Finance & Budgeting | -| 24 | Gaming & Leisure | -| 25 | Business & ERP | - -### Notes Format - -```json -"notes": [ - { - "text": "Change the default password after first login!", - "type": "warning" - }, - { - "text": "Requires at least 4GB RAM for optimal performance.", - "type": "info" - } -] -``` - -**Note types:** `info`, `warning`, `error` - -### Examples with Credentials - -```json -"default_credentials": { - "username": "admin", - "password": "admin" -} -``` - -Or no credentials: - -```json -"default_credentials": { - "username": null, - "password": null -} -``` - ---- - -## 🔍 
Checklist Before PR Creation - -- [ ] No Docker installation used -- [ ] `fetch_and_deploy_gh_release` used for GitHub releases -- [ ] `check_for_gh_release` used for update checks -- [ ] `setup_*` functions used for runtimes (nodejs, postgresql, etc.) -- [ ] **`tools.func` functions NOT wrapped in msg_info/msg_ok blocks** -- [ ] No redundant variables (only when used multiple times) -- [ ] `$STD` before all apt/npm/build commands -- [ ] `msg_info`/`msg_ok`/`msg_error` for logging (only for custom code) -- [ ] Correct script structure followed (see templates) -- [ ] Update function present and functional (CT scripts) -- [ ] Data backup implemented in update function (if applicable) -- [ ] `motd_ssh`, `customize`, `cleanup_lxc` at the end of install scripts -- [ ] No custom download/version-check logic -- [ ] All links point to `community-scripts/ProxmoxVE` (not `ProxmoxVED`!) -- [ ] JSON metadata file created in `frontend/public/json/.json` -- [ ] Category IDs are valid (0-25) -- [ ] Default OS version is Debian 13 or newer (unless special requirement) -- [ ] Default resources are reasonable for the application - ---- - -## 💡 Tips for AI Assistants - -1. **ALWAYS search `tools.func` first** before implementing custom solutions -2. **Use recent scripts as reference** (Thingsboard, UniFi OS, Trip, Flatnotes, etc.) -3. **Ask when uncertain** instead of introducing wrong patterns -4. **Test via GitHub** - push to your fork and test with curl (not local bash) - ```bash - bash -c "$(curl -fsSL https://raw.githubusercontent.com/YOUR_USERNAME/ProxmoxVE/main/ct/myapp.sh)" - # Wait 10-30 seconds after pushing - GitHub takes time to update files - ``` -5. **Consistency > Creativity** - follow established patterns strictly -6. **Check the templates** - they show the correct structure -7. **Don't wrap tools.func functions** - they handle their own msg_info/msg_ok output -8. **Minimal variables** - only create variables that are truly reused multiple times -9. 
**Always use $STD** - ensures silent/non-interactive execution -10. **Reference good examples** - look at recent additions in each category - ---- - -## 🍒 Important: Cherry-Picking Your Files for PR Submission - -⚠️ **CRITICAL**: When you submit your PR, you must use git cherry-pick to send ONLY your 3-4 files! - -Why? Because `setup-fork.sh` modifies 600+ files to update links. If you commit all changes, your PR will be impossible to merge. - -**See**: [README.md - Cherry-Pick Section](README.md#-cherry-pick-submitting-only-your-changes) for complete instructions on: - -- Creating a clean submission branch -- Cherry-picking only your files (ct/myapp.sh, install/myapp-install.sh, frontend/public/json/myapp.json) -- Verifying your PR has only 3 file changes (not 600+) - -**Quick reference**: - -```bash -# Create clean branch from upstream -git fetch upstream -git checkout -b submit/myapp upstream/main - -# Cherry-pick your commit(s) or manually add your 3-4 files -# Then push to your fork and create PR -``` - ---- - -## 📚 Further Documentation - -- [CONTRIBUTING.md](CONTRIBUTING.md) - General contribution guidelines -- [GUIDE.md](GUIDE.md) - Detailed developer documentation -- [HELPER_FUNCTIONS.md](HELPER_FUNCTIONS.md) - Complete tools.func reference -- [README.md](README.md) - Cherry-pick guide and workflow instructions -- [../TECHNICAL_REFERENCE.md](../TECHNICAL_REFERENCE.md) - Technical deep dive -- [../EXIT_CODES.md](../EXIT_CODES.md) - Exit code reference -- [templates_ct/](templates_ct/) - CT script templates -- [templates_install/](templates_install/) - Install script templates -- [templates_json/](templates_json/) - JSON metadata templates diff --git a/docs/contribution/CODE-AUDIT.md b/docs/contribution/CODE-AUDIT.md deleted file mode 100644 index d1aedf275..000000000 --- a/docs/contribution/CODE-AUDIT.md +++ /dev/null @@ -1,41 +0,0 @@ -# 🧪 Code Audit: LXC Script Flow - -This guide explains the current execution flow and what to verify during reviews. 
- -## Execution Flow (CT + Install) - -1. `ct/appname.sh` runs on the Proxmox host and sources `misc/build.func`. -2. `build.func` orchestrates prompts, container creation, and invokes the install script. -3. Inside the container, `misc/install.func` exposes helper functions via `$FUNCTIONS_FILE_PATH`. -4. `install/appname-install.sh` performs the application install. -5. The CT script prints the completion message. - -## Audit Checklist - -### CT Script (ct/) - -- Sources `misc/build.func` from `community-scripts/ProxmoxVE/main` (setup-fork.sh updates for forks). -- Uses `check_for_gh_release` + `fetch_and_deploy_gh_release` for updates. -- No Docker-based installs. - -### Install Script (install/) - -- Sources `$FUNCTIONS_FILE_PATH`. -- Uses `tools.func` helpers (setup\_\*). -- Ends with `motd_ssh`, `customize`, `cleanup_lxc`. - -### JSON Metadata - -- File in `frontend/public/json/.json` matches template schema. - -### Testing - -- Test via curl from your fork (CT script only). -- Wait 10-30 seconds after push. - -## References - -- `docs/contribution/templates_ct/AppName.sh` -- `docs/contribution/templates_install/AppName-install.sh` -- `docs/contribution/templates_json/AppName.json` -- `docs/contribution/GUIDE.md` diff --git a/docs/contribution/CONTRIBUTING.md b/docs/contribution/CONTRIBUTING.md deleted file mode 100644 index 619204378..000000000 --- a/docs/contribution/CONTRIBUTING.md +++ /dev/null @@ -1,178 +0,0 @@ -# Community Scripts Contribution Guide - -## **Welcome to the communty-scripts Repository!** - -📜 These documents outline the essential coding standards for all our scripts and JSON files. Adhering to these standards ensures that our codebase remains consistent, readable, and maintainable. By following these guidelines, we can improve collaboration, reduce errors, and enhance the overall quality of our project. - -### Why Coding Standards Matter - -Coding standards are crucial for several reasons: - -1. 
**Consistency**: Consistent code is easier to read, understand, and maintain. It helps new team members quickly get up to speed and reduces the learning curve. -2. **Readability**: Clear and well-structured code is easier to debug and extend. It allows developers to quickly identify and fix issues. -3. **Maintainability**: Code that follows a standard structure is easier to refactor and update. It ensures that changes can be made with minimal risk of introducing new bugs. -4. **Collaboration**: When everyone follows the same standards, it becomes easier to collaborate on code. It reduces friction and misunderstandings during code reviews and merges. - -### Scope of These Documents - -These documents cover the coding standards for the following types of files in our project: - -- **`install/$AppName-install.sh` Scripts**: These scripts are responsible for the installation of applications. -- **`ct/$AppName.sh` Scripts**: These scripts handle the creation and updating of containers. -- **`json/$AppName.json`**: These files store structured data and are used for the website. - -Each section provides detailed guidelines on various aspects of coding, including shebang usage, comments, variable naming, function naming, indentation, error handling, command substitution, quoting, script structure, and logging. Additionally, examples are provided to illustrate the application of these standards. - -By following the coding standards outlined in this document, we ensure that our scripts and JSON files are of high quality, making our project more robust and easier to manage. Please refer to this guide whenever you create or update scripts and JSON files to maintain a high standard of code quality across the project. 📚🔍 - -Let's work together to keep our codebase clean, efficient, and maintainable! 💪🚀 - -## Getting Started - -Before contributing, please ensure that you have the following setup: - -1. **Visual Studio Code** (recommended for script development) -2. 
**Recommended VS Code Extensions:** - - [Shell Syntax](https://marketplace.visualstudio.com/items?itemName=bmalehorn.shell-syntax) - - [ShellCheck](https://marketplace.visualstudio.com/items?itemName=timonwong.shellcheck) - - [Shell Format](https://marketplace.visualstudio.com/items?itemName=foxundermoon.shell-format) - -### Important Notes - -- Use [AppName.sh](https://github.com/community-scripts/ProxmoxVE/blob/main/docs/contribution/templates_ct/AppName.sh) and [AppName-install.sh](https://github.com/community-scripts/ProxmoxVE/blob/main/docs/contribution/templates_install/AppName-install.sh) as templates when creating new scripts. - ---- - -# 🚀 The Application Script (ct/AppName.sh) - -- You can find all coding standards, as well as the structure for this file [here](https://github.com/community-scripts/ProxmoxVE/blob/main/docs/contribution/templates_ct/AppName.md). -- These scripts are responsible for container creation, setting the necessary variables and handling the update of the application once installed. - ---- - -# 🛠 The Installation Script (install/AppName-install.sh) - -- You can find all coding standards, as well as the structure for this file [here](https://github.com/community-scripts/ProxmoxVE/blob/main/docs/contribution/templates_install/AppName-install.md). -- These scripts are responsible for the installation of the application. - ---- - -## 🚀 Building Your Own Scripts - -Start with the [template script](https://github.com/community-scripts/ProxmoxVE/blob/main/docs/contribution/templates_install/AppName-install.sh) - ---- - -## 🤝 Contribution Process - -### 1. Fork the repository - -Fork to your GitHub account - -### 2. Clone your fork on your local environment - -```bash -git clone https://github.com/yourUserName/ForkName -``` - -### 3. Create a new branch - -```bash -git switch -c your-feature-branch -``` - -### 4. 
Run setup-fork.sh to auto-configure your fork - -```bash -bash docs/contribution/setup-fork.sh --full -``` - -This script automatically: - -- Detects your GitHub username -- Updates ALL curl URLs to point to your fork (for testing) -- Creates `.git-setup-info` with your config -- Backs up all modified files (\*.backup) - -**IMPORTANT**: This modifies 600+ files! Use cherry-pick when submitting your PR (see below). - -### 5. Commit ONLY your new application files - -```bash -git commit -m "Your commit message" -``` - -### 5. Push to your fork - -```bash -git push origin your-feature-branch -``` - -### 6. Cherry-Pick: Submit Only Your Files for PR - -⚠️ **IMPORTANT**: setup-fork.sh modified 600+ files. You MUST only submit your 3 new files! - -See [README.md - Cherry-Pick Guide](README.md#-cherry-pick-submitting-only-your-changes) for step-by-step instructions. - -Quick version: - -```bash -# Create clean branch from upstream -git fetch upstream -git checkout -b submit/myapp upstream/main - -# Copy only your files -cp ../your-work-branch/ct/myapp.sh ct/myapp.sh -cp ../your-work-branch/install/myapp-install.sh install/myapp-install.sh -cp ../your-work-branch/frontend/public/json/myapp.json frontend/public/json/myapp.json - -# Commit and verify -git add ct/myapp.sh install/myapp-install.sh frontend/public/json/myapp.json -git commit -m "feat: add MyApp" -git diff upstream/main --name-only # Should show ONLY your 3 files - -# Push and create PR -git push origin submit/myapp -``` - -### 7. Create a Pull Request - -Open a Pull Request from `submit/myapp` → `community-scripts/ProxmoxVE/main`. - -Verify the PR shows ONLY these 3 files: - -- `ct/myapp.sh` -- `install/myapp-install.sh` -- `frontend/public/json/myapp.json` - ---- - -# 🛠️ Developer Mode & Debugging - -When building or testing scripts, you can use the `dev_mode` variable to enable powerful debugging features. These flags can be combined (comma-separated). 
- -**Usage**: -```bash -# Example: Run with trace and keep the container even if it fails -dev_mode="trace,keep" bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/myapp.sh)" -``` - -### Available Flags: - -| Flag | Description | -| :--- | :--- | -| `trace` | Enables `set -x` for maximum verbosity during execution. | -| `keep` | Prevents the container from being deleted if the build fails. | -| `pause` | Pauses execution at key points (e.g., before customization). | -| `breakpoint` | Allows hardcoded `breakpoint` calls in scripts to drop to a shell. | -| `logs` | Saves detailed build logs to `/var/log/community-scripts/`. | -| `dryrun` | Bypasses actual container creation (limited support). | -| `motd` | Forces an update of the Message of the Day (MOTD). | - ---- - -## 📚 Pages - -- [CT Template: AppName.sh](https://github.com/community-scripts/ProxmoxVE/blob/main/docs/contribution/templates_ct/AppName.sh) -- [Install Template: AppName-install.sh](https://github.com/community-scripts/ProxmoxVE/blob/main/docs/contribution/templates_install/AppName-install.sh) -- [JSON Template: AppName.json](https://github.com/community-scripts/ProxmoxVE/blob/main/docs/contribution/templates_json/AppName.json) diff --git a/docs/contribution/FORK_SETUP.md b/docs/contribution/FORK_SETUP.md deleted file mode 100644 index 0a25a30c7..000000000 --- a/docs/contribution/FORK_SETUP.md +++ /dev/null @@ -1,231 +0,0 @@ -# 🍴 Fork Setup Guide - -**Just forked ProxmoxVE? Run this first!** - -## Quick Start - -```bash -# Clone your fork -git clone https://github.com/YOUR_USERNAME/ProxmoxVE.git -cd ProxmoxVE - -# Run setup script (auto-detects your username from git) -bash docs/contribution/setup-fork.sh --full -``` - -That's it! ✅ - ---- - -## What Does It Do? - -The `setup-fork.sh` script automatically: - -1. **Detects** your GitHub username from git config -2. 
**Updates ALL hardcoded links** to point to your fork: - - Documentation links pointing to `community-scripts/ProxmoxVE` - - **Curl download URLs** in scripts (e.g., `curl ... github.com/community-scripts/ProxmoxVE/main/...`) -3. **Creates** `.git-setup-info` with your configuration details -4. **Backs up** all modified files (\*.backup for safety) - -### Why Updating Curl Links Matters - -Your scripts contain `curl` commands that download dependencies from GitHub (build.func, tools.func, etc.): - -```bash -# First line of ct/myapp.sh -source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) -``` - -**WITHOUT setup-fork.sh:** - -- Script URLs still point to `community-scripts/ProxmoxVE/main` -- If you test locally with `bash ct/myapp.sh`, you're testing local files, but the script's curl commands would download from **upstream** repo -- Your modifications aren't actually being tested via the curl commands! ❌ - -**AFTER setup-fork.sh:** - -- Script URLs are updated to `YourUsername/ProxmoxVE/main` -- When you test via curl from GitHub: `bash -c "$(curl ... YOUR_USERNAME/ProxmoxVE/main/ct/myapp.sh)"`, it downloads from **your fork** -- The script's curl commands also point to your fork, so you're actually testing your changes! ✅ -- ⏱️ **Important:** GitHub takes 10-30 seconds to recognize pushed files - wait before testing! 
- -```bash -# Example: What setup-fork.sh changes - -# BEFORE (points to upstream): -source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) - -# AFTER (points to your fork): -source <(curl -fsSL https://raw.githubusercontent.com/john/ProxmoxVE/main/misc/build.func) -``` - ---- - -## Usage - -### Auto-Detect (Recommended) - -```bash -bash docs/contribution/setup-fork.sh --full -``` - -Automatically reads your GitHub username from `git remote origin url` - -### Specify Username - -```bash -bash docs/contribution/setup-fork.sh --full john -``` - -Updates links to `github.com/john/ProxmoxVE` - -### Custom Repository Name - -```bash -bash docs/contribution/setup-fork.sh --full john my-fork -``` - -Updates links to `github.com/john/my-fork` - ---- - -## What Gets Updated? - -The script updates hardcoded links in these areas when using `--full`: - -- `ct/`, `install/`, `vm/` scripts -- `misc/` function libraries -- `docs/` (including `docs/contribution/`) -- Code examples in documentation - ---- - -## After Setup - -1. **Review changes** - - ```bash - git diff docs/ - ``` - -2. **Read git workflow tips** - - ```bash - cat .git-setup-info - ``` - -3. **Start contributing** - - ```bash - git checkout -b feature/my-app - # Make your changes... - git commit -m "feat: add my awesome app" - ``` - -4. **Follow the guide** - ```bash - cat docs/contribution/GUIDE.md - ``` - ---- - -## Common Workflows - -### Keep Your Fork Updated - -```bash -# Add upstream if you haven't already -git remote add upstream https://github.com/community-scripts/ProxmoxVE.git - -# Get latest from upstream -git fetch upstream -git rebase upstream/main -git push origin main -``` - -### Create a Feature Branch - -```bash -git checkout -b feature/docker-improvements -# Make changes... 
-git push origin feature/docker-improvements -# Then create PR on GitHub -``` - -### Sync Before Contributing - -```bash -git fetch upstream -git rebase upstream/main -git push -f origin main # Update your fork's main -git checkout -b feature/my-feature -``` - ---- - -## Troubleshooting - -### "Git is not installed" or "not a git repository" - -```bash -# Make sure you cloned the repo first -git clone https://github.com/YOUR_USERNAME/ProxmoxVE.git -cd ProxmoxVE -bash docs/contribution/setup-fork.sh --full -``` - -### "Could not auto-detect GitHub username" - -```bash -# Your git origin URL isn't set up correctly -git remote -v -# Should show your fork URL, not community-scripts - -# Fix it: -git remote set-url origin https://github.com/YOUR_USERNAME/ProxmoxVE.git -bash docs/contribution/setup-fork.sh --full -``` - -### "Permission denied" - -```bash -# Make script executable -chmod +x docs/contribution/setup-fork.sh -bash docs/contribution/setup-fork.sh --full -``` - -### Reverted Changes by Accident? - -```bash -# Backups are created automatically -git checkout docs/*.backup -# Or just re-run setup-fork.sh -bash docs/contribution/setup-fork.sh --full -``` - ---- - -## Next Steps - -1. ✅ Run `bash docs/contribution/setup-fork.sh --full` -2. 📖 Read [docs/contribution/GUIDE.md](GUIDE.md) -3. 🍴 Choose your contribution path: - - **Containers** → [docs/ct/README.md](docs/ct/README.md) - - **Installation** → [docs/install/README.md](docs/install/README.md) - - **VMs** → [docs/vm/README.md](docs/vm/README.md) - - **Tools** → [docs/tools/README.md](docs/tools/README.md) -4. 💻 Create your feature branch and contribute! - ---- - -## Questions? - -- **Fork Setup Issues?** → See [Troubleshooting](#troubleshooting) above -- **How to Contribute?** → [docs/contribution/GUIDE.md](GUIDE.md) -- **Git Workflows?** → `cat .git-setup-info` -- **Project Structure?** → [docs/README.md](docs/README.md) - ---- - -## Happy Contributing! 
🚀 diff --git a/docs/contribution/GUIDE.md b/docs/contribution/GUIDE.md deleted file mode 100644 index a014b3cdb..000000000 --- a/docs/contribution/GUIDE.md +++ /dev/null @@ -1,1077 +0,0 @@ -# 🎯 **ProxmoxVE Contribution Guide** - -**Everything you need to know to contribute to ProxmoxVE** - -> **Last Updated**: December 2025 -> **Difficulty**: Beginner → Advanced -> **Time to Setup**: 15 minutes -> **Time to Contribute**: 1-3 hours - ---- - -## 📋 Table of Contents - -- [Quick Start](#quick-start) -- [Repository Structure](#repository-structure) -- [Development Setup](#development-setup) -- [Creating New Applications](#creating-new-applications) -- [Updating Existing Applications](#updating-existing-applications) -- [Code Standards](#code-standards) -- [Testing Your Changes](#testing-your-changes) -- [Submitting a Pull Request](#submitting-a-pull-request) -- [Troubleshooting](#troubleshooting) -- [FAQ](#faq) - ---- - -## Quick Start - -### Setup Your Fork (First Time Only) - -```bash -# 1. Fork the repository on GitHub -# Visit: https://github.com/community-scripts/ProxmoxVE -# Click: Fork (top right) - -# 2. Clone your fork -git clone https://github.com/YOUR_USERNAME/ProxmoxVE.git -cd ProxmoxVE - -# 3. Run fork setup script (automatically configures everything) -bash docs/contribution/setup-fork.sh --full -# --full updates ct/, install/, vm/, docs/, misc/ links for fork testing - -# 4. Read the git workflow tips -cat .git-setup-info -``` - -### 60 Seconds to First Contribution - -```bash -# 1. Create feature branch -git checkout -b add/my-awesome-app - -# 2. Create application scripts from templates -cp docs/contribution/templates_ct/AppName.sh ct/myapp.sh -cp docs/contribution/templates_install/AppName-install.sh install/myapp-install.sh -cp docs/contribution/templates_json/AppName.json frontend/public/json/myapp.json - -# 3. Edit your scripts -nano ct/myapp.sh -nano install/myapp-install.sh -nano frontend/public/json/myapp.json - -# 4. 
Commit and push to your fork -git add ct/myapp.sh install/myapp-install.sh frontend/public/json/myapp.json -git commit -m "feat: add MyApp container and install scripts" -git push origin add/my-awesome-app - -# 5. Test via curl from your fork (GitHub may take 10-30 seconds) -bash -c "$(curl -fsSL https://raw.githubusercontent.com/YOUR_USERNAME/ProxmoxVE/main/ct/myapp.sh)" - -# 6. Use cherry-pick to submit only your files (see Cherry-Pick section) -# DO NOT submit the 600+ files modified by setup-fork.sh! - -# 7. Open Pull Request on GitHub -# Create PR from: your-fork/add/my-awesome-app → community-scripts/ProxmoxVE/main -``` - -**💡 Tip**: See `../FORK_SETUP.md` for detailed fork setup and troubleshooting - ---- - -## Repository Structure - -### Top-Level Organization - -``` -ProxmoxVE/ -├── ct/ # 🏗️ Container creation scripts (host-side) -│ ├── pihole.sh -│ ├── docker.sh -│ └── ... (40+ applications) -│ -├── install/ # 🛠️ Installation scripts (container-side) -│ ├── pihole-install.sh -│ ├── docker-install.sh -│ └── ... (40+ applications) -│ -├── vm/ # 💾 VM creation scripts -│ ├── ubuntu2404-vm.sh -│ ├── debian-vm.sh -│ └── ... 
(15+ operating systems) -│ -├── misc/ # 📦 Shared function libraries -│ ├── build.func # Main orchestrator (3800+ lines) -│ ├── core.func # UI/utilities -│ ├── error_handler.func # Error management -│ ├── tools.func # Tool installation -│ ├── install.func # Container setup -│ ├── cloud-init.func # VM configuration -│ ├── api.func # Telemetry -│ ├── alpine-install.func # Alpine-specific -│ └── alpine-tools.func # Alpine tools -│ -├── docs/ # 📚 Documentation -│ ├── ct/DETAILED_GUIDE.md # Container script guide -│ ├── install/DETAILED_GUIDE.md # Install script guide -│ └── contribution/README.md # Contribution overview -│ -├── tools/ # 🔧 Proxmox management tools -│ └── pve/ -│ -└── README.md # Project overview -``` - -### Naming Conventions - -``` -Container Script: ct/AppName.sh -Installation Script: install/appname-install.sh -Defaults: defaults/appname.vars -Update Script: /usr/bin/update (inside container) - -Examples: - ct/pihole.sh → install/pihole-install.sh - ct/docker.sh → install/docker-install.sh - ct/nextcloud-vm.sh → install/nextcloud-vm-install.sh -``` - -**Rules**: - -- Container script name: **Title Case** (PiHole, Docker, NextCloud) -- Install script name: **lowercase** with **hyphens** (pihole-install, docker-install) -- Must match: `ct/AppName.sh` ↔ `install/appname-install.sh` -- Directory names: lowercase (always) -- Variable names: lowercase (except APP constant) - ---- - -## Development Setup - -### Prerequisites - -1. **Proxmox VE 8.0+** with at least: - - 4 CPU cores - - 8 GB RAM - - 50 GB disk space - - Ubuntu 20.04 / Debian 11+ on host - -2. **Git** installed - - ```bash - apt-get install -y git - ``` - -3. **Text Editor** (VS Code recommended) - ```bash - # VS Code extensions: - # - Bash IDE - # - Shellcheck - # - Markdown All in One - ``` - -### Local Development Workflow - -#### Option A: Development Fork (Recommended) - -```bash -# 1. Fork on GitHub (one-time) -# Visit: https://github.com/community-scripts/ProxmoxVE -# Click: Fork - -# 2. 
Clone your fork -git clone https://github.com/YOUR_USERNAME/ProxmoxVE.git -cd ProxmoxVE - -# 3. Add upstream remote for updates -git remote add upstream https://github.com/community-scripts/ProxmoxVE.git - -# 4. Create feature branch -git checkout -b feat/add-myapp - -# 5. Make changes -# ... edit files ... - -# 6. Keep fork updated -git fetch upstream -git rebase upstream/main - -# 7. Push and open PR -git push origin feat/add-myapp -``` - -#### Option B: Testing on a Proxmox Host (still via curl) - -```bash -# 1. SSH into Proxmox host -ssh root@192.168.1.100 - -# 2. Test via curl from your fork (CT script only) -bash -c "$(curl -fsSL https://raw.githubusercontent.com/YOUR_USERNAME/ProxmoxVE/main/ct/myapp.sh)" -# ⏱️ Wait 10-30 seconds after pushing - GitHub takes time to update -``` - -> **Note:** Do not edit URLs manually or run install scripts directly. The CT script calls the install script inside the container. - -#### Option C: Using Curl (Recommended for Real Testing) - -```bash -# Always test via curl from your fork (GitHub takes 10-30 seconds after push) -git push origin feature/myapp -bash -c "$(curl -fsSL https://raw.githubusercontent.com/YOUR_USERNAME/ProxmoxVE/main/ct/myapp.sh)" -# This tests the actual GitHub URLs, not local files -``` - -#### Option D: Static Checks (Without Proxmox) - -```bash -# You can validate syntax and linting locally (limited) -# Note: This does NOT replace real Proxmox testing - -# Run ShellCheck -shellcheck ct/myapp.sh -shellcheck install/myapp-install.sh - -# Syntax check -bash -n ct/myapp.sh -bash -n install/myapp-install.sh -``` - ---- - -## Creating New Applications - -### Step 1: Choose Your Template - -**For Simple Web Apps** (Node.js, Python, PHP): - -```bash -cp ct/example.sh ct/myapp.sh -cp install/example-install.sh install/myapp-install.sh -``` - -**For Database Apps** (PostgreSQL, MariaDB, MongoDB): - -Use the standard templates and the database helpers from `tools.func` (no Docker). 
- -**For Alpine Linux Apps** (lightweight): - -```bash -# Use ct/alpine.sh as reference -# Edit install script to use Alpine packages (apk not apt) -``` - -### Step 2: Update Container Script - -**File**: `ct/myapp.sh` - -```bash -#!/usr/bin/env bash -source <(curl -fsSL https://raw.githubusercontent.com/YOUR_USERNAME/ProxmoxVE/main/misc/build.func) - -# Update these: -APP="MyAwesomeApp" # Display name -var_tags="category;tag2;tag3" # Max 3-4 tags -var_cpu="2" # Realistic CPU cores -var_ram="2048" # Min RAM needed (MB) -var_disk="10" # Min disk (GB) -var_os="debian" # OS type -var_version="12" # OS version -var_unprivileged="1" # Security (1=unprivileged) - -header_info "$APP" -variables -color -catch_errors - -function update_script() { - header_info - check_container_storage - check_container_resources - - if [[ ! -d /opt/myapp ]]; then - msg_error "No ${APP} Installation Found!" - exit - fi - - if check_for_gh_release "myapp" "owner/repo"; then - msg_info "Stopping Service" - systemctl stop myapp - msg_ok "Stopped Service" - - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "myapp" "owner/repo" "tarball" "latest" "/opt/myapp" - - # ... update logic (migrations, rebuilds, etc.) ... - - msg_info "Starting Service" - systemctl start myapp - msg_ok "Started Service" - msg_ok "Updated successfully!" 
- fi - exit -} - -start -build_container -description - -msg_ok "Completed successfully!\n" -echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" -echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:PORT${CL}" -``` - -**Checklist**: - -- [ ] APP variable matches filename -- [ ] var_tags semicolon-separated (no spaces) -- [ ] Realistic CPU/RAM/disk values -- [ ] update_script() implemented -- [ ] Correct OS and version -- [ ] Success message with access URL - -### Step 3: Update Installation Script - -**File**: `install/myapp-install.sh` - -```bash -#!/usr/bin/env bash -# Copyright (c) 2021-2026 community-scripts ORG -# Author: YourUsername -# License: MIT -# Source: https://github.com/example/myapp - -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -color -verb_ip6 -catch_errors -setting_up_container -network_check -update_os - -msg_info "Installing Dependencies" -$STD apt-get install -y \ - build-essential -msg_ok "Installed Dependencies" - -NODE_VERSION="22" setup_nodejs - -fetch_and_deploy_gh_release "myapp" "owner/repo" "tarball" "latest" "/opt/myapp" - -motd_ssh -customize -cleanup_lxc -``` - -**Checklist**: - -- [ ] Functions loaded from `$FUNCTIONS_FILE_PATH` -- [ ] All installation phases present (deps, tools, app, config, cleanup) -- [ ] Using `$STD` for output suppression -- [ ] Version file saved -- [ ] Final cleanup with `cleanup_lxc` -- [ ] No hardcoded versions (use GitHub API) - -### Step 4: Create ASCII Header (Optional) - -**File**: `ct/headers/myapp` - -``` -╔═══════════════════════════════════════╗ -║ ║ -║ 🎉 MyAwesomeApp 🎉 ║ -║ ║ -║ Your app is being installed... 
║ -║ ║ -╚═══════════════════════════════════════╝ -``` - -Save in: `ct/headers/myapp` (no extension) - -### Step 5: Create Defaults File (Optional) - -**File**: `defaults/myapp.vars` - -```bash -# Default configuration for MyAwesomeApp -var_cpu=4 -var_ram=4096 -var_disk=15 -var_hostname=myapp-container -var_timezone=UTC -``` - ---- - -## Updating Existing Applications - -### Step 1: Identify What Changed - -```bash -# Check logs or GitHub releases -curl -fsSL https://api.github.com/repos/app/repo/releases/latest | jq '.' - -# Review breaking changes -# Update dependencies if needed -``` - -### Step 2: Update Installation Script - -```bash -# Edit: install/existingapp-install.sh - -# 1. Update version (if hardcoded) -RELEASE="2.0.0" - -# 2. Update package dependencies (if any changed) -$STD apt-get install -y newdependency - -# 3. Update configuration (if format changed) -# Update sed replacements or config files - -# 4. Test thoroughly before committing -``` - -### Step 3: The Standard Update Pattern - -The `update_script()` function in `ct/appname.sh` should follow a robust pattern: - -1. **Check for updates**: Use `check_for_gh_release` to skip logic if no new version exists. -2. **Stop services**: Stop all relevant services (`systemctl stop appname`). -3. **Backup existing installation**: Move the old folder (e.g., `mv /opt/app /opt/app_bak`). -4. **Deploy new version**: Use `CLEAN_INSTALL=1 fetch_and_deploy_gh_release`. -5. **Restore configuration**: Copy `.env` or config files back from the backup. -6. **Rebuild/Migrate**: Run `npm install`, `composer install`, or DB migrations. -7. **Start services**: Restart services and cleanup the backup. 
- -**Example from `ct/bookstack.sh`**: -```bash -function update_script() { - if check_for_gh_release "bookstack" "BookStackApp/BookStack"; then - msg_info "Stopping Services" - systemctl stop apache2 - - msg_info "Backing up data" - mv /opt/bookstack /opt/bookstack-backup - - fetch_and_deploy_gh_release "bookstack" "BookStackApp/BookStack" "tarball" - - msg_info "Restoring backup" - cp /opt/bookstack-backup/.env /opt/bookstack/.env - # ... restore uploads ... - - msg_info "Configuring" - cd /opt/bookstack - $STD composer install --no-dev - $STD php artisan migrate --force - - systemctl start apache2 - rm -rf /opt/bookstack-backup - msg_ok "Updated successfully!" - fi -} -``` - ---- - -## Code Standards - -### Bash Style Guide - -#### Variable Naming - -```bash -# ✅ Good -APP="MyApp" # Constants (UPPERCASE) -var_cpu="2" # Configuration (var_*) -container_id="100" # Local variables (lowercase) -DB_PASSWORD="secret" # Environment-like (UPPERCASE) - -# ❌ Bad -myapp="MyApp" # Inconsistent -VAR_CPU="2" # Wrong convention -containerid="100" # Unclear purpose -``` - -#### Function Naming - -```bash -# ✅ Good -function setup_database() { } # Descriptive -function check_version() { } # Verb-noun pattern -function install_dependencies() { } # Clear action - -# ❌ Bad -function setup() { } # Too vague -function db_setup() { } # Inconsistent pattern -function x() { } # Cryptic -``` - -#### Quoting - -```bash -# ✅ Good -echo "${APP}" # Always quote variables -if [[ "$var" == "value" ]]; then # Use [[ ]] for conditionals -echo "Using $var in string" # Variables in double quotes - -# ❌ Bad -echo $APP # Unquoted variables -if [ "$var" = "value" ]; then # Use [[ ]] instead -echo 'Using $var in string' # Single quotes prevent expansion -``` - -#### Command Formatting - -```bash -# ✅ Good: Multiline for readability -$STD apt-get install -y \ - package1 \ - package2 \ - package3 - -# ✅ Good: Complex commands with variables -if ! 
wget -q "https://example.com/${file}"; then - msg_error "Failed to download" - exit 1 -fi - -# ❌ Bad: Too long on one line -$STD apt-get install -y package1 package2 package3 package4 package5 package6 - -# ❌ Bad: No error checking -wget https://example.com/file -``` - -#### Error Handling - -```bash -# ✅ Good: Check critical commands -if ! some_command; then - msg_error "Command failed" - exit 1 -fi - -# ✅ Good: Use catch_errors for automatic trapping -catch_errors - -# ❌ Bad: Silently ignore failures -some_command || true -some_command 2>/dev/null - -# ❌ Bad: Unclear what failed -if ! (cmd1 && cmd2 && cmd3); then - msg_error "Something failed" -fi -``` - -### Documentation Standards - -#### Header Comments - -```bash -#!/usr/bin/env bash -# Copyright (c) 2021-2026 community-scripts ORG -# Author: YourUsername -# Co-Author: AnotherAuthor (for collaborative work) -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://github.com/app/repo -# Description: Brief description of what this script does -``` - -#### Inline Comments - -```bash -# ✅ Good: Explain WHY, not WHAT -# Use alphanumeric only to avoid shell escaping issues -DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13) - -# ✅ Good: Comment complex logic -# Detect if running Alpine vs Debian for proper package manager -if grep -qi 'alpine' /etc/os-release; then - PKG_MGR="apk" -else - PKG_MGR="apt" -fi - -# ❌ Bad: Comment obvious code -# Set the variable -var="value" - -# ❌ Bad: Outdated comments -# TODO: Fix this (written 2 years ago, not fixed) -``` - -### File Organization - -```bash -#!/usr/bin/env bash # [1] Shebang (first line) -# Copyright & Metadata # [2] Comments - # [3] Blank line -# Load functions # [4] Import section -source <(curl -fsSL ...) 
- # [5] Blank line -# Configuration # [6] Variables/Config -APP="MyApp" -var_cpu="2" - # [7] Blank line -# Initialization # [8] Setup -header_info "$APP" -variables -color -catch_errors - # [9] Blank line -# Functions # [10] Function definitions -function update_script() { } -function custom_setup() { } - # [11] Blank line -# Main execution # [12] Script logic -start -build_container -``` - ---- - -## Testing Your Changes - -### Pre-Submission Testing - -#### 1. Syntax Check - -```bash -# Verify bash syntax -bash -n ct/myapp.sh -bash -n install/myapp-install.sh - -# If no output: ✅ Syntax is valid -# If error output: ❌ Fix syntax before submitting -``` - -#### 2. ShellCheck Static Analysis - -```bash -# Install ShellCheck -apt-get install -y shellcheck - -# Check scripts -shellcheck ct/myapp.sh -shellcheck install/myapp-install.sh - -# Review warnings and fix if applicable -# Some warnings can be intentional (use # shellcheck disable=...) -``` - -#### 3. Real Proxmox Testing - -```bash -# Best: Test on actual Proxmox system - -# 1. SSH into Proxmox host -ssh root@YOUR_PROXMOX_IP - -# 2. Test via curl from your fork (CT script only) -bash -c "$(curl -fsSL https://raw.githubusercontent.com/YOUR_USERNAME/ProxmoxVE/main/ct/myapp.sh)" -# ⏱️ Wait 10-30 seconds after pushing - GitHub takes time to update - -# 3. Test interaction: -# - Select installation mode -# - Confirm settings -# - Monitor installation - -# 4. Verify container created -pct list | grep myapp - -# 5. Log into container and verify app -pct exec 100 bash -``` - -#### 4. 
Edge Case Testing - -```bash -# Test with different settings: - -# Test 1: Advanced (19-step) installation -# When prompted: Select "2" for Advanced - -# Test 2: User Defaults -# Before running: Create ~/.community-scripts/default.vars -# When prompted: Select "3" for User Defaults - -# Test 3: Error handling -# Simulate network outage (block internet) -# Verify script handles gracefully - -# Test 4: Update function -# Create initial container (via curl from fork) -# Wait for new release -# Test update: bash -c "$(curl -fsSL https://raw.githubusercontent.com/YOUR_USERNAME/ProxmoxVE/main/ct/myapp.sh)" -# Verify it detects and applies update -``` - -### Testing Checklist - -Before submitting PR: - -```bash -# Code quality -- [ ] Syntax: bash -n passes -- [ ] ShellCheck: No critical warnings -- [ ] Naming: Follows conventions -- [ ] Formatting: Consistent indentation - -# Functionality -- [ ] Container creation: Successful -- [ ] Installation: Completes without errors -- [ ] Access URL: Works and app responds -- [ ] Update function: Detects new versions -- [ ] Cleanup: No temporary files left - -# Documentation -- [ ] Copyright header present -- [ ] App name matches filenames -- [ ] Default values realistic -- [ ] Success message clear and helpful - -# Compatibility -- [ ] Works on Debian 12 -- [ ] Works on Ubuntu 22.04 -- [ ] (Optional) Works on Alpine 3.20 -``` - ---- - -## Submitting a Pull Request - -### Step 1: Prepare Your Branch - -```bash -# Update with latest changes -git fetch upstream -git rebase upstream/main - -# If conflicts occur: -git rebase --abort -# Resolve conflicts manually then: -git add . 
-git rebase --continue -``` - -### Step 2: Push Your Changes - -```bash -git push origin feat/add-myapp - -# If already pushed: -git push origin feat/add-myapp --force-with-lease -``` - -### Step 3: Create Pull Request on GitHub - -**Visit**: https://github.com/community-scripts/ProxmoxVE/pulls - -**Click**: "New Pull Request" - -**Select**: `community-scripts:main` ← `YOUR_USERNAME:feat/myapp` - -### Step 4: Fill PR Description - -Use this template: - -```markdown -## Description - -Brief description of what this PR adds/fixes - -## Type of Change - -- [ ] New application (ct/AppName.sh + install/appname-install.sh) -- [ ] Update existing application -- [ ] Bug fix -- [ ] Documentation update -- [ ] Other: **\_\_\_** - -## Testing - -- [ ] Tested on Proxmox VE 8.x -- [ ] Container creation successful -- [ ] Application installation successful -- [ ] Application is accessible at URL -- [ ] Update function works (if applicable) -- [ ] No temporary files left after installation - -## Application Details (for new apps only) - -- **App Name**: MyApp -- **Source**: https://github.com/app/repo -- **Default OS**: Debian 12 -- **Recommended Resources**: 2 CPU, 2GB RAM, 10GB Disk -- **Tags**: category;tag2;tag3 -- **Access URL**: http://IP:PORT/path - -## Checklist - -- [ ] My code follows the style guidelines -- [ ] I have performed a self-review -- [ ] I have tested the script via curl from my fork (after git push) -- [ ] GitHub had time to update (waited 10-30 seconds) -- [ ] ShellCheck shows no critical warnings -- [ ] Documentation is accurate and complete -- [ ] I have added/updated relevant documentation -``` - -### Step 5: Respond to Review Comments - -**Maintainers may request changes**: - -- Fix syntax/style issues -- Add better error handling -- Optimize resource usage -- Update documentation - -**To address feedback**: - -```bash -# Make requested changes -git add . -git commit -m "Address review feedback: ..." 
-git push origin feat/add-myapp - -# PR automatically updates! -# No need to create new PR -``` - -### Step 6: Celebrate! 🎉 - -Once merged, your contribution will be part of ProxmoxVE and available to all users! - ---- - -## Troubleshooting - -### "Repository not found" when cloning - -```bash -# Check your fork exists -# Visit: https://github.com/YOUR_USERNAME/ProxmoxVE - -# If not there: Click "Fork" on original repo first -``` - -### "Permission denied" when pushing - -```bash -# Setup SSH key -ssh-keygen -t ed25519 -C "your_email@example.com" -cat ~/.ssh/id_ed25519.pub # Copy this - -# Add to GitHub: Settings → SSH Keys → New Key - -# Or use HTTPS with token: -git remote set-url origin https://YOUR_TOKEN@github.com/YOUR_USERNAME/ProxmoxVE.git -``` - -### Script syntax errors - -```bash -# Use ShellCheck to identify issues -shellcheck install/myapp-install.sh - -# Common issues: -# - Unmatched quotes: "string' or 'string" -# - Missing semicolons before then: if [...]; then -# - Wrong quoting: echo $VAR instead of echo "${VAR}" -``` - -### Container creation fails immediately - -```bash -# 1. Check Proxmox resources -free -h # Check RAM -df -h # Check disk space -pct list # Check CTID availability - -# 2. Check script URL -# Make sure curl -s in script points to your fork - -# 3. Review errors -# Run with verbose: bash -x ct/myapp.sh -``` - -### App not accessible after creation - -```bash -# 1. Verify container running -pct list -pct status CTID - -# 2. Check if service running inside -pct exec CTID systemctl status myapp - -# 3. Check firewall -# Proxmox host: iptables -L -# Container: iptables -L - -# 4. Verify listening port -pct exec CTID netstat -tlnp | grep LISTEN -``` - ---- - -## FAQ - -### Q: Do I need to be a Bash expert? - -**A**: No! The codebase has many examples you can copy. Most contributions are straightforward script creation following the established patterns. - -### Q: Can I add a new application that's not open source? - -**A**: No. 
ProxmoxVE focuses on open-source applications (GPL, MIT, Apache, etc.). Closed-source applications won't be accepted. - -### Q: How long until my PR is reviewed? - -**A**: Maintainers are volunteers. Reviews typically happen within 1-2 weeks. Complex changes may take longer. - -### Q: Can I test without a Proxmox system? - -**A**: Partially. You can verify syntax and ShellCheck compliance locally, but real container testing requires Proxmox. Consider using: - -- Proxmox in a VM (VirtualBox/KVM) -- Test instances on Hetzner/DigitalOcean -- Ask maintainers to test for you - -### Q: My update function is very complex - is that OK? - -**A**: Yes! Update functions can be complex if needed. Just ensure: - -- Backup user data before updating -- Restore user data after update -- Test thoroughly before submitting -- Add clear comments explaining logic - -### Q: Can I add new dependencies to build.func? - -**A**: Generally no. build.func is the orchestrator and should remain stable. New functions should go in: - -- `tools.func` - Tool installation -- `core.func` - Utility functions -- `install.func` - Container setup - -Ask in an issue first if you're unsure. - -### Q: What if the application has many configuration options? - -**A**: You have options: - -**Option 1**: Use Advanced mode (19-step wizard) - -```bash -# Extend advanced_settings() if app needs special vars -``` - -**Option 2**: Create custom setup menu - -```bash -function custom_config() { - OPTION=$(whiptail --inputbox "Enter database name:" 8 60) - # ... use $OPTION in installation -} -``` - -**Option 3**: Leave as defaults + documentation - -```bash -# In success message: -echo "Edit /opt/myapp/config.json to customize settings" -``` - -### Q: Can I contribute Windows/macOS/ARM support? - -**A**: - -- **Windows**: Not planned (ProxmoxVE is Linux/Proxmox focused) -- **macOS**: Can contribute Docker-based alternatives -- **ARM**: Yes! Many apps work on ARM. 
Add to vm/pimox-\*.sh scripts - ---- - -## Getting Help - -### Resources - -- **Documentation**: `/docs` directory and wikis -- **Function Reference**: `/misc/*.md` wiki files -- **Examples**: Look at similar applications in `/ct` and `/install` -- **GitHub Issues**: https://github.com/community-scripts/ProxmoxVE/issues -- **Discussions**: https://github.com/community-scripts/ProxmoxVE/discussions - -### Ask Questions - -1. **Check existing issues** - Your question may be answered -2. **Search documentation** - See `/docs` and `/misc/*.md` -3. **Ask in Discussions** - For general questions -4. **Open an Issue** - For bugs or specific problems - -### Report Bugs - -When reporting bugs, include: - -- Which application -- What happened (error message) -- What you expected -- Your Proxmox version -- Container OS and version - -Example: - -``` -Title: pihole-install.sh fails on Alpine 3.20 - -Description: -Installation fails with error: "PHP-FPM not found" - -Expected: -PiHole should install successfully - -Environment: -- Proxmox VE 8.2 -- Alpine 3.20 -- Container CTID 110 - -Error Output: -[ERROR] in line 42: exit code 127: while executing command php-fpm --start -``` - ---- - -## Contribution Statistics - -**ProxmoxVE by the Numbers**: - -- 🎯 40+ applications supported -- 👥 100+ contributors -- 📊 10,000+ GitHub stars -- 🚀 50+ releases -- 📈 100,000+ downloads/month - -**Your contribution makes a difference!** - ---- - -## Code of Conduct - -By contributing, you agree to: - -- ✅ Be respectful and inclusive -- ✅ Follow the style guidelines -- ✅ Test your changes thoroughly -- ✅ Provide clear commit messages -- ✅ Respond to review feedback - ---- - -**Ready to contribute?** Start with the [Quick Start](#quick-start) section! - -**Questions?** Open an issue or start a discussion on GitHub. 
- -**Thank you for your contribution!** 🙏 diff --git a/docs/contribution/HELPER_FUNCTIONS.md b/docs/contribution/HELPER_FUNCTIONS.md deleted file mode 100644 index d32ab2474..000000000 --- a/docs/contribution/HELPER_FUNCTIONS.md +++ /dev/null @@ -1,753 +0,0 @@ -# 🛠️ Helper Functions Reference - -**Quick reference for all helper functions available in `tools.func`** - -> These functions are automatically available in install scripts via `$FUNCTIONS_FILE_PATH` - ---- - -## 📋 Table of Contents - -- [Scripts to Watch](#scripts-to-watch) -- [Runtime & Language Setup](#runtime--language-setup) -- [Database Setup](#database-setup) -- [GitHub Release Helpers](#github-release-helpers) -- [Tools & Utilities](#tools--utilities) -- [SSL/TLS](#ssltls) -- [Utility Functions](#utility-functions) -- [Package Management](#package-management) - ---- - -## 📚 Scripts to Watch - -**Learn from real, well-implemented scripts. Each app requires TWO files that work together:** - -| File | Location | Purpose | -| ------------------ | ---------------------------- | ------------------------------------------------------------------------ | -| **CT Script** | `ct/appname.sh` | Runs on **Proxmox host** - creates container, contains `update_script()` | -| **Install Script** | `install/appname-install.sh` | Runs **inside container** - installs and configures the app | - -> ⚠️ **Both files are ALWAYS required!** The CT script calls the install script automatically during container creation. - -Install scripts are **not** run directly by users; they are invoked by the CT script inside the container. 
- -### Node.js + PostgreSQL - -**Koel** - Music streaming with PHP + Node.js + PostgreSQL -| File | Link | -| ----------------- | -------------------------------------------------------- | -| CT (update logic) | [ct/koel.sh](../../ct/koel.sh) | -| Install | [install/koel-install.sh](../../install/koel-install.sh) | - -**Actual Budget** - Finance app with npm global install -| File | Link | -| ----------------- | ------------------------------------------------------------------------ | -| CT (update logic) | [ct/actualbudget.sh](../../ct/actualbudget.sh) | -| Install | [install/actualbudget-install.sh](../../install/actualbudget-install.sh) | - -### Python + uv - -**MeTube** - YouTube downloader with Python uv + Node.js + Deno -| File | Link | -| ----------------- | ------------------------------------------------------------ | -| CT (update logic) | [ct/metube.sh](../../ct/metube.sh) | -| Install | [install/metube-install.sh](../../install/metube-install.sh) | - -**Endurain** - Fitness tracker with Python uv + PostgreSQL/PostGIS -| File | Link | -| ----------------- | ---------------------------------------------------------------- | -| CT (update logic) | [ct/endurain.sh](../../ct/endurain.sh) | -| Install | [install/endurain-install.sh](../../install/endurain-install.sh) | - -### Java + Gradle - -**BookLore** - Book management with Java 21 + Gradle + MariaDB + Nginx -| File | Link | -| ----------------- | -------------------------------------------------------------- | -| CT (update logic) | [ct/booklore.sh](../../ct/booklore.sh) | -| Install | [install/booklore-install.sh](../../install/booklore-install.sh) | - -### Pnpm + Meilisearch - -**KaraKeep** - Bookmark manager with Pnpm + Meilisearch + Puppeteer -| File | Link | -| ----------------- | -------------------------------------------------------------- | -| CT (update logic) | [ct/karakeep.sh](../../ct/karakeep.sh) | -| Install | [install/karakeep-install.sh](../../install/karakeep-install.sh) | - -### PHP + 
MariaDB/MySQL - -**Wallabag** - Read-it-later with PHP + MariaDB + Redis + Nginx -| File | Link | -| ----------------- | ---------------------------------------------------------------- | -| CT (update logic) | [ct/wallabag.sh](../../ct/wallabag.sh) | -| Install | [install/wallabag-install.sh](../../install/wallabag-install.sh) | - -**InvoiceNinja** - Invoicing with PHP + MariaDB + Supervisor -| File | Link | -| ----------------- | ------------------------------------------------------------------------ | -| CT (update logic) | [ct/invoiceninja.sh](../../ct/invoiceninja.sh) | -| Install | [install/invoiceninja-install.sh](../../install/invoiceninja-install.sh) | - -**BookStack** - Wiki/Docs with PHP + MariaDB + Apache -| File | Link | -| ----------------- | ------------------------------------------------------------------ | -| CT (update logic) | [ct/bookstack.sh](../../ct/bookstack.sh) | -| Install | [install/bookstack-install.sh](../../install/bookstack-install.sh) | - -### PHP + SQLite (Simple) - -**Speedtest Tracker** - Speedtest with PHP + SQLite + Nginx -| File | Link | -| ----------------- | ---------------------------------------------------------------------------------- | -| CT (update logic) | [ct/speedtest-tracker.sh](../../ct/speedtest-tracker.sh) | -| Install | [install/speedtest-tracker-install.sh](../../install/speedtest-tracker-install.sh) | - ---- - -## Runtime & Language Setup - -### `setup_nodejs` - -Install Node.js from NodeSource repository. - -```bash -# Default (Node.js 24) -setup_nodejs - -# Specific version -NODE_VERSION="20" setup_nodejs -NODE_VERSION="22" setup_nodejs -NODE_VERSION="24" setup_nodejs -``` - -### `setup_go` - -Install Go programming language (latest stable). - -```bash -setup_go - -# Use in script -setup_go -cd /opt/myapp -$STD go build -o myapp . -``` - -### `setup_rust` - -Install Rust via rustup. 
- -```bash -setup_rust - -# Use in script -setup_rust -source "$HOME/.cargo/env" -$STD cargo build --release -``` - -### `setup_uv` - -Install Python uv package manager (fast pip/venv replacement). - -```bash -# Default -setup_uv - -# Install a specific Python version -PYTHON_VERSION="3.12" setup_uv - -# Use in script -setup_uv -cd /opt/myapp -$STD uv sync --locked -``` - -### `setup_ruby` - -Install Ruby from official repositories. - -```bash -setup_ruby -``` - -### `setup_php` - -Install PHP with configurable modules and FPM/Apache support. - -```bash -# Basic PHP -setup_php - -# Full configuration -PHP_VERSION="8.4" \ -PHP_MODULE="mysqli,gd,curl,mbstring,xml,zip,ldap" \ -PHP_FPM="YES" \ -PHP_APACHE="YES" \ -setup_php -``` - -**Environment Variables:** -| Variable | Default | Description | -| ------------- | ------- | ------------------------------- | -| `PHP_VERSION` | `8.4` | PHP version to install | -| `PHP_MODULE` | `""` | Comma-separated list of modules | -| `PHP_FPM` | `NO` | Install PHP-FPM | -| `PHP_APACHE` | `NO` | Install Apache module | - -### `setup_composer` - -Install PHP Composer package manager. - -```bash -setup_php -setup_composer - -# Use in script -$STD composer install --no-dev -``` - -### `setup_java` - -Install Java (OpenJDK). - -```bash -# Default (Java 21) -setup_java - -# Specific version -JAVA_VERSION="17" setup_java -JAVA_VERSION="21" setup_java -``` - ---- - -## Database Setup - -### `setup_mariadb` - -Install MariaDB server. - -```bash -setup_mariadb -``` - -### `setup_mariadb_db` - -Create a MariaDB database and user. Sets `$MARIADB_DB_PASS` with the generated password. - -```bash -setup_mariadb -MARIADB_DB_NAME="myapp_db" MARIADB_DB_USER="myapp_user" setup_mariadb_db - -# After calling, these variables are available: -# $MARIADB_DB_NAME - Database name -# $MARIADB_DB_USER - Database user -# $MARIADB_DB_PASS - Generated password (saved to ~/[appname].creds) -``` - -### `setup_mysql` - -Install MySQL server. 
- -```bash -setup_mysql -``` - -### `setup_postgresql` - -Install PostgreSQL server. - -```bash -# Default (PostgreSQL 16) -setup_postgresql - -# Specific version -PG_VERSION="16" setup_postgresql -PG_VERSION="16" setup_postgresql -``` - -### `setup_postgresql_db` - -Create a PostgreSQL database and user. Sets `$PG_DB_PASS` with the generated password. - -```bash -PG_VERSION="17" setup_postgresql -PG_DB_NAME="myapp_db" PG_DB_USER="myapp_user" setup_postgresql_db - -# After calling, these variables are available: -# $PG_DB_NAME - Database name -# $PG_DB_USER - Database user -# $PG_DB_PASS - Generated password (saved to ~/[appname].creds) -``` - -### `setup_mongodb` - -Install MongoDB server. - -```bash -setup_mongodb -``` - -### `setup_clickhouse` - -Install ClickHouse analytics database. - -```bash -setup_clickhouse -``` - ---- - -## Advanced Repository Management - -### `setup_deb822_repo` - -The modern standard (Debian 12+) for adding external repositories. Automatically handles GPG keys and sources. - -```bash -setup_deb822_repo \ - "nodejs" \ - "https://deb.nodesource.com/gpgkey/nodesource.gpg.key" \ - "https://deb.nodesource.com/node_22.x" \ - "bookworm" \ - "main" -``` - -### `prepare_repository_setup` - -A high-level helper that performs three critical tasks before adding a new repo: -1. Cleans up old repo files matching the names provided. -2. Removes old GPG keyrings from all standard locations. -3. Ensures APT is in a working state (fixes locks, runs update). - -```bash -# Clean up old mysql/mariadb artifacts before setup -prepare_repository_setup "mariadb" "mysql" -``` - -### `cleanup_tool_keyrings` - -Force-removes GPG keys for specific tools from `/usr/share/keyrings/`, `/etc/apt/keyrings/`, and `/etc/apt/trusted.gpg.d/`. - -```bash -cleanup_tool_keyrings "docker" "kubernetes" -``` - ---- - -## GitHub Release Helpers - -> **Note**: `fetch_and_deploy_gh_release` is the **preferred method** for downloading GitHub releases. 
It handles version tracking automatically. Only use `get_latest_github_release` if you need the version number separately. - -### `fetch_and_deploy_gh_release` - -**Primary method** for downloading and extracting GitHub releases. Handles version tracking automatically. - -```bash -# Basic usage - downloads tarball to /opt/appname -fetch_and_deploy_gh_release "appname" "owner/repo" - -# With explicit parameters -fetch_and_deploy_gh_release "appname" "owner/repo" "tarball" "latest" "/opt/appname" - -# Pre-built release with specific asset pattern -fetch_and_deploy_gh_release "koel" "koel/koel" "prebuild" "latest" "/opt/koel" "koel-*.tar.gz" - -# Clean install (removes old directory first) - used in update_script -CLEAN_INSTALL=1 fetch_and_deploy_gh_release "appname" "owner/repo" "tarball" "latest" "/opt/appname" -``` - -**Parameters:** -| Parameter | Default | Description | -| --------------- | ------------- | ----------------------------------------------------------------- | -| `name` | required | App name (for version tracking) | -| `repo` | required | GitHub repo (`owner/repo`) | -| `type` | `tarball` | Release type: `tarball`, `zipball`, `prebuild`, `binary` | -| `version` | `latest` | Version tag or `latest` | -| `dest` | `/opt/[name]` | Destination directory | -| `asset_pattern` | `""` | For `prebuild`: glob pattern to match asset (e.g. `app-*.tar.gz`) | - -**Environment Variables:** -| Variable | Description | -| ----------------- | ------------------------------------------------------------ | -| `CLEAN_INSTALL=1` | Remove destination directory before extracting (for updates) | - -### `check_for_gh_release` - -Check if a newer version is available. Returns 0 if update needed, 1 if already at latest. **Use in `update_script()` function.** - -```bash -# In update_script() function in ct/appname.sh -if check_for_gh_release "appname" "owner/repo"; then - msg_info "Updating..." 
- # Stop services, backup, update, restore, start - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "appname" "owner/repo" - msg_ok "Updated successfully!" -fi -``` - -### `get_latest_github_release` - -Get the latest release version from a GitHub repository. **Only use if you need the version number separately** (e.g., for manual download or display). - -```bash -RELEASE=$(get_latest_github_release "owner/repo") -echo "Latest version: $RELEASE" -``` - ---- - -## Tools & Utilities - -### `setup_meilisearch` - -Install Meilisearch, a lightning-fast search engine. - -```bash -setup_meilisearch - -# Use in script -$STD php artisan scout:sync-index-settings -``` - -### `setup_yq` - -Install yq YAML processor. - -```bash -setup_yq - -# Use in script -yq '.server.port = 8080' -i config.yaml -```` - -### `setup_ffmpeg` - -Install FFmpeg with common codecs. - -```bash -setup_ffmpeg -``` - -### `setup_hwaccel` - -Setup GPU hardware acceleration (Intel/AMD/NVIDIA). - -```bash -# Only runs if GPU passthrough is detected (/dev/dri, /dev/nvidia0, /dev/kfd) -setup_hwaccel -``` - -### `setup_imagemagick` - -Install ImageMagick 7 from source. - -```bash -setup_imagemagick -``` - -### `setup_docker` - -Install Docker Engine. - -```bash -setup_docker -``` - -### `setup_adminer` - -Install Adminer for database management. - -```bash -setup_mariadb -setup_adminer - -# Access at http://IP/adminer -``` - ---- - -## SSL/TLS - -### `create_self_signed_cert` - -Create a self-signed SSL certificate. - -```bash -create_self_signed_cert - -# Creates files at: -# /etc/ssl/[appname]/[appname].key -# /etc/ssl/[appname]/[appname].crt -``` - ---- - -## Utility Functions - -### `verify_tool_version` - -Validate that the installed major version matches the expected version. Useful during upgrades or troubleshooting. 
- -```bash -# Verify Node.js is version 22 -verify_tool_version "nodejs" "22" "$(node -v | grep -oP '^v\K[0-9]+')" -``` - -### `get_lxc_ip` - -Set the `$LOCAL_IP` variable with the container's IP address. - -```bash -get_lxc_ip -echo "Container IP: $LOCAL_IP" - -# Use in config files -sed -i "s/localhost/$LOCAL_IP/g" /opt/myapp/config.yaml -``` - -### `ensure_dependencies` - -Ensure packages are installed (installs if missing). - -```bash -ensure_dependencies "jq" "unzip" "curl" -``` - -### `msg_info` / `msg_ok` / `msg_error` / `msg_warn` - -Display formatted messages. - -```bash -msg_info "Installing application..." -# ... do work ... -msg_ok "Installation complete" - -msg_warn "Optional feature not available" -msg_error "Installation failed" -``` - ---- - -## Package Management - -### `cleanup_lxc` - -Final cleanup function - call at end of install script. - -```bash -# At the end of your install script -motd_ssh -customize -cleanup_lxc # Handles autoremove, autoclean, cache cleanup -``` - -### `install_packages_with_retry` - -Install packages with automatic retry on failure. - -```bash -install_packages_with_retry "package1" "package2" "package3" -``` - -### `prepare_repository_setup` - -Prepare system for adding new repositories (cleanup old repos, keyrings). 
- -```bash -prepare_repository_setup "mariadb" "mysql" -``` - ---- - -## Complete Examples - -### Example 1: Node.js App with PostgreSQL (install script) - -```bash -#!/usr/bin/env bash - -# Copyright (c) 2021-2026 community-scripts ORG -# Author: YourUsername -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://github.com/example/myapp - -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -color -verb_ip6 -catch_errors -setting_up_container -network_check -update_os - -msg_info "Installing Dependencies" -$STD apt install -y nginx -msg_ok "Installed Dependencies" - -# Setup runtimes and databases FIRST -NODE_VERSION="22" setup_nodejs -PG_VERSION="16" setup_postgresql -PG_DB_NAME="myapp" PG_DB_USER="myapp" setup_postgresql_db -get_lxc_ip - -# Download app using fetch_and_deploy (handles version tracking) -fetch_and_deploy_gh_release "myapp" "example/myapp" "tarball" "latest" "/opt/myapp" - -msg_info "Setting up MyApp" -cd /opt/myapp -$STD npm ci --production -msg_ok "Setup MyApp" - -msg_info "Configuring MyApp" -cat </opt/myapp/.env -DATABASE_URL=postgresql://${PG_DB_USER}:${PG_DB_PASS}@localhost/${PG_DB_NAME} -HOST=${LOCAL_IP} -PORT=3000 -EOF -msg_ok "Configured MyApp" - -msg_info "Creating Service" -cat </etc/systemd/system/myapp.service -[Unit] -Description=MyApp -After=network.target postgresql.service - -[Service] -Type=simple -WorkingDirectory=/opt/myapp -ExecStart=/usr/bin/node /opt/myapp/server.js -Restart=on-failure - -[Install] -WantedBy=multi-user.target -EOF -systemctl enable -q --now myapp -msg_ok "Created Service" - -motd_ssh -customize -cleanup_lxc -``` - -### Example 2: Matching Container Script (ct script) - -```bash -#!/usr/bin/env bash -source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) -# Copyright (c) 2021-2026 community-scripts ORG -# Author: YourUsername -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: 
https://github.com/example/myapp - -APP="MyApp" -var_tags="${var_tags:-webapp}" -var_cpu="${var_cpu:-2}" -var_ram="${var_ram:-2048}" -var_disk="${var_disk:-6}" -var_os="${var_os:-debian}" -var_version="${var_version:-12}" -var_unprivileged="${var_unprivileged:-1}" - -header_info "$APP" -variables -color -catch_errors - -function update_script() { - header_info - check_container_storage - check_container_resources - - if [[ ! -d /opt/myapp ]]; then - msg_error "No ${APP} Installation Found!" - exit - fi - - # check_for_gh_release returns true if update available - if check_for_gh_release "myapp" "example/myapp"; then - msg_info "Stopping Service" - systemctl stop myapp - msg_ok "Stopped Service" - - msg_info "Creating Backup" - cp /opt/myapp/.env /tmp/myapp_env.bak - msg_ok "Created Backup" - - # CLEAN_INSTALL=1 removes old dir before extracting - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "myapp" "example/myapp" "tarball" "latest" "/opt/myapp" - - msg_info "Restoring Config & Rebuilding" - cp /tmp/myapp_env.bak /opt/myapp/.env - rm /tmp/myapp_env.bak - cd /opt/myapp - $STD npm ci --production - msg_ok "Restored Config & Rebuilt" - - msg_info "Starting Service" - systemctl start myapp - msg_ok "Started Service" - - msg_ok "Updated successfully!" 
- fi - exit -} - -start -build_container -description - -msg_ok "Completed successfully!\n" -echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" -echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3000${CL}" -``` - -### Example 3: PHP App with MariaDB (install script) - -```bash -#!/usr/bin/env bash - -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -color -verb_ip6 -catch_errors -setting_up_container -network_check -update_os - -msg_info "Installing Dependencies" -$STD apt install -y nginx -msg_ok "Installed Dependencies" - -# PHP with FPM and common modules -PHP_VERSION="8.4" PHP_FPM="YES" PHP_MODULE="bcmath,curl,gd,intl,mbstring,mysql,xml,zip" setup_php -setup_composer -setup_mariadb -MARIADB_DB_NAME="myapp" MARIADB_DB_USER="myapp" setup_mariadb_db -get_lxc_ip - -# Download pre-built release (with asset pattern) -fetch_and_deploy_gh_release "myapp" "example/myapp" "prebuild" "latest" "/opt/myapp" "myapp-*.tar.gz" - -msg_info "Configuring MyApp" -cd /opt/myapp -cp .env.example .env -sed -i "s|APP_URL=.*|APP_URL=http://${LOCAL_IP}|" .env -sed -i "s|DB_DATABASE=.*|DB_DATABASE=${MARIADB_DB_NAME}|" .env -sed -i "s|DB_USERNAME=.*|DB_USERNAME=${MARIADB_DB_USER}|" .env -sed -i "s|DB_PASSWORD=.*|DB_PASSWORD=${MARIADB_DB_PASS}|" .env -$STD composer install --no-dev --no-interaction -$STD php artisan key:generate --force -$STD php artisan migrate --force -chown -R www-data:www-data /opt/myapp -msg_ok "Configured MyApp" - -# ... nginx config, service creation ... - -motd_ssh -customize -cleanup_lxc -``` diff --git a/docs/contribution/README.md b/docs/contribution/README.md deleted file mode 100644 index cdceec1cc..000000000 --- a/docs/contribution/README.md +++ /dev/null @@ -1,656 +0,0 @@ -# 🤝 Contributing to ProxmoxVE - -Complete guide to contributing to the ProxmoxVE project - from your first fork to submitting your pull request. 
- ---- - -## 📋 Table of Contents - -- [Quick Start](#quick-start) -- [Setting Up Your Fork](#setting-up-your-fork) -- [Coding Standards](#coding-standards) -- [Code Audit](#code-audit) -- [Guides & Resources](#guides--resources) -- [FAQ](#faq) - ---- - -## 🚀 Quick Start - -### 60 Seconds to Contributing (Development) - -When developing and testing **in your fork**: - -```bash -# 1. Fork on GitHub -# Visit: https://github.com/community-scripts/ProxmoxVE → Fork (top right) - -# 2. Clone your fork -git clone https://github.com/YOUR_USERNAME/ProxmoxVE.git -cd ProxmoxVE - -# 3. Auto-configure your fork (IMPORTANT - updates all links!) -bash docs/contribution/setup-fork.sh --full - -# 4. Create a feature branch -git checkout -b feature/my-awesome-app - -# 5. Read the guides -cat docs/README.md # Documentation overview -cat docs/ct/DETAILED_GUIDE.md # For container scripts -cat docs/install/DETAILED_GUIDE.md # For install scripts - -# 6. Create your contribution -cp docs/contribution/templates_ct/AppName.sh ct/myapp.sh -cp docs/contribution/templates_install/AppName-install.sh install/myapp-install.sh -# ... edit files ... - -# 7. Push to your fork and test via GitHub -git push origin feature/my-awesome-app -bash -c "$(curl -fsSL https://raw.githubusercontent.com/YOUR_USERNAME/ProxmoxVE/main/ct/myapp.sh)" -# ⏱️ GitHub may take 10-30 seconds to update files - be patient! - -# 8. Create your JSON metadata file -cp docs/contribution/templates_json/AppName.json frontend/public/json/myapp.json -# Edit metadata: name, slug, categories, description, resources, etc. - -# 9. No direct install-script test -# Install scripts are executed by the CT script inside the container - -# 10. Commit ONLY your new files (see Cherry-Pick section below!) -git add ct/myapp.sh install/myapp-install.sh frontend/public/json/myapp.json -git commit -m "feat: add MyApp container and install scripts" -git push origin feature/my-awesome-app - -# 11. 
Create Pull Request on GitHub -``` - -⚠️ **IMPORTANT: After setup-fork.sh, many files are modified!** - -See the **Cherry-Pick: Submitting Only Your Changes** section below to learn how to push ONLY your 3-4 files instead of 600+ modified files! - -### How Users Run Scripts (After Merged) - -Once your script is merged to the main repository, users download and run it from GitHub like this: - -```bash -# ✅ Users run from GitHub (normal usage after PR merged) -bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/myapp.sh)" - -# Install scripts are called by the CT script and are not run directly by users -``` - -### Development vs. Production Execution - -**During Development (you, in your fork):** - -```bash -# You MUST test via curl from your GitHub fork (not local files!) -bash -c "$(curl -fsSL https://raw.githubusercontent.com/YOUR_USERNAME/ProxmoxVE/main/ct/myapp.sh)" - -# The script's curl commands are updated by setup-fork.sh to point to YOUR fork -# This ensures you're testing your actual changes -# ⏱️ Wait 10-30 seconds after pushing - GitHub updates slowly -``` - -**After Merge (users, from GitHub):** - -```bash -# Users download the script from upstream via curl -bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/myapp.sh)" - -# The script's curl commands now point back to upstream (community-scripts) -# This is the stable, tested version -``` - -**Summary:** - -- **Development**: Push to fork, test via curl → setup-fork.sh changes curl URLs to your fork -- **Production**: curl | bash from upstream → curl URLs point to community-scripts repo - ---- - -## 🍴 Setting Up Your Fork - -### Automatic Setup (Recommended) - -When you clone your fork, run the setup script to automatically configure everything: - -```bash -bash docs/contribution/setup-fork.sh --full -``` - -**What it does:** - -- Auto-detects your GitHub username from git config -- Auto-detects your fork repository name 
-- Updates **ALL** hardcoded links to point to your fork instead of the main repo (`--full`) -- Creates `.git-setup-info` with your configuration -- Allows you to develop and test independently in your fork - -**Why this matters:** - -Without running this script, all links in your fork will still point to the upstream repository (community-scripts). This is a problem when testing because: - -- Installation links will pull from upstream, not your fork -- Updates will target the wrong repository -- Your contributions won't be properly tested - -**After running setup-fork.sh:** - -Your fork is fully configured and ready to develop. You can: - -- Push changes to your fork -- Test via curl: `bash -c "$(curl -fsSL https://raw.githubusercontent.com/YOUR_USERNAME/ProxmoxVE/main/ct/myapp.sh)"` -- All links will reference your fork for development -- ⏱️ Wait 10-30 seconds after pushing - GitHub takes time to update -- Commit and push with confidence -- Create a PR to merge into upstream - -**See**: [FORK_SETUP.md](FORK_SETUP.md) for detailed instructions - -### Manual Setup - -If the script doesn't work, manually configure: - -```bash -# Set git user -git config user.name "Your Name" -git config user.email "your.email@example.com" - -# Add upstream remote for syncing with main repo -git remote add upstream https://github.com/community-scripts/ProxmoxVE.git - -# Verify remotes -git remote -v -# Should show: origin (your fork) and upstream (main repo) -``` - ---- - -## 📖 Coding Standards - -All scripts and configurations must follow our coding standards to ensure consistency and quality. 
- -### Available Guides - -- **[CONTRIBUTING.md](CONTRIBUTING.md)** - Essential coding standards and best practices -- **[CODE-AUDIT.md](CODE-AUDIT.md)** - Code review checklist and audit procedures -- **[GUIDE.md](GUIDE.md)** - Comprehensive contribution guide -- **[HELPER_FUNCTIONS.md](HELPER_FUNCTIONS.md)** - Reference for all tools.func helper functions -- **Container Scripts** - `/ct/` templates and guidelines -- **Install Scripts** - `/install/` templates and guidelines -- **JSON Configurations** - `frontend/public/json/` structure and format - -### Quick Checklist - -- ✅ Use `/ct/example.sh` as template for container scripts -- ✅ Use `/install/example-install.sh` as template for install scripts -- ✅ Follow naming conventions: `appname.sh` and `appname-install.sh` -- ✅ Include proper shebang: `#!/usr/bin/env bash` -- ✅ Add copyright header with author -- ✅ Handle errors properly with `msg_error`, `msg_ok`, etc. -- ✅ Test before submitting PR (via curl from your fork, not local bash) -- ✅ Update documentation if needed - ---- - -## 🔍 Code Audit - -Before submitting a pull request, ensure your code passes our audit: - -**See**: [CODE_AUDIT.md](CODE_AUDIT.md) for complete audit checklist - -Key points: - -- Code consistency with existing scripts -- Proper error handling -- Correct variable naming -- Adequate comments and documentation -- Security best practices - ---- - -## 🍒 Cherry-Pick: Submitting Only Your Changes - -**Problem**: `setup-fork.sh` modifies 600+ files to update links. You don't want to submit all of those changes - only your new 3-4 files! - -**Solution**: Use git cherry-pick to select only YOUR files. - -### Step-by-Step Cherry-Pick Guide - -#### 1. Check what changed - -```bash -# See all modified files -git status - -# Verify your files are there -git status | grep -E "ct/myapp|install/myapp|json/myapp" -``` - -#### 2. 
Create a clean feature branch for submission - -```bash -# Go back to upstream main (clean slate) -git fetch upstream -git checkout -b submit/myapp upstream/main - -# Don't use your modified main branch! -``` - -#### 3. Cherry-pick ONLY your files - -Cherry-picking extracts specific changes from commits: - -```bash -# Option A: Cherry-pick commits that added your files -# (if you committed your files separately) -git cherry-pick - -# Option B: Manually copy and commit only your files -# From your work branch, get the file contents -git show feature/my-awesome-app:ct/myapp.sh > /tmp/myapp.sh -git show feature/my-awesome-app:install/myapp-install.sh > /tmp/myapp-install.sh -git show feature/my-awesome-app:frontend/public/json/myapp.json > /tmp/myapp.json - -# Add them to the clean branch -cp /tmp/myapp.sh ct/myapp.sh -cp /tmp/myapp-install.sh install/myapp-install.sh -cp /tmp/myapp.json frontend/public/json/myapp.json - -# Commit -git add ct/myapp.sh install/myapp-install.sh frontend/public/json/myapp.json -git commit -m "feat: add MyApp" -``` - -#### 4. Verify only your files are in the PR - -```bash -# Check git diff against upstream -git diff upstream/main --name-only -# Should show ONLY: -# ct/myapp.sh -# install/myapp-install.sh -# frontend/public/json/myapp.json -``` - -#### 5. Push and create PR - -```bash -# Push your clean submission branch -git push origin submit/myapp - -# Create PR on GitHub from: submit/myapp → main -``` - -### Why This Matters - -- ✅ Clean PR with only your changes -- ✅ Easier for maintainers to review -- ✅ Faster merge without conflicts -- ❌ Without cherry-pick: PR has 600+ file changes (won't merge!) 
- -### If You Made a Mistake - -```bash -# Delete the messy branch -git branch -D submit/myapp - -# Go back to clean branch -git checkout -b submit/myapp upstream/main - -# Try cherry-picking again -``` - ---- - -If you're using **Visual Studio Code** with an AI assistant, you can leverage our detailed guidelines to generate high-quality contributions automatically. - -### How to Use AI Assistance - -1. **Open the AI Guidelines** - - ``` - docs/contribution/AI.md - ``` - - This file contains all requirements, patterns, and examples for writing proper scripts. - -2. **Prepare Your Information** - - Before asking the AI to generate code, gather: - - **Repository URL**: e.g., `https://github.com/owner/myapp` - - **Dockerfile/Script**: Paste the app's installation instructions (if available) - - **Dependencies**: What packages does it need? (Node, Python, Java, PostgreSQL, etc.) - - **Ports**: What port does it listen on? (e.g., 3000, 8080, 5000) - - **Configuration**: Any environment variables or config files? - -3. **Tell the AI Assistant** - - Share with the AI: - - The repository URL - - The Dockerfile or install instructions - - Link to [docs/contribution/AI.md](AI.md) with instructions to follow - - **Example prompt:** - - ``` - I want to contribute a container script for MyApp to ProxmoxVE. - Repository: https://github.com/owner/myapp - - Here's the Dockerfile: - [paste Dockerfile content] - - Please follow the guidelines in docs/contribution/AI.md to create: - 1. ct/myapp.sh (container script) - 2. install/myapp-install.sh (installation script) - 3. frontend/public/json/myapp.json (metadata) - ``` - -4. 
**AI Will Generate** - - The AI will produce scripts that: - - Follow all ProxmoxVE patterns and conventions - - Use helper functions from `tools.func` correctly - - Include proper error handling and messages - - Have correct update mechanisms - - Are ready to submit as a PR - -### Key Points for AI Assistants - -- **Templates Location**: `docs/contribution/templates_ct/AppName.sh`, `templates_install/`, `templates_json/` -- **Guidelines**: Must follow `docs/contribution/AI.md` exactly -- **Helper Functions**: Use only functions from `misc/tools.func` - never write custom ones -- **Testing**: Always test before submission via curl from your fork - ```bash - bash -c "$(curl -fsSL https://raw.githubusercontent.com/YOUR_USERNAME/ProxmoxVE/main/ct/myapp.sh)" - # Wait 10-30 seconds after pushing changes - ``` -- **No Docker**: Container scripts must be bare-metal, not Docker-based - -### Benefits - -- **Speed**: AI generates boilerplate in seconds -- **Consistency**: Follows same patterns as 200+ existing scripts -- **Quality**: Less bugs and more maintainable code -- **Learning**: See how your app should be structured - ---- - -### Documentation - -- **[docs/README.md](../README.md)** - Main documentation hub -- **[docs/ct/README.md](../ct/README.md)** - Container scripts overview -- **[docs/install/README.md](../install/README.md)** - Installation scripts overview -- **[docs/ct/DETAILED_GUIDE.md](../ct/DETAILED_GUIDE.md)** - Complete ct/ script reference -- **[docs/install/DETAILED_GUIDE.md](../install/DETAILED_GUIDE.md)** - Complete install/ script reference -- **[docs/TECHNICAL_REFERENCE.md](../TECHNICAL_REFERENCE.md)** - Architecture deep-dive -- **[docs/EXIT_CODES.md](../EXIT_CODES.md)** - Exit codes reference -- **[docs/DEV_MODE.md](../DEV_MODE.md)** - Debugging guide - -### Community Guides - -See [USER_SUBMITTED_GUIDES.md](USER_SUBMITTED_GUIDES.md) for excellent community-written guides: - -- Home Assistant installation and configuration -- Frigate setup on 
Proxmox -- Docker and Portainer installation -- Database setup and optimization -- And many more! - -### Templates - -Use these templates when creating new scripts: - -```bash -# Container script template -cp docs/contribution/templates_ct/AppName.sh ct/my-app.sh - -# Installation script template -cp docs/contribution/templates_install/AppName-install.sh install/my-app-install.sh - -# JSON configuration template -cp docs/contribution/templates_json/AppName.json frontend/public/json/my-app.json -``` - -**Template Features:** - -- Updated to match current codebase patterns -- Includes all available helper functions from `tools.func` -- Examples for Node.js, Python, PHP, Go applications -- Database setup examples (MariaDB, PostgreSQL) -- Proper service creation and cleanup - ---- - -## 🔄 Git Workflow - -### Keep Your Fork Updated - -```bash -# Fetch latest from upstream -git fetch upstream - -# Rebase your work on latest main -git rebase upstream/main - -# Push to your fork -git push -f origin main -``` - -### Create Feature Branch - -```bash -# Create and switch to new branch -git checkout -b feature/my-feature - -# Make changes... -git add . -git commit -m "feat: description of changes" - -# Push to your fork -git push origin feature/my-feature - -# Create Pull Request on GitHub -``` - -### Before Submitting PR - -1. **Sync with upstream** - - ```bash - git fetch upstream - git rebase upstream/main - ``` - -2. **Test your changes** (via curl from your fork) - - ```bash - bash -c "$(curl -fsSL https://raw.githubusercontent.com/YOUR_USERNAME/ProxmoxVE/main/ct/my-app.sh)" - # Follow prompts and test the container - # ⏱️ Wait 10-30 seconds after pushing - GitHub takes time to update - ``` - -3. **Check code standards** - - [ ] Follows template structure - - [ ] Proper error handling - - [ ] Documentation updated (if needed) - - [ ] No hardcoded values - - [ ] Version tracking implemented - -4. 
**Push final changes** - ```bash - git push origin feature/my-feature - ``` - ---- - -## 📋 Pull Request Checklist - -Before opening a PR: - -- [ ] Code follows coding standards (see CONTRIBUTING.md) -- [ ] All templates used correctly -- [ ] Tested on Proxmox VE -- [ ] Error handling implemented -- [ ] Documentation updated (if applicable) -- [ ] No merge conflicts -- [ ] Synced with upstream/main -- [ ] Clear PR title and description - ---- - -## ❓ FAQ - -### ❌ Why can't I test with `bash ct/myapp.sh` locally? - -You might try: - -```bash -# ❌ WRONG - This won't test your actual changes! -bash ct/myapp.sh -./ct/myapp.sh -sh ct/myapp.sh -``` - -**Why this fails:** - -- `bash ct/myapp.sh` uses the LOCAL clone file -- The LOCAL file doesn't execute the curl commands - it's already on disk -- The curl URLs INSIDE the script are modified by setup-fork.sh, but they're not executed -- So you can't verify if your curl URLs actually work -- Users will get the curl URL version (which may be broken) - -**Solution:** Always test via curl from GitHub: - -```bash -# ✅ CORRECT - Tests the actual GitHub URLs -bash -c "$(curl -fsSL https://raw.githubusercontent.com/YOUR_USERNAME/ProxmoxVE/main/ct/myapp.sh)" -``` - -### ❓ How do I test my changes? - -You **cannot** test locally with `bash ct/myapp.sh` from your cloned directory! - -You **must** push to GitHub and test via curl from your fork: - -```bash -# 1. Push your changes to your fork -git push origin feature/my-awesome-app - -# 2. Test via curl (this loads the script from GitHub, not local files) -bash -c "$(curl -fsSL https://raw.githubusercontent.com/YOUR_USERNAME/ProxmoxVE/main/ct/my-app.sh)" - -# 3. 
For verbose/debug output, pass environment variables -VERBOSE=yes bash -c "$(curl -fsSL https://raw.githubusercontent.com/YOUR_USERNAME/ProxmoxVE/main/ct/my-app.sh)" -DEV_MODE_LOGS=true bash -c "$(curl -fsSL https://raw.githubusercontent.com/YOUR_USERNAME/ProxmoxVE/main/ct/my-app.sh)" -``` - -**Why?** - -- Local `bash ct/myapp.sh` uses local files from your clone -- But the script's INTERNAL curl commands have been modified by setup-fork.sh to point to your fork -- This discrepancy means you're not actually testing the curl URLs -- Testing via curl ensures the script downloads from YOUR fork GitHub URLs -- ⏱️ **Important:** GitHub takes 10-30 seconds to recognize newly pushed files. Wait before testing! - -**What if local bash worked?** - -You'd be testing local files only, not the actual GitHub URLs that users will download. This means broken curl links wouldn't be caught during testing. - -### What if my PR has conflicts? - -```bash -# Sync with upstream main repository -git fetch upstream -git rebase upstream/main - -# Resolve conflicts in your editor -git add . -git rebase --continue -git push -f origin your-branch -``` - -### How do I keep my fork updated? - -Two ways: - -**Option 1: Run setup script again** - -```bash -bash docs/contribution/setup-fork.sh --full -``` - -**Option 2: Manual sync** - -```bash -git fetch upstream -git rebase upstream/main -git push -f origin main -``` - -### Where do I ask questions? - -- **GitHub Issues**: For bugs and feature requests -- **GitHub Discussions**: For general questions and ideas -- **Discord**: Community-scripts server for real-time chat - ---- - -## 🎓 Learning Resources - -### For First-Time Contributors - -1. Read: [docs/README.md](../README.md) - Documentation overview -2. Read: [CONTRIBUTING.md](CONTRIBUTING.md) - Essential coding standards -3. 
Choose your path: - - Containers → [docs/ct/DETAILED_GUIDE.md](../ct/DETAILED_GUIDE.md) - - Installation → [docs/install/DETAILED_GUIDE.md](../install/DETAILED_GUIDE.md) -4. Study existing scripts in same category -5. Create your contribution - -### For Experienced Developers - -1. Review [CONTRIBUTING.md](CONTRIBUTING.md) - Coding standards -2. Review [CODE_AUDIT.md](CODE_AUDIT.md) - Audit checklist -3. Check templates in `/docs/contribution/templates_*/` -4. Use AI assistants with [AI.md](AI.md) for code generation -5. Submit PR with confidence - -### For Using AI Assistants - -See "Using AI Assistants" section above for: - -- How to structure prompts -- What information to provide -- How to validate AI output - ---- - -## 🚀 Ready to Contribute? - -1. **Fork** the repository -2. **Clone** your fork and **setup** with `bash docs/contribution/setup-fork.sh --full` -3. **Choose** your contribution type (container, installation, tools, etc.) -4. **Read** the appropriate detailed guide -5. **Create** your feature branch -6. **Develop** and **test** your changes -7. **Commit** with clear messages -8. **Push** to your fork -9. **Create** Pull Request - ---- - -## 📞 Contact & Support - -- **GitHub**: [community-scripts/ProxmoxVE](https://github.com/community-scripts/ProxmoxVE) -- **Issues**: [GitHub Issues](https://github.com/community-scripts/ProxmoxVE/issues) -- **Discussions**: [GitHub Discussions](https://github.com/community-scripts/ProxmoxVE/discussions) -- **Discord**: [Join Server](https://discord.gg/UHrpNWGwkH) - ---- - -**Thank you for contributing to ProxmoxVE!** 🙏 - -Your efforts help make Proxmox VE automation accessible to everyone. Happy coding! 
🚀 diff --git a/docs/contribution/setup-fork.sh b/docs/contribution/setup-fork.sh deleted file mode 100644 index a842d7c13..000000000 --- a/docs/contribution/setup-fork.sh +++ /dev/null @@ -1,336 +0,0 @@ -#!/bin/bash - -################################################################################ -# ProxmoxVE Fork Setup Script -# -# Automatically configures documentation and scripts for your fork -# Detects your GitHub username and repository from git config -# Updates all hardcoded links to point to your fork -# -# Usage: -# ./setup-fork.sh # Auto-detect from git config (updates misc/ only) -# ./setup-fork.sh YOUR_USERNAME # Specify username (updates misc/ only) -# ./setup-fork.sh YOUR_USERNAME REPO_NAME # Specify both (updates misc/ only) -# ./setup-fork.sh --full # Update all files including ct/, install/, vm/, etc. -# -# Examples: -# ./setup-fork.sh john # Uses john/ProxmoxVE, updates misc/ only -# ./setup-fork.sh john my-fork # Uses john/my-fork, updates misc/ only -# ./setup-fork.sh --full # Auto-detect + update all files -################################################################################ - -set -e - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -# Default values -REPO_NAME="ProxmoxVE" -USERNAME="" -AUTO_DETECT=true -UPDATE_ALL=false - -################################################################################ -# FUNCTIONS -################################################################################ - -print_header() { - echo -e "\n${BLUE}╔════════════════════════════════════════════════════════════╗${NC}" - echo -e "${BLUE}║${NC} ProxmoxVE Fork Setup Script" - echo -e "${BLUE}║${NC} Configuring for your fork..." 
- echo -e "${BLUE}╚════════════════════════════════════════════════════════════╝${NC}\n" -} - -print_info() { - echo -e "${BLUE}ℹ${NC} $1" -} - -print_success() { - echo -e "${GREEN}✓${NC} $1" -} - -print_warning() { - echo -e "${YELLOW}⚠${NC} $1" -} - -print_error() { - echo -e "${RED}✗${NC} $1" -} - -# Detect username from git remote -detect_username() { - local remote_url - - # Try to get from origin - if ! remote_url=$(git config --get remote.origin.url 2>/dev/null); then - return 1 - fi - - # Extract username from SSH or HTTPS URL - if [[ $remote_url =~ git@github.com:([^/]+) ]]; then - echo "${BASH_REMATCH[1]}" - elif [[ $remote_url =~ github.com/([^/]+) ]]; then - echo "${BASH_REMATCH[1]}" - else - return 1 - fi -} - -# Detect repo name from git remote -detect_repo_name() { - local remote_url - - if ! remote_url=$(git config --get remote.origin.url 2>/dev/null); then - return 1 - fi - - # Extract repo name (remove .git if present) - if [[ $remote_url =~ /([^/]+?)(.git)?$ ]]; then - local repo="${BASH_REMATCH[1]}" - echo "${repo%.git}" - else - return 1 - fi -} - -# Ask user for confirmation -confirm() { - local prompt="$1" - local response - - echo -ne "${YELLOW}${prompt} (y/n)${NC} " - read -r response - [[ $response =~ ^[Yy]$ ]] -} - -# Update links in files -update_links() { - local old_repo="community-scripts" - local old_name="ProxmoxVE" - local new_owner="$1" - local new_repo="$2" - local files_updated=0 - - print_info "Scanning for hardcoded links..." 
- - # Change to repo root - local repo_root=$(git rev-parse --show-toplevel 2>/dev/null || pwd) - - # Determine search path - local search_path="$repo_root/misc" - if [[ "$UPDATE_ALL" == "true" ]]; then - search_path="$repo_root" - print_info "Searching all files (--full mode)" - else - print_info "Searching misc/ directory only (core functions)" - fi - - echo "" - - # Find all files containing the old repo reference - while IFS= read -r file; do - # Count occurrences - local count=$(grep -E -c "(github.com|githubusercontent.com)/$old_repo/$old_name" "$file" 2>/dev/null || echo 0) - - if [[ $count -gt 0 ]]; then - # Backup original - cp "$file" "$file.backup" - - # Replace links - use different sed syntax for BSD/macOS vs GNU sed - if sed --version &>/dev/null 2>&1; then - # GNU sed - sed -E -i "s@(github.com|githubusercontent.com)/$old_repo/$old_name@\\1/$new_owner/$new_repo@g" "$file" - else - # BSD sed (macOS) - sed -E -i '' "s@(github.com|githubusercontent.com)/$old_repo/$old_name@\\1/$new_owner/$new_repo@g" "$file" - fi - - ((files_updated++)) - print_success "Updated $file ($count links)" - fi - done < <(find "$search_path" -type f \( -name "*.md" -o -name "*.sh" -o -name "*.func" -o -name "*.json" \) -not -path "*/.git/*" 2>/dev/null | xargs grep -E -l "(github.com|githubusercontent.com)/$old_repo/$old_name" 2>/dev/null) - - return $files_updated -} - -# Create user git config setup info -create_git_setup_info() { - local username="$1" - - cat >.git-setup-info <<'EOF' -# Git Configuration for ProxmoxVE Development - -## Recommended Git Configuration - -### Set up remotes for easy syncing with upstream: - -```bash -# View your current remotes -git remote -v - -# If you don't have 'upstream' configured, add it: -git remote add upstream https://github.com/community-scripts/ProxmoxVE.git - -# Verify both remotes exist: -git remote -v -# Should show: -# origin https://github.com/YOUR_USERNAME/ProxmoxVE.git (fetch) -# origin 
https://github.com/YOUR_USERNAME/ProxmoxVE.git (push) -# upstream https://github.com/community-scripts/ProxmoxVE.git (fetch) -# upstream https://github.com/community-scripts/ProxmoxVE.git (push) -``` - -### Configure Git User (if not done globally) - -```bash -git config user.name "Your Name" -git config user.email "your.email@example.com" - -# Or configure globally: -git config --global user.name "Your Name" -git config --global user.email "your.email@example.com" -``` - -### Useful Git Workflows - -**Keep your fork up-to-date:** -```bash -git fetch upstream -git rebase upstream/main -git push origin main -``` - -**Create feature branch:** -```bash -git checkout -b feature/my-awesome-app -# Make changes... -git commit -m "feat: add my awesome app" -git push origin feature/my-awesome-app -``` - -**Pull latest from upstream:** -```bash -git fetch upstream -git merge upstream/main -``` - ---- - -For more help, see: docs/contribution/README.md -EOF - - print_success "Created .git-setup-info file" -} - -################################################################################ -# MAIN LOGIC -################################################################################ - -print_header - -# Parse command line arguments -if [[ $# -gt 0 ]]; then - # Check for --full flag - if [[ "$1" == "--full" ]]; then - UPDATE_ALL=true - shift # Remove --full from arguments - fi - - # Process remaining arguments - if [[ $# -gt 0 ]]; then - USERNAME="$1" - AUTO_DETECT=false - - if [[ $# -gt 1 ]]; then - REPO_NAME="$2" - fi - fi -fi - -# Try auto-detection -if [[ -z "$USERNAME" ]]; then - if username=$(detect_username); then - USERNAME="$username" - print_success "Detected GitHub username: $USERNAME" - else - print_error "Could not auto-detect GitHub username from git config" - echo -e "${YELLOW}Please run:${NC}" - echo " ./setup-fork.sh YOUR_USERNAME" - exit 1 - fi -fi - -# Auto-detect repo name if needed -if repo_name=$(detect_repo_name); then - REPO_NAME="$repo_name" - if [[ 
"$REPO_NAME" != "ProxmoxVE" ]]; then - print_info "Detected custom repo name: $REPO_NAME" - else - print_success "Using default repo name: ProxmoxVE" - fi -fi - -# Validate inputs -if [[ -z "$USERNAME" ]]; then - print_error "Username cannot be empty" - exit 1 -fi - -if [[ -z "$REPO_NAME" ]]; then - print_error "Repository name cannot be empty" - exit 1 -fi - -# Show what we'll do -echo -e "${BLUE}Configuration Summary:${NC}" -echo " Repository URL: https://github.com/$USERNAME/$REPO_NAME" -if [[ "$UPDATE_ALL" == "true" ]]; then - echo " Files to update: ALL files (ct/, install/, vm/, misc/, docs/, etc.)" -else - echo " Files to update: misc/ directory only (core functions)" -fi -echo "" - -# Ask for confirmation -if ! confirm "Apply these changes?"; then - print_warning "Setup cancelled" - exit 0 -fi - -echo "" - -# Update all links -if update_links "$USERNAME" "$REPO_NAME"; then - links_changed=$? - print_success "Updated $links_changed files" -else - print_warning "No links needed updating or some files not found" -fi - -# Create git setup info file -create_git_setup_info "$USERNAME" - -# Final summary -echo "" -echo -e "${GREEN}╔════════════════════════════════════════════════════════════╗${NC}" -echo -e "${GREEN}║${NC} Fork Setup Complete! ${GREEN}║${NC}" -echo -e "${GREEN}╚════════════════════════════════════════════════════════════╝${NC}" -echo "" - -print_success "All documentation links updated to point to your fork" -print_info "Your fork: https://github.com/$USERNAME/$REPO_NAME" -print_info "Upstream: https://github.com/community-scripts/ProxmoxVE" -echo "" - -echo -e "${BLUE}Next Steps:${NC}" -echo " 1. Review the changes: git diff" -echo " 2. Check .git-setup-info for recommended git workflow" -echo " 3. Start developing: git checkout -b feature/my-app" -echo " 4. Read: docs/contribution/README.md" -echo "" - -print_success "Happy contributing! 
🚀" diff --git a/docs/contribution/templates_ct/AppName.md b/docs/contribution/templates_ct/AppName.md deleted file mode 100644 index 1739b7976..000000000 --- a/docs/contribution/templates_ct/AppName.md +++ /dev/null @@ -1,278 +0,0 @@ -# CT Container Scripts - Quick Reference - -> [!WARNING] -> **This is legacy documentation.** Refer to the **modern template** at [templates_ct/AppName.sh](AppName.sh) for best practices. -> -> Current templates use: -> -> - `tools.func` helpers instead of manual patterns -> - `check_for_gh_release` and `fetch_and_deploy_gh_release` from build.func -> - Automatic setup-fork.sh configuration - ---- - -## Before Creating a Script - -1. **Fork & Clone:** - - ```bash - git clone https://github.com/YOUR_USERNAME/ProxmoxVE.git - cd ProxmoxVE - ``` - -2. **Run setup-fork.sh** (updates all curl URLs to your fork): - - ```bash - bash docs/contribution/setup-fork.sh - ``` - -3. **Copy the Modern Template:** - - ```bash - cp templates_ct/AppName.sh ct/MyApp.sh - # Edit ct/MyApp.sh with your app details - ``` - -4. **Test Your Script (via GitHub):** - - ⚠️ **Important:** You must push to GitHub and test via curl, not `bash ct/MyApp.sh`! - - ```bash - # Push your changes to your fork first - git push origin feature/my-awesome-app - - # Then test via curl (this loads from YOUR fork, not local files) - bash -c "$(curl -fsSL https://raw.githubusercontent.com/YOUR_USERNAME/ProxmoxVE/main/ct/MyApp.sh)" - ``` - - > 💡 **Why?** The script's curl commands are modified by setup-fork.sh, but local execution uses local files, not the updated GitHub URLs. Testing via curl ensures your script actually works. - > - > ⏱️ **Note:** GitHub sometimes takes 10-30 seconds to update files. If you don't see your changes, wait and try again. - -5. 
**Cherry-Pick for PR** (submit ONLY your 3-4 files): - - See [Cherry-Pick Guide](../README.md) for step-by-step git commands - ---- - -## Template Structure - -The modern template includes: - -### Header - -```bash -#!/usr/bin/env bash -source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) -# (Note: setup-fork.sh changes this URL to point to YOUR fork during development) -``` - -### Metadata - -```bash -# Copyright (c) 2021-2026 community-scripts ORG -# Author: YourUsername -# License: MIT -APP="MyApp" -var_tags="app-category;foss" -var_cpu="2" -var_ram="2048" -var_disk="4" -var_os="alpine" -var_version="3.20" -var_unprivileged="1" -``` - -### Core Setup - -```bash -header_info "$APP" -variables -color -catch_errors -``` - -### Update Function - -The modern template provides a standard update pattern: - -```bash -function update_script() { - header_info - check_container_storage - check_container_resources - - # Use tools.func helpers: - check_for_gh_release "myapp" "owner/repo" - fetch_and_deploy_gh_release "myapp" "owner/repo" "tarball" "latest" "/opt/myapp" -} -``` - ---- - -## Key Patterns - -### Check for Updates (App Repository) - -Use `check_for_gh_release` with the **app repo**: - -```bash -check_for_gh_release "myapp" "owner/repo" -``` - -### Deploy External App - -Use `fetch_and_deploy_gh_release` with the **app repo**: - -```bash -fetch_and_deploy_gh_release "myapp" "owner/repo" -``` - -### Avoid Manual Version Checking - -❌ OLD (manual): - -```bash -RELEASE=$(curl -fsSL https://api.github.com/repos/myapp/myapp/releases/latest | grep tag_name) -``` - -✅ NEW (use tools.func): - -```bash -fetch_and_deploy_gh_release "myapp" "owner/repo" -``` - ---- - -## Best Practices - -1. **Use tools.func helpers** - Don't manually curl for versions -2. **Only add app-specific dependencies** - Don't add ca-certificates, curl, gnupg (handled by build.func) -3. 
**Test via curl from your fork** - Push first, then: `bash -c "$(curl -fsSL https://raw.githubusercontent.com/YOUR_USERNAME/ProxmoxVE/main/ct/MyApp.sh)"` -4. **Wait for GitHub to update** - Takes 10-30 seconds after git push -5. **Cherry-pick only YOUR files** - Submit only ct/MyApp.sh, install/MyApp-install.sh, frontend/public/json/myapp.json (3 files) -6. **Verify before PR** - Run `git diff upstream/main --name-only` to confirm only your files changed - ---- - -## Common Update Patterns - -See the [modern template](AppName.sh) and [AI.md](../AI.md) for complete working examples. - -Recent reference scripts with good update functions: - -- [Trip](https://github.com/community-scripts/ProxmoxVE/blob/main/ct/trip.sh) -- [Thingsboard](https://github.com/community-scripts/ProxmoxVE/blob/main/ct/thingsboard.sh) -- [UniFi](https://github.com/community-scripts/ProxmoxVE/blob/main/ct/unifi.sh) - ---- - -## Need Help? - -- **[README.md](../README.md)** - Full contribution workflow -- **[AI.md](../AI.md)** - AI-generated script guidelines -- **[FORK_SETUP.md](../FORK_SETUP.md)** - Why setup-fork.sh is important -- **[Slack Community](https://discord.gg/your-link)** - Ask questions - -```` - -### 3.4 **Verbosity** - -- Use the appropriate flag (**-q** in the examples) for a command to suppress its output. - Example: - -```bash -curl -fsSL -unzip -q -```` - -- If a command does not come with this functionality use `$STD` to suppress it's output. - -Example: - -```bash -$STD php artisan migrate --force -$STD php artisan config:clear -``` - -### 3.5 **Backups** - -- Backup user data if necessary. -- Move all user data back in the directory when the update is finished. 
- -> [!NOTE] -> This is not meant to be a permanent backup - -Example backup: - -```bash - mv /opt/snipe-it /opt/snipe-it-backup -``` - -Example config restore: - -```bash - cp /opt/snipe-it-backup/.env /opt/snipe-it/.env - cp -r /opt/snipe-it-backup/public/uploads/ /opt/snipe-it/public/uploads/ - cp -r /opt/snipe-it-backup/storage/private_uploads /opt/snipe-it/storage/private_uploads -``` - -### 3.6 **Cleanup** - -- Do not forget to remove any temporary files/folders such as zip-files or temporary backups. - Example: - -```bash - rm -rf /opt/v${RELEASE}.zip - rm -rf /opt/snipe-it-backup -``` - -### 3.7 **No update function** - -- In case you can not provide an update function use the following code to provide user feedback. - -```bash -function update_script() { - header_info - check_container_storage - check_container_resources - if [[ ! -d /opt/snipeit ]]; then - msg_error "No ${APP} Installation Found!" - exit - fi - msg_error "Currently we don't provide an update function for this ${APP}." - exit -} -``` - ---- - -## 4 **End of the script** - -- `start`: Launches Whiptail dialogue -- `build_container`: Collects and integrates user settings -- `description`: Sets LXC container description -- With `echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"` you can point the user to the IP:PORT/folder needed to access the app. - -```bash -start -build_container -description - -msg_ok "Completed successfully!\n" -echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" -echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}" -``` - ---- - -## 5. **Contribution checklist** - -- [ ] Shebang is correctly set (`#!/usr/bin/env bash`). -- [ ] Correct link to _build.func_ -- [ ] Metadata (author, license) is included at the top. -- [ ] Variables follow naming conventions. -- [ ] Update function exists. -- [ ] Update functions checks if app is installed and for new version. 
-- [ ] Update function cleans up temporary files. -- [ ] Script ends with a helpful message for the user to reach the application. diff --git a/docs/contribution/templates_ct/AppName.sh b/docs/contribution/templates_ct/AppName.sh deleted file mode 100644 index f8c685fd3..000000000 --- a/docs/contribution/templates_ct/AppName.sh +++ /dev/null @@ -1,138 +0,0 @@ -#!/usr/bin/env bash -source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func) -# Copyright (c) 2021-2026 community-scripts ORG -# Author: [YourGitHubUsername] -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: [SOURCE_URL e.g. https://github.com/example/app] - -# ============================================================================ -# APP CONFIGURATION -# ============================================================================ -# These values are sent to build.func and define default container resources. -# Users can customize these during installation via the interactive prompts. -# ============================================================================ - -APP="[AppName]" -var_tags="${var_tags:-[category1];[category2]}" # Max 2 tags, semicolon-separated -var_cpu="${var_cpu:-2}" # CPU cores: 1-4 typical -var_ram="${var_ram:-2048}" # RAM in MB: 512, 1024, 2048, etc. 
-var_disk="${var_disk:-8}" # Disk in GB: 6, 8, 10, 20 typical -var_os="${var_os:-debian}" # OS: debian, ubuntu, alpine -var_version="${var_version:-13}" # OS Version: 13 (Debian), 24.04 (Ubuntu), 3.21 (Alpine) -var_unprivileged="${var_unprivileged:-1}" # 1=unprivileged (secure), 0=privileged (for Docker/Podman) - -# ============================================================================ -# INITIALIZATION - These are required in all CT scripts -# ============================================================================ -header_info "$APP" # Display app name and setup header -variables # Initialize build.func variables -color # Load color variables for output -catch_errors # Enable error handling with automatic exit on failure - -# ============================================================================ -# UPDATE SCRIPT - Called when user selects "Update" from web interface -# ============================================================================ -# This function is triggered by the web interface to update the application. -# It should: -# 1. Check if installation exists -# 2. Check for new GitHub releases -# 3. Stop running services -# 4. Backup critical data -# 5. Deploy new version -# 6. Run post-update commands (migrations, config updates, etc.) -# 7. Restore data if needed -# 8. Start services -# -# Exit with `exit` at the end to prevent container restart. -# ============================================================================ - -function update_script() { - header_info - check_container_storage - check_container_resources - - # Step 1: Verify installation exists - if [[ ! -d /opt/[appname] ]]; then - msg_error "No ${APP} Installation Found!" 
- exit - fi - - # Step 2: Check if update is available - if check_for_gh_release "[appname]" "YourUsername/YourRepo"; then - - # Step 3: Stop services before update - msg_info "Stopping Service" - systemctl stop [appname] - msg_ok "Stopped Service" - - # Step 4: Backup critical data before overwriting - msg_info "Backing up Data" - cp -r /opt/[appname]/data /opt/[appname]_data_backup 2>/dev/null || true - msg_ok "Backed up Data" - - # Step 5: Download and deploy new version - # CLEAN_INSTALL=1 removes old directory before extracting - CLEAN_INSTALL=1 fetch_and_deploy_gh_release "[appname]" "owner/repo" "tarball" "latest" "/opt/[appname]" - - # Step 6: Run post-update commands (uncomment as needed) - # These examples show common patterns - use what applies to your app: - # - # For Node.js apps: - # msg_info "Installing Dependencies" - # cd /opt/[appname] - # $STD npm ci --production - # msg_ok "Installed Dependencies" - # - # For Python apps: - # msg_info "Installing Dependencies" - # cd /opt/[appname] - # $STD uv sync --frozen - # msg_ok "Installed Dependencies" - # - # For database migrations: - # msg_info "Running Database Migrations" - # cd /opt/[appname] - # $STD npm run migrate - # msg_ok "Ran Database Migrations" - # - # For PHP apps: - # msg_info "Installing Dependencies" - # cd /opt/[appname] - # $STD composer install --no-dev - # msg_ok "Installed Dependencies" - - # Step 7: Restore data from backup - msg_info "Restoring Data" - cp -r /opt/[appname]_data_backup/. /opt/[appname]/data/ 2>/dev/null || true - rm -rf /opt/[appname]_data_backup - msg_ok "Restored Data" - - # Step 8: Restart service with new version - msg_info "Starting Service" - systemctl start [appname] - msg_ok "Started Service" - msg_ok "Updated successfully!" 
- fi - exit -} - -# ============================================================================ -# MAIN EXECUTION - Container creation flow -# ============================================================================ -# These are called by build.func and handle the full installation process: -# 1. start - Initialize container creation -# 2. build_container - Execute the install script inside container -# 3. description - Display completion info and access details -# ============================================================================ - -start -build_container -description - -# ============================================================================ -# COMPLETION MESSAGE -# ============================================================================ -msg_ok "Completed successfully!\n" -echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" -echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:[PORT]${CL}" diff --git a/docs/contribution/templates_install/AppName-install.md b/docs/contribution/templates_install/AppName-install.md deleted file mode 100644 index 9d8388734..000000000 --- a/docs/contribution/templates_install/AppName-install.md +++ /dev/null @@ -1,494 +0,0 @@ -# Install Scripts - Quick Reference - -> [!WARNING] -> **This is legacy documentation.** Refer to the **modern template** at [templates_install/AppName-install.sh](AppName-install.sh) for best practices. -> -> Current templates use: -> -> - `tools.func` helpers (setup_nodejs, setup_uv, setup_postgresql_db, etc.) -> - Automatic dependency installation via build.func -> - Standardized environment variable patterns - ---- - -## Before Creating a Script - -1. **Copy the Modern Template:** - - ```bash - cp templates_install/AppName-install.sh install/MyApp-install.sh - # Edit install/MyApp-install.sh - ``` - -2. 
**Key Pattern:** - - CT scripts source build.func and call the install script - - Install scripts use sourced FUNCTIONS_FILE_PATH (via build.func) - - Both scripts work together in the container - -3. **Test via GitHub:** - - ```bash - # Push your changes to your fork first - git push origin feature/my-awesome-app - - # Test the CT script via curl (it will call the install script) - bash -c "$(curl -fsSL https://raw.githubusercontent.com/YOUR_USERNAME/ProxmoxVE/main/ct/MyApp.sh)" - # ⏱️ Wait 10-30 seconds after pushing - GitHub takes time to update - ``` - ---- - -## Template Structure - -### Header - -```bash -#!/usr/bin/env bash -source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/install.func) -# (setup-fork.sh modifies this URL to point to YOUR fork during development) -``` - -### Dependencies (App-Specific Only) - -```bash -# Don't add: ca-certificates, curl, gnupg, wget, git, jq -# These are handled by build.func -msg_info "Installing dependencies" -$STD apt-get install -y app-specific-deps -msg_ok "Installed dependencies" -``` - -### Runtime Setup - -Use tools.func helpers instead of manual installation: - -```bash -# ✅ NEW (use tools.func): -NODE_VERSION="20" -setup_nodejs -# OR -PYTHON_VERSION="3.12" -setup_uv -# OR -PG_DB_NAME="myapp_db" -PG_DB_USER="myapp" -setup_postgresql_db -``` - -### Service Configuration - -```bash -# Create .env file -msg_info "Configuring MyApp" -cat << EOF > /opt/myapp/.env -DEBUG=false -PORT=8080 -DATABASE_URL=postgresql://... 
-EOF -msg_ok "Configuration complete" - -# Create systemd service -msg_info "Creating systemd service" -cat << EOF > /etc/systemd/system/myapp.service -[Unit] -Description=MyApp -[Service] -ExecStart=/usr/bin/node /opt/myapp/app.js -[Install] -WantedBy=multi-user.target -EOF -msg_ok "Service created" -``` - -### Finalization - -```bash -msg_info "Finalizing MyApp installation" -systemctl enable --now myapp -motd_ssh -customize -msg_ok "MyApp installation complete" -cleanup_lxc -``` - ---- - -## Key Patterns - -### Avoid Manual Version Checking - -❌ OLD (manual): - -```bash -RELEASE=$(curl -fsSL https://api.github.com/repos/app/repo/releases/latest | grep tag_name) -wget https://github.com/app/repo/releases/download/$RELEASE/app.tar.gz -``` - -✅ NEW (use tools.func via CT script's fetch_and_deploy_gh_release): - -```bash -# In CT script, not install script: -fetch_and_deploy_gh_release "myapp" "app/repo" "app.tar.gz" "latest" "/opt/myapp" -``` - -### Database Setup - -```bash -# Use setup_postgresql_db, setup_mysql_db, etc. -PG_DB_NAME="myapp" -PG_DB_USER="myapp" -setup_postgresql_db -``` - -### Node.js Setup - -```bash -NODE_VERSION="20" -setup_nodejs -npm install --no-save -``` - ---- - -## Best Practices - -1. **Only add app-specific dependencies** - - Don't add: ca-certificates, curl, gnupg, wget, git, jq - - These are handled by build.func - -2. **Use tools.func helpers** - - setup_nodejs, setup_python, setup_uv, setup_postgresql_db, setup_mysql_db, etc. - -3. **Don't do version checks in install script** - - Version checking happens in CT script's update_script() - - Install script just installs the latest - -4. 
**Structure:** - - Dependencies - - Runtime setup (tools.func) - - Deployment (fetch from CT script) - - Configuration files - - Systemd service - - Finalization - ---- - -## Reference Scripts - -See working examples: - -- [Trip](https://github.com/community-scripts/ProxmoxVE/blob/main/install/trip-install.sh) -- [Thingsboard](https://github.com/community-scripts/ProxmoxVE/blob/main/install/thingsboard-install.sh) -- [UniFi](https://github.com/community-scripts/ProxmoxVE/blob/main/install/unifi-install.sh) - ---- - -## Need Help? - -- **[Modern Template](AppName-install.sh)** - Start here -- **[CT Template](../templates_ct/AppName.sh)** - How CT scripts work -- **[README.md](../README.md)** - Full contribution workflow -- **[AI.md](../AI.md)** - AI-generated script guidelines - -### 1.2 **Comments** - -- Add clear comments for script metadata, including author, copyright, and license information. -- Use meaningful inline comments to explain complex commands or logic. - -Example: - -```bash -# Copyright (c) 2021-2026 community-scripts ORG -# Author: [YourUserName] -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: [SOURCE_URL] -``` - -> [!NOTE]: -> -> - Add your username -> - When updating/reworking scripts, add "| Co-Author [YourUserName]" - -### 1.3 **Variables and function import** - -- This sections adds the support for all needed functions and variables. - -```bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -color -verb_ip6 -catch_errors -setting_up_container -network_check -update_os -``` - ---- - -## 2. **Variable naming and management** - -### 2.1 **Naming conventions** - -- Use uppercase names for constants and environment variables. -- Use lowercase names for local script variables. - -Example: - -```bash -DB_NAME=snipeit_db # Environment-like variable (constant) -db_user="snipeit" # Local variable -``` - ---- - -## 3. 
**Dependencies** - -### 3.1 **Install all at once** - -- Install all dependencies with a single command if possible - -Example: - -```bash -$STD apt-get install -y \ - curl \ - composer \ - git \ - sudo \ - mc \ - nginx -``` - -### 3.2 **Collapse dependencies** - -Collapse dependencies to keep the code readable. - -Example: -Use - -```bash -php8.2-{bcmath,common,ctype} -``` - -instead of - -```bash -php8.2-bcmath php8.2-common php8.2-ctype -``` - ---- - -## 4. **Paths to application files** - -If possible install the app and all necessary files in `/opt/` - ---- - -## 5. **Version management** - -### 5.1 **Install the latest release** - -- Always try and install the latest release -- Do not hardcode any version if not absolutely necessary - -Example for a git release: - -```bash -RELEASE=$(curl -fsSL https://api.github.com/repos/snipe/snipe-it/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }') -curl -fsSL "https://github.com/snipe/snipe-it/archive/refs/tags/v${RELEASE}.zip" -``` - -### 5.2 **Save the version for update checks** - -- Write the installed version into a file. -- This is used for the update function in **AppName.sh** to check for if a Update is needed. - -Example: - -```bash -echo "${RELEASE}" >"/opt/AppName_version.txt" -``` - ---- - -## 6. **Input and output management** - -### 6.1 **User feedback** - -- Use standard functions like `msg_info`, `msg_ok` or `msg_error` to print status messages. -- Each `msg_info` must be followed with a `msg_ok` before any other output is made. -- Display meaningful progress messages at key stages. - -Example: - -```bash -msg_info "Installing Dependencies" -$STD apt-get install -y ... 
-msg_ok "Installed Dependencies" -``` - -### 6.2 **Verbosity** - -- Use the appropiate flag (**-q** in the examples) for a command to suppres its output - Example: - -```bash -curl -fsSL -unzip -q -``` - -- If a command dose not come with such a functionality use `$STD` (a custom standard redirection variable) for managing output verbosity. - -Example: - -```bash -$STD apt-get install -y nginx -``` - ---- - -## 7. **String/File Manipulation** - -### 7.1 **File Manipulation** - -- Use `sed` to replace placeholder values in configuration files. - -Example: - -```bash -sed -i -e "s|^DB_DATABASE=.*|DB_DATABASE=$DB_NAME|" \ - -e "s|^DB_USERNAME=.*|DB_USERNAME=$DB_USER|" \ - -e "s|^DB_PASSWORD=.*|DB_PASSWORD=$DB_PASS|" .env -``` - ---- - -## 8. **Security practices** - -### 8.1 **Password generation** - -- Use `openssl` to generate random passwords. -- Use only alphanumeric values to not introduce unknown behaviour. - -Example: - -```bash -DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13) -``` - -### 8.2 **File permissions** - -Explicitly set secure ownership and permissions for sensitive files. - -Example: - -```bash -chown -R www-data: /opt/snipe-it -chmod -R 755 /opt/snipe-it -``` - ---- - -## 9. **Service Configuration** - -### 9.1 **Configuration files** - -Use `cat </etc/nginx/conf.d/snipeit.conf -server { - listen 80; - root /opt/snipe-it/public; - index index.php; -} -EOF -``` - -### 9.2 **Credential management** - -Store the generated credentials in a file. - -Example: - -```bash -USERNAME=username -PASSWORD=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13) -{ - echo "Application-Credentials" - echo "Username: $USERNAME" - echo "Password: $PASSWORD" -} >> ~/application.creds -``` - -### 9.3 **Enviroment files** - -Use `cat </path/to/.env -VARIABLE="value" -PORT=3000 -DB_NAME="${DB_NAME}" -EOF -``` - -### 9.4 **Services** - -Enable affected services after configuration changes and start them right away. 
- -Example: - -```bash -systemctl enable -q --now nginx -``` - ---- - -## 10. **Cleanup** - -### 10.1 **Remove temporary files** - -Remove temporary files and downloads after use. - -Example: - -```bash -rm -rf /opt/v${RELEASE}.zip -``` - -### 10.2 **Autoremove and autoclean** - -Remove unused dependencies to reduce disk space usage. - -Example: - -```bash -apt-get -y autoremove -apt-get -y autoclean -``` - ---- - -## 11. **Best Practices Checklist** - -- [ ] Shebang is correctly set (`#!/usr/bin/env bash`). -- [ ] Metadata (author, license) is included at the top. -- [ ] Variables follow naming conventions. -- [ ] Sensitive values are dynamically generated. -- [ ] Files and services have proper permissions. -- [ ] Script cleans up temporary files. - ---- - -### Example: High-Level Script Flow - -1. Dependencies installation -2. Database setup -3. Download and configure application -4. Service configuration -5. Final cleanup diff --git a/docs/contribution/templates_install/AppName-install.sh b/docs/contribution/templates_install/AppName-install.sh deleted file mode 100644 index 58134a113..000000000 --- a/docs/contribution/templates_install/AppName-install.sh +++ /dev/null @@ -1,207 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) 2021-2026 community-scripts ORG -# Author: [YourGitHubUsername] -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: [SOURCE_URL e.g. https://github.com/example/app] - -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -color -verb_ip6 -catch_errors -setting_up_container -network_check -update_os - -# ============================================================================= -# DEPENDENCIES - Only add app-specific dependencies here! 
-# Don't add: ca-certificates, curl, gnupg, git, build-essential (handled by build.func) -# ============================================================================= - -msg_info "Installing Dependencies" -$STD apt install -y \ - libharfbuzz0b \ - fontconfig -msg_ok "Installed Dependencies" - -# ============================================================================= -# SETUP RUNTIMES & DATABASES (if needed) -# ============================================================================= -# Examples (uncomment as needed): -# -# NODE_VERSION="22" setup_nodejs -# NODE_VERSION="22" NODE_MODULE="pnpm" setup_nodejs # Installs pnpm -# PYTHON_VERSION="3.13" setup_uv -# JAVA_VERSION="21" setup_java -# GO_VERSION="1.22" setup_go -# PHP_VERSION="8.4" PHP_FPM="YES" setup_php -# setup_postgresql # Server only -# setup_mariadb # Server only -# setup_meilisearch # Search engine -# -# Then set up DB and user (sets $[DB]_DB_PASS): -# PG_DB_NAME="myapp" PG_DB_USER="myapp" setup_postgresql_db -# MARIADB_DB_NAME="myapp" MARIADB_DB_USER="myapp" setup_mariadb_db - -# ============================================================================= -# DOWNLOAD & DEPLOY APPLICATION -# ============================================================================= -# fetch_and_deploy_gh_release modes: -# "tarball" - Source tarball (default if omitted) -# "binary" - .deb package (auto-detects amd64/arm64) -# "prebuild" - Pre-built archive (.tar.gz) -# "singlefile" - Single binary file -# -# Examples: -# fetch_and_deploy_gh_release "myapp" "YourUsername/myapp" "tarball" "latest" "/opt/myapp" -# fetch_and_deploy_gh_release "myapp" "YourUsername/myapp" "binary" "latest" "/tmp" -# fetch_and_deploy_gh_release "myapp" "YourUsername/myapp" "prebuild" "latest" "/opt/myapp" "myapp-*.tar.gz" - -fetch_and_deploy_gh_release "[appname]" "owner/repo" "tarball" "latest" "/opt/[appname]" - -# --- Tools & Utilities --- -# get_lxc_ip # Sets $LOCAL_IP variable (call early!) 
-# setup_ffmpeg # Install FFmpeg with codecs -# setup_hwaccel # Setup GPU hardware acceleration -# setup_imagemagick # Install ImageMagick 7 -# setup_docker # Install Docker Engine -# setup_adminer # Install Adminer for DB management -# create_self_signed_cert # Creates cert in /etc/ssl/[appname]/ - -# ============================================================================= -# EXAMPLES -# ============================================================================= -# -# EXAMPLE 1: Node.js Application with PostgreSQL -# --------------------------------------------- -# NODE_VERSION="22" setup_nodejs -# PG_VERSION="17" setup_postgresql -# PG_DB_NAME="myapp" PG_DB_USER="myapp" setup_postgresql_db -# get_lxc_ip -# fetch_and_deploy_gh_release "myapp" "owner/myapp" "tarball" "latest" "/opt/myapp" -# -# msg_info "Configuring MyApp" -# cd /opt/myapp -# $STD npm ci -# cat </opt/myapp/.env -# DATABASE_URL=postgresql://${PG_DB_USER}:${PG_DB_PASS}@localhost/${PG_DB_NAME} -# HOST=${LOCAL_IP} -# PORT=3000 -# EOF -# msg_ok "Configured MyApp" -# -# EXAMPLE 2: Python Application with uv -# ------------------------------------ -# PYTHON_VERSION="3.13" setup_uv -# get_lxc_ip -# fetch_and_deploy_gh_release "myapp" "owner/myapp" "tarball" "latest" "/opt/myapp" -# -# msg_info "Setting up MyApp" -# cd /opt/myapp -# $STD uv sync -# cat </opt/myapp/.env -# HOST=${LOCAL_IP} -# PORT=8000 -# EOF -# msg_ok "Setup MyApp" - -# ============================================================================= -# EXAMPLE 3: PHP Application with MariaDB + Nginx -# ============================================================================= -# PHP_VERSION="8.4" PHP_FPM="YES" PHP_MODULE="bcmath,curl,gd,intl,mbstring,mysql,xml,zip" setup_php -# setup_composer -# setup_mariadb -# MARIADB_DB_NAME="myapp" MARIADB_DB_USER="myapp" setup_mariadb_db -# get_lxc_ip -# fetch_and_deploy_gh_release "myapp" "owner/myapp" "prebuild" "latest" "/opt/myapp" "myapp-*.tar.gz" -# -# msg_info "Configuring MyApp" -# cd 
/opt/myapp -# cp .env.example .env -# sed -i "s|APP_URL=.*|APP_URL=http://${LOCAL_IP}|" .env -# sed -i "s|DB_DATABASE=.*|DB_DATABASE=${MARIADB_DB_NAME}|" .env -# sed -i "s|DB_USERNAME=.*|DB_USERNAME=${MARIADB_DB_USER}|" .env -# sed -i "s|DB_PASSWORD=.*|DB_PASSWORD=${MARIADB_DB_PASS}|" .env -# $STD composer install --no-dev --no-interaction -# chown -R www-data:www-data /opt/myapp -# msg_ok "Configured MyApp" - -# ============================================================================= -# YOUR APPLICATION INSTALLATION -# ============================================================================= -# 1. Setup runtimes and databases FIRST -# 2. Call get_lxc_ip if you need the container IP -# 3. Use fetch_and_deploy_gh_release to download the app (handles version tracking) -# 4. Configure the application -# 5. Create systemd service -# 6. Finalize with motd_ssh, customize, cleanup_lxc - -# --- Setup runtimes/databases --- -NODE_VERSION="22" setup_nodejs -get_lxc_ip - -# --- Download and install app --- -fetch_and_deploy_gh_release "[appname]" "[owner/repo]" "tarball" "latest" "/opt/[appname]" - -msg_info "Setting up [AppName]" -cd /opt/[appname] -# $STD npm ci -msg_ok "Setup [AppName]" - -# ============================================================================= -# CONFIGURATION -# ============================================================================= - -msg_info "Configuring [AppName]" -cd /opt/[appname] - -# Install application dependencies (uncomment as needed): -# $STD npm ci --production # Node.js apps -# $STD uv sync --frozen # Python apps -# $STD composer install --no-dev # PHP apps -# $STD cargo build --release # Rust apps - -# Create .env file if needed: -cat </opt/[appname]/.env -# Use import_local_ip to get container IP, or hardcode if building on Proxmox -APP_URL=http://localhost -PORT=8080 -EOF - -msg_ok "Configured [AppName]" - -# ============================================================================= -# CREATE SYSTEMD SERVICE -# 
============================================================================= - -msg_info "Creating Service" -cat </etc/systemd/system/[appname].service -[Unit] -Description=[AppName] Service -After=network.target - -[Service] -Type=simple -User=root -WorkingDirectory=/opt/[appname] -ExecStart=/usr/bin/node /opt/[appname]/server.js -Restart=on-failure -RestartSec=5 - -[Install] -WantedBy=multi-user.target -EOF -systemctl enable -q --now [appname] -msg_ok "Created Service" - -# ============================================================================= -# CLEANUP & FINALIZATION -# ============================================================================= -# These are called automatically, but shown here for clarity: -# motd_ssh - Displays service info on SSH login -# customize - Enables optional customizations -# cleanup_lxc - Removes temp files, bash history, logs - -motd_ssh -customize -cleanup_lxc diff --git a/docs/contribution/templates_json/AppName.json b/docs/contribution/templates_json/AppName.json deleted file mode 100644 index 0b67ce327..000000000 --- a/docs/contribution/templates_json/AppName.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "AppName", - "slug": "appname", - "categories": [ - 0 - ], - "date_created": "2026-01-18", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://docs.example.com/", - "website": "https://example.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/appname.webp", - "config_path": "/opt/appname/.env", - "description": "Short description of what AppName does and its main features.", - "install_methods": [ - { - "type": "default", - "script": "ct/appname.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Change the default password after first login!", - "type": "warning" - } - ] -} diff 
--git a/docs/contribution/templates_json/AppName.md b/docs/contribution/templates_json/AppName.md deleted file mode 100644 index 1eb4ed61f..000000000 --- a/docs/contribution/templates_json/AppName.md +++ /dev/null @@ -1,165 +0,0 @@ -# JSON Metadata Files - Quick Reference - -The metadata file (`frontend/public/json/myapp.json`) tells the web interface how to display your application. - ---- - -## Quick Start - -**Use the JSON Generator Tool:** -[https://community-scripts.github.io/ProxmoxVE/json-editor](https://community-scripts.github.io/ProxmoxVE/json-editor) - -1. Enter application details -2. Generator creates `frontend/public/json/myapp.json` -3. Copy the output to your contribution - ---- - -## File Structure - -```json -{ - "name": "MyApp", - "slug": "myapp", - "categories": [1], - "date_created": "2026-01-18", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://docs.example.com/", - "website": "https://example.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/myapp.webp", - "config_path": "/opt/myapp/.env", - "description": "Brief description of what MyApp does", - "install_methods": [ - { - "type": "default", - "script": "ct/myapp.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Change the default password after first login!", - "type": "warning" - } - ] -} -``` - ---- - -## Field Reference - -| Field | Required | Example | Notes | -| --------------------- | -------- | ----------------- | ---------------------------------------------- | -| `name` | Yes | "MyApp" | Display name | -| `slug` | Yes | "myapp" | URL-friendly identifier (lowercase, no spaces) | -| `categories` | Yes | [1] | One or more category IDs | -| `date_created` | Yes | "2026-01-18" | Format: YYYY-MM-DD | -| `type` | Yes | "ct" | Container type: "ct" 
or "vm" | -| `interface_port` | Yes | 3000 | Default web interface port | -| `logo` | No | "https://..." | Logo URL (64px x 64px PNG) | -| `config_path` | Yes | "/opt/myapp/.env" | Main config file location | -| `description` | Yes | "App description" | Brief description (100 chars) | -| `install_methods` | Yes | See below | Installation resources (array) | -| `default_credentials` | No | See below | Optional default login | -| `notes` | No | See below | Additional notes (array) | - ---- - -## Install Methods - -Each installation method specifies resource requirements: - -```json -"install_methods": [ - { - "type": "default", - "script": "ct/myapp.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "Debian", - "version": "13" - } - } -] -``` - -**Resource Defaults:** - -- CPU: Cores (1-8) -- RAM: Megabytes (256-4096) -- Disk: Gigabytes (4-50) - ---- - -## Common Categories - -- `0` Miscellaneous -- `1` Proxmox & Virtualization -- `2` Operating Systems -- `3` Containers & Docker -- `4` Network & Firewall -- `5` Adblock & DNS -- `6` Authentication & Security -- `7` Backup & Recovery -- `8` Databases -- `9` Monitoring & Analytics -- `10` Dashboards & Frontends -- `11` Files & Downloads -- `12` Documents & Notes -- `13` Media & Streaming -- `14` \*Arr Suite -- `15` NVR & Cameras -- `16` IoT & Smart Home -- `17` ZigBee, Z-Wave & Matter -- `18` MQTT & Messaging -- `19` Automation & Scheduling -- `20` AI / Coding & Dev-Tools -- `21` Webservers & Proxies -- `22` Bots & ChatOps -- `23` Finance & Budgeting -- `24` Gaming & Leisure -- `25` Business & ERP - ---- - -## Best Practices - -1. **Use the JSON Generator** - It validates structure -2. **Keep descriptions short** - 100 characters max -3. **Use real resource requirements** - Based on your testing -4. **Include sensible defaults** - Pre-filled in install_methods -5. 
**Slug must be lowercase** - No spaces, use hyphens - ---- - -## Reference Examples - -See actual examples in the repo: - -- [frontend/public/json/trip.json](https://github.com/community-scripts/ProxmoxVE/blob/main/frontend/public/json/trip.json) -- [frontend/public/json/thingsboard.json](https://github.com/community-scripts/ProxmoxVE/blob/main/frontend/public/json/thingsboard.json) -- [frontend/public/json/unifi.json](https://github.com/community-scripts/ProxmoxVE/blob/main/frontend/public/json/unifi.json) - ---- - -## Need Help? - -- **[JSON Generator](https://community-scripts.github.io/ProxmoxVE/json-editor)** - Interactive tool -- **[README.md](../README.md)** - Full contribution workflow -- **[Quick Start](../README.md)** - Step-by-step guide diff --git a/docs/ct/DETAILED_GUIDE.md b/docs/ct/DETAILED_GUIDE.md deleted file mode 100644 index 9059d6736..000000000 --- a/docs/ct/DETAILED_GUIDE.md +++ /dev/null @@ -1,472 +0,0 @@ -# 🚀 **Application Container Scripts (ct/AppName.sh)** - -**Modern Guide to Creating LXC Container Installation Scripts** - -> **Updated**: December 2025 -> **Context**: Fully integrated with build.func, advanced_settings wizard, and defaults system -> **Example Used**: `/ct/pihole.sh`, `/ct/docker.sh` - ---- - -## 📋 Table of Contents - -- [Overview](#overview) -- [Architecture & Flow](#architecture--flow) -- [File Structure](#file-structure) -- [Complete Script Template](#complete-script-template) -- [Function Reference](#function-reference) -- [Advanced Features](#advanced-features) -- [Real Examples](#real-examples) -- [Troubleshooting](#troubleshooting) -- [Contribution Checklist](#contribution-checklist) - ---- - -## Overview - -### Purpose - -Container scripts (`ct/AppName.sh`) are **entry points for creating LXC containers** with specific applications pre-installed. They: - -1. Define container defaults (CPU, RAM, disk, OS) -2. Call the main build orchestrator (`build.func`) -3. Implement application-specific update mechanisms -4. 
Provide user-facing success messages - -### Execution Context - -``` -Proxmox Host - ↓ -ct/AppName.sh sourced (runs as root on host) - ↓ -build.func: Creates LXC container + runs install script inside - ↓ -install/AppName-install.sh (runs inside container) - ↓ -Container ready with app installed -``` - -### Key Integration Points - -- **build.func** - Main orchestrator (container creation, storage, variable management) -- **install.func** - Container-specific setup (OS update, package management) -- **tools.func** - Tool installation helpers (repositories, GitHub releases) -- **core.func** - UI/messaging functions (colors, spinners, validation) -- **error_handler.func** - Error handling and signal management - ---- - -## Architecture & Flow - -### Container Creation Flow - -``` -START: bash ct/pihole.sh - ↓ -[1] Set APP, var_*, defaults - ↓ -[2] header_info() → Display ASCII art - ↓ -[3] variables() → Parse arguments & load build.func - ↓ -[4] color() → Setup ANSI codes - ↓ -[5] catch_errors() → Setup trap handlers - ↓ -[6] install_script() → Show mode menu (5 options) - ↓ - ├─ INSTALL_MODE="0" (Default) - ├─ INSTALL_MODE="1" (Advanced - 19-step wizard) - ├─ INSTALL_MODE="2" (User Defaults) - ├─ INSTALL_MODE="3" (App Defaults) - └─ INSTALL_MODE="4" (Settings Menu) - ↓ -[7] advanced_settings() → Collect user configuration (if mode=1) - ↓ -[8] start() → Confirm or re-edit settings - ↓ -[9] build_container() → Create LXC + execute install script - ↓ -[10] description() → Set container description - ↓ -[11] SUCCESS → Display access URL - ↓ -END -``` - -### Default Values Precedence - -``` -Priority 1 (Highest): Environment Variables (var_cpu, var_ram, etc.) 
-Priority 2: App-Specific Defaults (/defaults/AppName.vars) -Priority 3: User Global Defaults (/default.vars) -Priority 4 (Lowest): Built-in Defaults (in build.func) -``` - ---- - -## File Structure - -### Minimal ct/AppName.sh Template - -``` -#!/usr/bin/env bash # [1] Shebang - # [2] Copyright/License -source <(curl -s .../misc/build.func) # [3] Import functions - # [4] APP metadata -APP="AppName" # [5] Default values -var_tags="tag1;tag2" -var_cpu="2" -var_ram="2048" -... - -header_info "$APP" # [6] Display header -variables # [7] Process arguments -color # [8] Setup colors -catch_errors # [9] Setup error handling - -function update_script() { ... } # [10] Update function (optional) - -start # [11] Launch container creation -build_container -description -msg_ok "Completed successfully!\n" -``` - ---- - -## Complete Script Template - -### 1. File Header & Imports - -```bash -#!/usr/bin/env bash -# Copyright (c) 2021-2026 community-scripts ORG -# Author: YourUsername -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://github.com/example/project - -# Import main orchestrator -source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVE/raw/branch/main/misc/build.func) -``` - -> **⚠️ IMPORTANT**: Before opening a PR, change URL to `community-scripts` repo! - -### 2. 
Application Metadata - -```bash -# Application Configuration -APP="ApplicationName" -var_tags="tag1;tag2;tag3" # Max 3-4 tags, no spaces, semicolon-separated - -# Container Resources -var_cpu="2" # CPU cores -var_ram="2048" # RAM in MB -var_disk="10" # Disk in GB - -# Container Type & OS -var_os="debian" # Options: alpine, debian, ubuntu -var_version="12" # Alpine: 3.20+, Debian: 11-13, Ubuntu: 20.04+ -var_unprivileged="1" # 1=unprivileged (secure), 0=privileged (rarely needed) -``` - -**Variable Naming Convention**: -- Variables exposed to user: `var_*` (e.g., `var_cpu`, `var_hostname`, `var_ssh`) -- Internal variables: lowercase (e.g., `container_id`, `app_version`) - -### 3. Display & Initialization - -```bash -# Display header ASCII art -header_info "$APP" - -# Process command-line arguments and load configuration -variables - -# Setup ANSI color codes and formatting -color - -# Initialize error handling (trap ERR, EXIT, INT, TERM) -catch_errors -``` - -### 4. Update Function (Highly Recommended) - -```bash -function update_script() { - header_info - - # Always start with these checks - check_container_storage - check_container_resources - - # Verify app is installed - if [[ ! -d /opt/appname ]]; then - msg_error "No ${APP} Installation Found!" - exit - fi - - # Get latest version from GitHub - RELEASE=$(curl -fsSL https://api.github.com/repos/user/repo/releases/latest | \ - grep "tag_name" | awk '{print substr($2, 2, length($2)-3)}') - - # Compare with saved version - if [[ ! 
-f /opt/${APP}_version.txt ]] || [[ "${RELEASE}" != "$(cat /opt/${APP}_version.txt)" ]]; then - msg_info "Updating ${APP} to v${RELEASE}" - - # Backup user data - cp -r /opt/appname /opt/appname-backup - - # Perform update - cd /opt - wget -q "https://github.com/user/repo/releases/download/v${RELEASE}/app-${RELEASE}.tar.gz" - tar -xzf app-${RELEASE}.tar.gz - - # Restore user data - cp /opt/appname-backup/config/* /opt/appname/config/ - - # Cleanup - rm -rf app-${RELEASE}.tar.gz /opt/appname-backup - - # Save new version - echo "${RELEASE}" > /opt/${APP}_version.txt - - msg_ok "Updated ${APP} to v${RELEASE}" - else - msg_ok "No update required. ${APP} is already at v${RELEASE}." - fi - - exit -} -``` - -### 5. Script Launch - -```bash -# Start the container creation workflow -start - -# Build the container with selected configuration -build_container - -# Set container description/notes in Proxmox UI -description - -# Display success message -msg_ok "Completed successfully!\n" -echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" -echo -e "${INFO}${YW} Access it using the following URL:${CL}" -echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8080${CL}" -``` - ---- - -## Function Reference - -### Core Functions (From build.func) - -#### `variables()` - -**Purpose**: Initialize container variables, load user arguments, setup orchestration - -**Triggered by**: Called automatically at script start - -**Behavior**: -1. Parse command-line arguments (if any) -2. Generate random UUID for session tracking -3. Load container storage from Proxmox -4. Initialize application-specific defaults -5. Setup SSH/environment configuration - -#### `start()` - -**Purpose**: Launch the container creation menu with 5 installation modes - -**Menu Options**: -``` -1. Default Installation (Quick setup, predefined settings) -2. Advanced Installation (19-step wizard with full control) -3. User Defaults (Load ~/.community-scripts/default.vars) -4. 
App Defaults (Load /defaults/AppName.vars) -5. Settings Menu (Interactive mode selection) -``` - -#### `build_container()` - -**Purpose**: Main orchestrator for LXC container creation - -**Operations**: -1. Validates all variables -2. Creates LXC container via `pct create` -3. Executes `install/AppName-install.sh` inside container -4. Monitors installation progress -5. Handles errors and rollback on failure - -#### `description()` - -**Purpose**: Set container description/notes visible in Proxmox UI - ---- - -## Advanced Features - -### 1. Custom Configuration Menus - -If your app has additional setup beyond standard vars: - -```bash -custom_app_settings() { - CONFIGURE_DB=$(whiptail --title "Database Setup" \ - --yesno "Would you like to configure a custom database?" 8 60) - - if [[ $? -eq 0 ]]; then - DB_HOST=$(whiptail --inputbox "Database Host:" 8 60 3>&1 1>&2 2>&3) - DB_PORT=$(whiptail --inputbox "Database Port:" 8 60 "3306" 3>&1 1>&2 2>&3) - fi -} - -custom_app_settings -``` - -### 2. Update Function Patterns - -Save installed version for update checks - -### 3. Health Check Functions - -Add custom validation: - -```bash -function health_check() { - header_info - - if [[ ! -d /opt/appname ]]; then - msg_error "Application not found!" - exit 1 - fi - - if ! 
systemctl is-active --quiet appname; then - msg_error "Application service not running" - exit 1 - fi - - msg_ok "Health check passed" -} -``` - ---- - -## Real Examples - -### Example 1: Simple Web App (Debian-based) - -```bash -#!/usr/bin/env bash -source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVE/raw/branch/main/misc/build.func) - -APP="Homarr" -var_tags="dashboard;homepage" -var_cpu="2" -var_ram="1024" -var_disk="5" -var_os="debian" -var_version="12" -var_unprivileged="1" - -header_info "$APP" -variables -color -catch_errors - -function update_script() { - # Update logic here -} - -start -build_container -description -msg_ok "Completed successfully!\n" -``` - ---- - -## Troubleshooting - -### Container Creation Fails - -**Symptom**: `pct create` exits with error code 209 - -**Solution**: -```bash -# Check existing containers -pct list | grep CTID - -# Remove conflicting container -pct destroy CTID - -# Retry ct/AppName.sh -``` - -### Update Function Doesn't Detect New Version - -**Debug**: -```bash -# Check version file -cat /opt/AppName_version.txt - -# Test GitHub API -curl -fsSL https://api.github.com/repos/user/repo/releases/latest | grep tag_name -``` - ---- - -## Contribution Checklist - -Before submitting a PR: - -### Script Structure -- [ ] Shebang is `#!/usr/bin/env bash` -- [ ] Imports `build.func` from community-scripts repo -- [ ] Copyright header with author and source URL -- [ ] APP variable matches filename -- [ ] `var_tags` are semicolon-separated (no spaces) - -### Default Values -- [ ] `var_cpu` set appropriately (2-4 for most apps) -- [ ] `var_ram` set appropriately (1024-4096 MB minimum) -- [ ] `var_disk` sufficient for app + data (5-20 GB) -- [ ] `var_os` is realistic - -### Functions -- [ ] `update_script()` implemented -- [ ] Update function checks if app installed -- [ ] Proper error handling with `msg_error` - -### Testing -- [ ] Script tested with default installation -- [ ] Script tested with advanced 
(19-step) installation -- [ ] Update function tested on existing installation - ---- - -## Best Practices - -### ✅ DO: - -1. **Use meaningful defaults** -2. **Implement version tracking** -3. **Handle edge cases** -4. **Use proper messaging with msg_info/msg_ok/msg_error** - -### ❌ DON'T: - -1. **Hardcode versions** -2. **Use custom color codes** (use built-in variables) -3. **Forget error handling** -4. **Leave temporary files** - ---- - -**Last Updated**: December 2025 -**Compatibility**: ProxmoxVE with build.func v3+ diff --git a/docs/ct/README.md b/docs/ct/README.md deleted file mode 100644 index 09eac5f19..000000000 --- a/docs/ct/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# Container Scripts Documentation (/ct) - -This directory contains comprehensive documentation for container creation scripts in the `/ct` directory. - -## Overview - -Container scripts (`ct/*.sh`) are the entry points for creating LXC containers in Proxmox VE. They run on the host and orchestrate the entire container creation process. - -## Documentation Structure - -Each script has standardized documentation following the project pattern. - -## Key Resources - -- **[DETAILED_GUIDE.md](DETAILED_GUIDE.md)** - Complete reference for creating ct scripts -- **[../contribution/README.md](../contribution/README.md)** - How to contribute -- **[../misc/build.func/](../misc/build.func/)** - Core orchestrator documentation - -## Container Creation Flow - -``` -ct/AppName.sh (host-side) - │ - ├─ Calls: build.func (orchestrator) - │ - ├─ Variables: var_cpu, var_ram, var_disk, var_os - │ - └─ Creates: LXC Container - │ - └─ Runs: install/appname-install.sh (inside) -``` - -## Available Scripts - -See `/ct` directory for all container creation scripts. Common examples: - -- `pihole.sh` - Pi-hole DNS/DHCP server -- `docker.sh` - Docker container runtime -- `wallabag.sh` - Article reading & archiving -- `nextcloud.sh` - Private cloud storage -- `debian.sh` - Basic Debian container -- And 30+ more... 
- -## Quick Start - -To understand how to create a container script: - -1. Read: [UPDATED_APP-ct.md](../UPDATED_APP-ct.md) -2. Study: A similar existing script in `/ct` -3. Copy template and customize -4. Test locally -5. Submit PR - -## Contributing a New Container - -1. Create `ct/myapp.sh` -2. Create `install/myapp-install.sh` -3. Follow template in [UPDATED_APP-ct.md](../UPDATED_APP-ct.md) -4. Test thoroughly -5. Submit PR with both files - -## Common Tasks - -- **Add new container application** → [CONTRIBUTION_GUIDE.md](../CONTRIBUTION_GUIDE.md) -- **Debug container creation** → [EXIT_CODES.md](../EXIT_CODES.md) -- **Understand build.func** → [misc/build.func/](../misc/build.func/) -- **Development mode debugging** → [DEV_MODE.md](../DEV_MODE.md) - ---- - -**Last Updated**: December 2025 -**Maintainers**: community-scripts team diff --git a/docs/guides/CONFIGURATION_REFERENCE.md b/docs/guides/CONFIGURATION_REFERENCE.md deleted file mode 100644 index 74728d3b2..000000000 --- a/docs/guides/CONFIGURATION_REFERENCE.md +++ /dev/null @@ -1,904 +0,0 @@ -# Configuration Reference - -**Complete reference for all configuration variables and options in community-scripts for Proxmox VE.** - ---- - -## Table of Contents - -1. [Variable Naming Convention](#variable-naming-convention) -2. [Complete Variable Reference](#complete-variable-reference) -3. [Resource Configuration](#resource-configuration) -4. [Network Configuration](#network-configuration) -5. [IPv6 Configuration](#ipv6-configuration) -6. [SSH Configuration](#ssh-configuration) -7. [Container Features](#container-features) -8. [Storage Configuration](#storage-configuration) -9. [Security Settings](#security-settings) -10. [Advanced Options](#advanced-options) -11. 
[Quick Reference Table](#quick-reference-table) - ---- - -## Variable Naming Convention - -All configuration variables follow a consistent pattern: - -``` -var_= -``` - -**Rules:** -- ✅ Always starts with `var_` -- ✅ Lowercase letters only -- ✅ Underscores for word separation -- ✅ No spaces around `=` -- ✅ Values can be quoted if needed - -**Examples:** -```bash -# ✓ Correct -var_cpu=4 -var_hostname=myserver -var_ssh_authorized_key=ssh-rsa AAAA... - -# ✗ Wrong -CPU=4 # Missing var_ prefix -var_CPU=4 # Uppercase not allowed -var_cpu = 4 # Spaces around = -var-cpu=4 # Hyphens not allowed -``` - ---- - -## Complete Variable Reference - -### var_unprivileged - -**Type:** Boolean (0 or 1) -**Default:** `1` (unprivileged) -**Description:** Determines if container runs unprivileged (recommended) or privileged. - -```bash -var_unprivileged=1 # Unprivileged (safer, recommended) -var_unprivileged=0 # Privileged (less secure, more features) -``` - -**When to use privileged (0):** -- Hardware access required -- Certain kernel modules needed -- Legacy applications -- Nested virtualization with full features - -**Security Impact:** -- Unprivileged: Container root is mapped to unprivileged user on host -- Privileged: Container root = host root (security risk) - ---- - -### var_cpu - -**Type:** Integer -**Default:** Varies by app (usually 1-4) -**Range:** 1 to host CPU count -**Description:** Number of CPU cores allocated to container. - -```bash -var_cpu=1 # Single core (minimal) -var_cpu=2 # Dual core (typical) -var_cpu=4 # Quad core (recommended for apps) -var_cpu=8 # High performance -``` - -**Best Practices:** -- Start with 2 cores for most applications -- Monitor usage with `pct exec -- htop` -- Can be changed after creation -- Consider host CPU count (don't over-allocate) - ---- - -### var_ram - -**Type:** Integer (MB) -**Default:** Varies by app (usually 512-2048) -**Range:** 512 MB to host RAM -**Description:** Amount of RAM in megabytes. 
- -```bash -var_ram=512 # 512 MB (minimal) -var_ram=1024 # 1 GB (typical) -var_ram=2048 # 2 GB (comfortable) -var_ram=4096 # 4 GB (recommended for databases) -var_ram=8192 # 8 GB (high memory apps) -``` - -**Conversion Guide:** -``` -512 MB = 0.5 GB -1024 MB = 1 GB -2048 MB = 2 GB -4096 MB = 4 GB -8192 MB = 8 GB -16384 MB = 16 GB -``` - -**Best Practices:** -- Minimum 512 MB for basic Linux -- 1 GB for typical applications -- 2-4 GB for web servers, databases -- Monitor with `free -h` inside container - ---- - -### var_disk - -**Type:** Integer (GB) -**Default:** Varies by app (usually 2-8) -**Range:** 0.001 GB to storage capacity -**Description:** Root disk size in gigabytes. - -```bash -var_disk=2 # 2 GB (minimal OS only) -var_disk=4 # 4 GB (typical) -var_disk=8 # 8 GB (comfortable) -var_disk=20 # 20 GB (recommended for apps) -var_disk=50 # 50 GB (large applications) -var_disk=100 # 100 GB (databases, media) -``` - -**Important Notes:** -- Can be expanded after creation (not reduced) -- Actual space depends on storage type -- Thin provisioning supported on most storage -- Plan for logs, data, updates - -**Recommended Sizes by Use Case:** -``` -Basic Linux container: 4 GB -Web server (Nginx/Apache): 8 GB -Application server: 10-20 GB -Database server: 20-50 GB -Docker host: 30-100 GB -Media server: 100+ GB -``` - ---- - -### var_hostname - -**Type:** String -**Default:** Application name -**Max Length:** 63 characters -**Description:** Container hostname (FQDN format allowed). 
- -```bash -var_hostname=myserver -var_hostname=pihole -var_hostname=docker-01 -var_hostname=web.example.com -``` - -**Rules:** -- Lowercase letters, numbers, hyphens -- Cannot start or end with hyphen -- No underscores allowed -- No spaces - -**Best Practices:** -```bash -# ✓ Good -var_hostname=web-server -var_hostname=db-primary -var_hostname=app.domain.com - -# ✗ Avoid -var_hostname=Web_Server # Uppercase, underscore -var_hostname=-server # Starts with hyphen -var_hostname=my server # Contains space -``` - ---- - -### var_brg - -**Type:** String -**Default:** `vmbr0` -**Description:** Network bridge interface. - -```bash -var_brg=vmbr0 # Default Proxmox bridge -var_brg=vmbr1 # Custom bridge -var_brg=vmbr2 # Isolated network -``` - -**Common Setups:** -``` -vmbr0 → Main network (LAN) -vmbr1 → Guest network -vmbr2 → DMZ -vmbr3 → Management -vmbr4 → Storage network -``` - -**Check available bridges:** -```bash -ip link show | grep vmbr -# or -brctl show -``` - ---- - -### var_net - -**Type:** String -**Options:** `dhcp` or `static` -**Default:** `dhcp` -**Description:** IPv4 network configuration method. - -```bash -var_net=dhcp # Automatic IP via DHCP -var_net=static # Manual IP configuration -``` - -**DHCP Mode:** -- Automatic IP assignment -- Easy setup -- Good for development -- Requires DHCP server on network - -**Static Mode:** -- Fixed IP address -- Requires gateway configuration -- Better for servers -- Configure via advanced settings or after creation - ---- - -### var_gateway - -**Type:** IPv4 Address -**Default:** Auto-detected from host -**Description:** Network gateway IP address. 
- -```bash -var_gateway=192.168.1.1 -var_gateway=10.0.0.1 -var_gateway=172.16.0.1 -``` - -**Auto-detection:** -If not specified, system detects gateway from host: -```bash -ip route | grep default -``` - -**When to specify:** -- Multiple gateways available -- Custom routing setup -- Different network segment - ---- - -### var_vlan - -**Type:** Integer -**Range:** 1-4094 -**Default:** None -**Description:** VLAN tag for network isolation. - -```bash -var_vlan=10 # VLAN 10 -var_vlan=100 # VLAN 100 -var_vlan=200 # VLAN 200 -``` - -**Common VLAN Schemes:** -``` -VLAN 10 → Management -VLAN 20 → Servers -VLAN 30 → Desktops -VLAN 40 → Guest WiFi -VLAN 50 → IoT devices -VLAN 99 → DMZ -``` - -**Requirements:** -- Switch must support VLANs -- Proxmox bridge configured for VLAN aware -- Gateway on same VLAN - ---- - -### var_mtu - -**Type:** Integer -**Default:** `1500` -**Range:** 68-9000 -**Description:** Maximum Transmission Unit size. - -```bash -var_mtu=1500 # Standard Ethernet -var_mtu=1492 # PPPoE -var_mtu=9000 # Jumbo frames -``` - -**Common Values:** -``` -1500 → Standard Ethernet (default) -1492 → PPPoE connections -1400 → Some VPN setups -9000 → Jumbo frames (10GbE networks) -``` - -**When to change:** -- Jumbo frames for performance on 10GbE -- PPPoE internet connections -- VPN tunnels with overhead -- Specific network requirements - ---- - -### var_mac - -**Type:** MAC Address -**Format:** `XX:XX:XX:XX:XX:XX` -**Default:** Auto-generated -**Description:** Container MAC address. 
- -```bash -var_mac=02:00:00:00:00:01 -var_mac=DE:AD:BE:EF:00:01 -``` - -**When to specify:** -- MAC-based licensing -- Static DHCP reservations -- Network access control -- Cloning configurations - -**Best Practices:** -- Use locally administered addresses (2nd bit set) -- Start with `02:`, `06:`, `0A:`, `0E:` -- Avoid vendor OUIs -- Document custom MACs - ---- - -### var_ipv6_method - -**Type:** String -**Options:** `auto`, `dhcp`, `static`, `none`, `disable` -**Default:** `none` -**Description:** IPv6 configuration method. - -```bash -var_ipv6_method=auto # SLAAC (auto-configuration) -var_ipv6_method=dhcp # DHCPv6 -var_ipv6_method=static # Manual configuration -var_ipv6_method=none # IPv6 enabled but not configured -var_ipv6_method=disable # IPv6 completely disabled -``` - -**Detailed Options:** - -**auto (SLAAC)** -- Stateless Address Auto-Configuration -- Router advertisements -- No DHCPv6 server needed -- Recommended for most cases - -**dhcp (DHCPv6)** -- Stateful configuration -- Requires DHCPv6 server -- More control over addressing - -**static** -- Manual IPv6 address -- Manual gateway -- Full control - -**none** -- IPv6 stack active -- No address configured -- Can configure later - -**disable** -- IPv6 completely disabled at kernel level -- Use when IPv6 causes issues -- Sets `net.ipv6.conf.all.disable_ipv6=1` - ---- - -### var_ns - -**Type:** IP Address -**Default:** Auto (from host) -**Description:** DNS nameserver IP. - -```bash -var_ns=8.8.8.8 # Google DNS -var_ns=1.1.1.1 # Cloudflare DNS -var_ns=9.9.9.9 # Quad9 DNS -var_ns=192.168.1.1 # Local DNS -``` - -**Common DNS Servers:** -``` -8.8.8.8, 8.8.4.4 → Google Public DNS -1.1.1.1, 1.0.0.1 → Cloudflare DNS -9.9.9.9, 149.112.112.112 → Quad9 DNS -208.67.222.222 → OpenDNS -192.168.1.1 → Local router/Pi-hole -``` - ---- - -### var_ssh - -**Type:** Boolean -**Options:** `yes` or `no` -**Default:** `no` -**Description:** Enable SSH server in container. 
- -```bash -var_ssh=yes # SSH server enabled -var_ssh=no # SSH server disabled (console only) -``` - -**When enabled:** -- OpenSSH server installed -- Started on boot -- Port 22 open -- Root login allowed - -**Security Considerations:** -- Disable if not needed -- Use SSH keys instead of passwords -- Consider non-standard port -- Firewall rules recommended - ---- - -### var_ssh_authorized_key - -**Type:** String (SSH public key) -**Default:** None -**Description:** SSH public key for root user. - -```bash -var_ssh_authorized_key=ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC... user@host -var_ssh_authorized_key=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAA... user@host -``` - -**Supported Key Types:** -- RSA (2048-4096 bits) -- Ed25519 (recommended) -- ECDSA -- DSA (deprecated) - -**How to get your public key:** -```bash -cat ~/.ssh/id_rsa.pub -# or -cat ~/.ssh/id_ed25519.pub -``` - -**Multiple keys:** -Separate with newlines (in file) or use multiple deployments. - ---- - -### var_pw - -**Type:** String -**Default:** Empty (auto-login) -**Description:** Root password. - -```bash -var_pw=SecurePassword123! # Set password -var_pw= # Auto-login (empty) -``` - -**Auto-login behavior:** -- No password required for console -- Automatic login on console access -- SSH still requires key if enabled -- Suitable for development - -**Password best practices:** -- Minimum 12 characters -- Mix upper/lower/numbers/symbols -- Use password manager -- Rotate regularly - ---- - -### var_nesting - -**Type:** Boolean (0 or 1) -**Default:** `1` -**Description:** Allow nested containers (required for Docker). 
- -```bash -var_nesting=1 # Nested containers allowed -var_nesting=0 # Nested containers disabled -``` - -**Required for:** -- Docker -- LXC inside LXC -- Systemd features -- Container orchestration - -**Security Impact:** -- Slightly reduced isolation -- Required for container platforms -- Generally safe when unprivileged - ---- - -### var_diagnostics - -**Type:** Boolean (yes or no) -**Default:** `yes` -**Description:** Determines if anonymous telemetry and diagnostic data is sent to Community-Scripts API. - -```bash -var_diagnostics=yes # Allow telemetry (helps us improve scripts) -var_diagnostics=no # Disable all telemetry -``` - -**Privacy & Usage:** -- Data is strictly anonymous (random session ID) -- Reports success/failure of installations -- Maps error codes (e.g., APT lock, out of RAM) -- No user-specific data, hostnames, or secret keys are ever sent - ---- - -### var_gpu - -**Type:** Boolean/Toggle -**Options:** `yes` or `no` -**Default:** `no` -**Description:** Enable GPU passthrough for the container. - -```bash -var_gpu=yes # Enable GPU passthrough (auto-detect) -var_gpu=no # Disable GPU passthrough (default) -``` - -**Features enabled:** -- Auto-detects Intel (QuickSync), NVIDIA, and AMD GPUs -- Passes through `/dev/dri` and render nodes -- Configures appropriate container permissions -- Crucial for media servers (Plex, Jellyfin, Immich) - -**Prerequisites:** -- Host drivers installed correctly -- Hardware present and visible to Proxmox -- IOMMU enabled (for some configurations) - ---- - -### var_tun - -**Type:** Boolean/Toggle -**Options:** `yes` or `no` -**Default:** `no` -**Description:** Enable TUN/TAP device support. 
- -```bash -var_tun=yes # Enable TUN/TAP support -var_tun=no # Disable TUN/TAP support (default) -``` - -**Required for:** -- VPN software (WireGuard, OpenVPN) -- Network tunneling (Tailscale, ZeroTier) -- Custom network bridges - ---- - -### var_keyctl - -**Type:** Boolean (0 or 1) -**Default:** `0` -**Description:** Enable keyctl system call. - -```bash -var_keyctl=1 # Keyctl enabled -var_keyctl=0 # Keyctl disabled -``` - -**Required for:** -- Docker in some configurations -- Systemd keyring features -- Encryption key management -- Some authentication systems - ---- - -### var_fuse - -**Type:** Boolean/Toggle -**Options:** `yes` or `no` -**Default:** `no` -**Description:** Enable FUSE filesystem support. - -```bash -var_fuse=yes # FUSE enabled -var_fuse=no # FUSE disabled -``` - -**Required for:** -- sshfs -- AppImage -- Some backup tools -- User-space filesystems - ---- - -### var_mknod - -**Type:** Boolean (0 or 1) -**Default:** `0` -**Description:** Allow device node creation. - -```bash -var_mknod=1 # Device nodes allowed -var_mknod=0 # Device nodes disabled -``` - -**Requires:** -- Kernel 5.3+ -- Experimental feature -- Use with caution - ---- - -### var_mount_fs - -**Type:** String (comma-separated) -**Default:** Empty -**Description:** Allowed mountable filesystems. - -```bash -var_mount_fs=nfs -var_mount_fs=nfs,cifs -var_mount_fs=ext4,xfs,nfs -``` - -**Common Options:** -``` -nfs → NFS network shares -cifs → SMB/CIFS shares -ext4 → Ext4 filesystems -xfs → XFS filesystems -btrfs → Btrfs filesystems -``` - ---- - -### var_protection - -**Type:** Boolean -**Options:** `yes` or `no` -**Default:** `no` -**Description:** Prevent accidental deletion. 
- -```bash -var_protection=yes # Protected from deletion -var_protection=no # Can be deleted normally -``` - -**When protected:** -- Cannot delete via GUI -- Cannot delete via `pct destroy` -- Must disable protection first -- Good for production containers - ---- - -### var_tags - -**Type:** String (comma-separated) -**Default:** `community-script` -**Description:** Container tags for organization. - -```bash -var_tags=production -var_tags=production,webserver -var_tags=dev,testing,temporary -``` - -**Best Practices:** -```bash -# Environment tags -var_tags=production -var_tags=development -var_tags=staging - -# Function tags -var_tags=webserver,nginx -var_tags=database,postgresql -var_tags=cache,redis - -# Project tags -var_tags=project-alpha,frontend -var_tags=customer-xyz,billing - -# Combined -var_tags=production,webserver,project-alpha -``` - ---- - -### var_timezone - -**Type:** String (TZ database format) -**Default:** Host timezone -**Description:** Container timezone. - -```bash -var_timezone=Europe/Berlin -var_timezone=America/New_York -var_timezone=Asia/Tokyo -``` - -**Common Timezones:** -``` -Europe/London -Europe/Berlin -Europe/Paris -America/New_York -America/Chicago -America/Los_Angeles -Asia/Tokyo -Asia/Singapore -Australia/Sydney -UTC -``` - -**List all timezones:** -```bash -timedatectl list-timezones -``` - ---- - -### var_verbose - -**Type:** Boolean -**Options:** `yes` or `no` -**Default:** `no` -**Description:** Enable verbose output. - -```bash -var_verbose=yes # Show all commands -var_verbose=no # Silent mode -``` - -**When enabled:** -- Shows all executed commands -- Displays detailed progress -- Useful for debugging -- More log output - ---- - -### var_apt_cacher - -**Type:** Boolean -**Options:** `yes` or `no` -**Default:** `no` -**Description:** Use APT caching proxy. 
- -```bash -var_apt_cacher=yes -var_apt_cacher=no -``` - -**Benefits:** -- Faster package installs -- Reduced bandwidth -- Offline package cache -- Speeds up multiple containers - ---- - -### var_apt_cacher_ip - -**Type:** IP Address -**Default:** None -**Description:** APT cacher proxy IP. - -```bash -var_apt_cacher=yes -var_apt_cacher_ip=192.168.1.100 -``` - -**Setup apt-cacher-ng:** -```bash -apt install apt-cacher-ng -# Runs on port 3142 -``` - ---- - -### var_container_storage - -**Type:** String -**Default:** Auto-detected -**Description:** Storage for container. - -```bash -var_container_storage=local -var_container_storage=local-zfs -var_container_storage=pve-storage -``` - -**List available storage:** -```bash -pvesm status -``` - ---- - -### var_template_storage - -**Type:** String -**Default:** Auto-detected -**Description:** Storage for templates. - -```bash -var_template_storage=local -var_template_storage=nfs-templates -``` - ---- - -## Quick Reference Table - -| Variable | Type | Default | Example | -|----------|------|---------|---------| -| `var_unprivileged` | 0/1 | 1 | `var_unprivileged=1` | -| `var_cpu` | int | varies | `var_cpu=4` | -| `var_ram` | int (MB) | varies | `var_ram=4096` | -| `var_disk` | int (GB) | varies | `var_disk=20` | -| `var_hostname` | string | app name | `var_hostname=server` | -| `var_brg` | string | vmbr0 | `var_brg=vmbr1` | -| `var_net` | dhcp/static | dhcp | `var_net=dhcp` | -| `var_gateway` | IP | auto | `var_gateway=192.168.1.1` | -| `var_ipv6_method` | string | none | `var_ipv6_method=disable` | -| `var_vlan` | int | - | `var_vlan=100` | -| `var_mtu` | int | 1500 | `var_mtu=9000` | -| `var_mac` | MAC | auto | `var_mac=02:00:00:00:00:01` | -| `var_ns` | IP | auto | `var_ns=8.8.8.8` | -| `var_ssh` | yes/no | no | `var_ssh=yes` | -| `var_ssh_authorized_key` | string | - | `var_ssh_authorized_key=ssh-rsa...` | -| `var_pw` | string | empty | `var_pw=password` | -| `var_nesting` | 0/1 | 1 | `var_nesting=1` | -| `var_keyctl` 
| 0/1 | 0 | `var_keyctl=1` | -| `var_fuse` | 0/1 | 0 | `var_fuse=1` | -| `var_mknod` | 0/1 | 0 | `var_mknod=1` | -| `var_mount_fs` | string | - | `var_mount_fs=nfs,cifs` | -| `var_protection` | yes/no | no | `var_protection=yes` | -| `var_tags` | string | community-script | `var_tags=prod,web` | -| `var_timezone` | string | host TZ | `var_timezone=Europe/Berlin` | -| `var_verbose` | yes/no | no | `var_verbose=yes` | -| `var_apt_cacher` | yes/no | no | `var_apt_cacher=yes` | -| `var_apt_cacher_ip` | IP | - | `var_apt_cacher_ip=192.168.1.10` | -| `var_container_storage` | string | auto | `var_container_storage=local-zfs` | -| `var_template_storage` | string | auto | `var_template_storage=local` | - ---- - -## See Also - -- [Defaults System Guide](DEFAULTS_GUIDE.md) -- [Unattended Deployments](UNATTENDED_DEPLOYMENTS.md) -- [Security Best Practices](SECURITY_GUIDE.md) -- [Network Configuration](NETWORK_GUIDE.md) diff --git a/docs/guides/DEFAULTS_SYSTEM_GUIDE.md b/docs/guides/DEFAULTS_SYSTEM_GUIDE.md deleted file mode 100644 index 2fcda738a..000000000 --- a/docs/guides/DEFAULTS_SYSTEM_GUIDE.md +++ /dev/null @@ -1,760 +0,0 @@ -# Configuration & Defaults System - User Guide - -> **Complete Guide to App Defaults and User Defaults** -> -> *Learn how to configure, save, and reuse your installation settings* - ---- - -## Table of Contents - -1. [Quick Start](#quick-start) -2. [Understanding the Defaults System](#understanding-the-defaults-system) -3. [Installation Modes](#installation-modes) -4. [How to Save Defaults](#how-to-save-defaults) -5. [How to Use Saved Defaults](#how-to-use-saved-defaults) -6. [Managing Your Defaults](#managing-your-defaults) -7. [Advanced Configuration](#advanced-configuration) -8. [Troubleshooting](#troubleshooting) - ---- - -## Quick Start - -### 30-Second Setup - -```bash -# 1. Run any container installation script -bash pihole-install.sh - -# 2. When prompted, select: "Advanced Settings" -# (This allows you to customize everything) - -# 3. 
Answer all configuration questions - -# 4. At the end, when asked "Save as App Defaults?" -# Select: YES - -# 5. Done! Your settings are now saved -``` - -**Next Time**: Run the same script again, select **"App Defaults"** and your settings will be applied automatically! - ---- - -## Understanding the Defaults System - -### The Three-Tier System - -Your installation settings are managed through three layers: - -#### 🔷 **Tier 1: Built-in Defaults** (Fallback) -``` -These are hardcoded in the scripts -Provide sensible defaults for each application -Example: PiHole uses 2 CPU cores by default -``` - -#### 🔶 **Tier 2: User Defaults** (Global) -``` -Your personal global defaults -Applied to ALL container installations -Location: /usr/local/community-scripts/default.vars -Example: "I always want 4 CPU cores and 2GB RAM" -``` - -#### 🔴 **Tier 3: App Defaults** (Specific) -``` -Application-specific saved settings -Only applied when installing that specific app -Location: /usr/local/community-scripts/defaults/.vars -Example: "Whenever I install PiHole, use these exact settings" -``` - -### Priority System - -When installing a container, settings are applied in this order: - -``` -┌─────────────────────────────────────┐ -│ 1. Environment Variables (HIGHEST) │ Set in shell: export var_cpu=8 -│ (these override everything) │ -├─────────────────────────────────────┤ -│ 2. App Defaults │ From: defaults/pihole.vars -│ (app-specific saved settings) │ -├─────────────────────────────────────┤ -│ 3. User Defaults │ From: default.vars -│ (your global defaults) │ -├─────────────────────────────────────┤ -│ 4. 
Built-in Defaults (LOWEST) │ Hardcoded in script -│ (failsafe, always available) │ -└─────────────────────────────────────┘ -``` - -**In Plain English**: -- If you set an environment variable → it wins -- Otherwise, if you have app-specific defaults → use those -- Otherwise, if you have user defaults → use those -- Otherwise, use the hardcoded defaults - ---- - -## Installation Modes - -When you run any installation script, you'll be presented with a menu: - -### Option 1️⃣ : **Default Settings** - -``` -Quick installation with standard settings -├─ Best for: First-time users, quick deployments -├─ What happens: -│ 1. Script uses built-in defaults -│ 2. Container created immediately -│ 3. No questions asked -└─ Time: ~2 minutes -``` - -**When to use**: You want a standard installation, don't need customization - ---- - -### Option 2️⃣ : **Advanced Settings** - -``` -Full customization with 19 configuration steps -├─ Best for: Power users, custom requirements -├─ What happens: -│ 1. Script asks for EVERY setting -│ 2. You control: CPU, RAM, Disk, Network, SSH, etc. -│ 3. Shows summary before creating -│ 4. Offers to save as App Defaults -└─ Time: ~5-10 minutes -``` - -**When to use**: You want full control over the configuration - -**Available Settings**: -- CPU cores, RAM amount, Disk size -- Container name, network settings -- SSH access, API access, Features -- Password, SSH keys, Tags - ---- - -### Option 3️⃣ : **User Defaults** - -``` -Use your saved global defaults -├─ Best for: Consistent deployments across many containers -├─ Requires: You've previously saved User Defaults -├─ What happens: -│ 1. Loads settings from: /usr/local/community-scripts/default.vars -│ 2. Shows you the loaded settings -│ 3. 
Creates container immediately -└─ Time: ~2 minutes -``` - -**When to use**: You have preferred defaults you want to use for every app - ---- - -### Option 4️⃣ : **App Defaults** (if available) - -``` -Use previously saved app-specific defaults -├─ Best for: Repeating the same configuration multiple times -├─ Requires: You've previously saved App Defaults for this app -├─ What happens: -│ 1. Loads settings from: /usr/local/community-scripts/defaults/.vars -│ 2. Shows you the loaded settings -│ 3. Creates container immediately -└─ Time: ~2 minutes -``` - -**When to use**: You've installed this app before and want identical settings - ---- - -### Option 5️⃣ : **Settings Menu** - -``` -Manage your saved configurations -├─ Functions: -│ • View current settings -│ • Edit storage selections -│ • Manage defaults location -│ • See what's currently configured -└─ Time: ~1 minute -``` - -**When to use**: You want to review or modify saved settings - ---- - -## How to Save Defaults - -### Method 1: Save While Installing - -This is the easiest way: - -#### Step-by-Step: Create App Defaults - -```bash -# 1. Run the installation script -bash pihole-install.sh - -# 2. Choose installation mode -# ┌─────────────────────────┐ -# │ Select installation mode:│ -# │ 1) Default Settings │ -# │ 2) Advanced Settings │ -# │ 3) User Defaults │ -# │ 4) App Defaults │ -# │ 5) Settings Menu │ -# └─────────────────────────┘ -# -# Enter: 2 (Advanced Settings) - -# 3. Answer all configuration questions -# • Container name? → my-pihole -# • CPU cores? → 4 -# • RAM amount? → 2048 -# • Disk size? → 20 -# • SSH access? → yes -# ... (more options) - -# 4. Review summary (shown before creation) -# ✓ Confirm to proceed - -# 5. After creation completes, you'll see: -# ┌──────────────────────────────────┐ -# │ Save as App Defaults for PiHole? │ -# │ (Yes/No) │ -# └──────────────────────────────────┘ -# -# Select: Yes - -# 6. Done! 
Settings saved to: -# /usr/local/community-scripts/defaults/pihole.vars -``` - -#### Step-by-Step: Create User Defaults - -```bash -# Same as App Defaults, but: -# When you select "Advanced Settings" -# FIRST app you run with this selection will offer -# to save as "User Defaults" additionally - -# This saves to: /usr/local/community-scripts/default.vars -``` - ---- - -### Method 2: Manual File Creation - -For advanced users who want to create defaults without running installation: - -```bash -# Create User Defaults manually -sudo tee /usr/local/community-scripts/default.vars > /dev/null << 'EOF' -# Global User Defaults -var_cpu=4 -var_ram=2048 -var_disk=20 -var_unprivileged=1 -var_brg=vmbr0 -var_gateway=192.168.1.1 -var_timezone=Europe/Berlin -var_ssh=yes -var_container_storage=local -var_template_storage=local -EOF - -# Create App Defaults manually -sudo tee /usr/local/community-scripts/defaults/pihole.vars > /dev/null << 'EOF' -# App-specific defaults for PiHole -var_unprivileged=1 -var_cpu=2 -var_ram=1024 -var_disk=10 -var_brg=vmbr0 -var_gateway=192.168.1.1 -var_hostname=pihole -var_container_storage=local -var_template_storage=local -EOF -``` - ---- - -### Method 3: Using Environment Variables - -Set defaults via environment before running: - -```bash -# Set as environment variables -export var_cpu=4 -export var_ram=2048 -export var_disk=20 -export var_hostname=my-container - -# Run installation -bash pihole-install.sh - -# These settings will be used -# (Can still be overridden by saved defaults) -``` - ---- - -## How to Use Saved Defaults - -### Using User Defaults - -```bash -# 1. Run any installation script -bash pihole-install.sh - -# 2. When asked for mode, select: -# Option: 3 (User Defaults) - -# 3. Your settings from default.vars are applied -# 4. Container created with your saved settings -``` - -### Using App Defaults - -```bash -# 1. Run the app you configured before -bash pihole-install.sh - -# 2. 
When asked for mode, select: -# Option: 4 (App Defaults) - -# 3. Your settings from defaults/pihole.vars are applied -# 4. Container created with exact same settings -``` - -### Overriding Saved Defaults - -```bash -# Even if you have defaults saved, -# you can override them with environment variables - -export var_cpu=8 # Override saved defaults -export var_hostname=custom-name - -bash pihole-install.sh -# Installation will use these values instead of saved defaults -``` - ---- - -## Managing Your Defaults - -### View Your Settings - -#### View User Defaults -```bash -cat /usr/local/community-scripts/default.vars -``` - -#### View App Defaults -```bash -cat /usr/local/community-scripts/defaults/pihole.vars -``` - -#### List All Saved App Defaults -```bash -ls -la /usr/local/community-scripts/defaults/ -``` - -### Edit Your Settings - -#### Edit User Defaults -```bash -sudo nano /usr/local/community-scripts/default.vars -``` - -#### Edit App Defaults -```bash -sudo nano /usr/local/community-scripts/defaults/pihole.vars -``` - -### Update Existing Defaults - -```bash -# Run installation again with your app -bash pihole-install.sh - -# Select: Advanced Settings -# Make desired changes -# At end, when asked to save: -# "Defaults already exist, Update?" 
-# Select: Yes - -# Your saved defaults are updated -``` - -### Delete Defaults - -#### Delete User Defaults -```bash -sudo rm /usr/local/community-scripts/default.vars -``` - -#### Delete App Defaults -```bash -sudo rm /usr/local/community-scripts/defaults/pihole.vars -``` - -#### Delete All App Defaults -```bash -sudo rm /usr/local/community-scripts/defaults/* -``` - ---- - -## Advanced Configuration - -### Available Variables - -All configurable variables start with `var_`: - -#### Resource Allocation -```bash -var_cpu=4 # CPU cores -var_ram=2048 # RAM in MB -var_disk=20 # Disk in GB -var_unprivileged=1 # 0=privileged, 1=unprivileged -``` - -#### Network -```bash -var_brg=vmbr0 # Bridge interface -var_net=dhcp # dhcp, static IP/CIDR, or IP range (see below) -var_gateway=192.168.1.1 # Default gateway (required for static IP) -var_mtu=1500 # MTU size -var_vlan=100 # VLAN ID -``` - -#### IP Range Scanning - -You can specify an IP range instead of a static IP. The system will ping each IP in the range and automatically assign the first free IP: - -```bash -# Format: START_IP/CIDR-END_IP/CIDR -var_net=192.168.1.100/24-192.168.1.200/24 -var_gateway=192.168.1.1 -``` - -This is useful for automated deployments where you want static IPs but don't want to track which IPs are already in use. - -#### System -```bash -var_hostname=pihole # Container name -var_timezone=Europe/Berlin # Timezone -var_pw=SecurePass123 # Root password -var_tags=dns,pihole # Tags for organization -var_verbose=yes # Enable verbose output -``` - -#### Security & Access -```bash -var_ssh=yes # Enable SSH -var_ssh_authorized_key="ssh-rsa AA..." 
# SSH public key -var_protection=1 # Enable protection flag -``` - -#### Features -```bash -var_fuse=1 # FUSE filesystem support -var_tun=1 # TUN device support -var_nesting=1 # Nesting (Docker in LXC) -var_keyctl=1 # Keyctl syscall -var_mknod=1 # Device node creation -``` - -#### Storage -```bash -var_container_storage=local # Where to store container -var_template_storage=local # Where to store templates -``` - -### Example Configuration Files - -#### Gaming Server Defaults -```bash -# High performance for gaming containers -var_cpu=8 -var_ram=4096 -var_disk=50 -var_unprivileged=0 -var_fuse=1 -var_nesting=1 -var_tags=gaming -``` - -#### Development Server -```bash -# Development with Docker support -var_cpu=4 -var_ram=2048 -var_disk=30 -var_unprivileged=1 -var_nesting=1 -var_ssh=yes -var_tags=development -``` - -#### IoT/Monitoring -```bash -# Low-resource, always-on containers -var_cpu=2 -var_ram=512 -var_disk=10 -var_unprivileged=1 -var_nesting=0 -var_fuse=0 -var_tun=0 -var_tags=iot,monitoring -``` - ---- - -## Troubleshooting - -### "App Defaults not available" Message - -**Problem**: You want to use App Defaults, but option says they're not available - -**Solution**: -1. You haven't created App Defaults yet for this app -2. Run the app with "Advanced Settings" -3. When finished, save as App Defaults -4. Next time, App Defaults will be available - ---- - -### "Settings not being applied" - -**Problem**: You saved defaults, but they're not being used - -**Checklist**: -```bash -# 1. Verify files exist -ls -la /usr/local/community-scripts/default.vars -ls -la /usr/local/community-scripts/defaults/.vars - -# 2. Check file permissions (should be readable) -stat /usr/local/community-scripts/default.vars - -# 3. Verify correct mode selected -# (Make sure you selected "User Defaults" or "App Defaults") - -# 4. 
Check for environment variable override -env | grep var_ -# If you have var_* set in environment, -# those override your saved defaults -``` - ---- - -### "Cannot write to defaults location" - -**Problem**: Permission denied when saving defaults - -**Solution**: -```bash -# Create the defaults directory if missing -sudo mkdir -p /usr/local/community-scripts/defaults - -# Fix permissions -sudo chmod 755 /usr/local/community-scripts -sudo chmod 755 /usr/local/community-scripts/defaults - -# Make sure you're running as root -sudo bash pihole-install.sh -``` - ---- - -### "Defaults directory doesn't exist" - -**Problem**: Script can't find where to save defaults - -**Solution**: -```bash -# Create the directory -sudo mkdir -p /usr/local/community-scripts/defaults - -# Verify -ls -la /usr/local/community-scripts/ -``` - ---- - -### Settings seem random or wrong - -**Problem**: Container gets different settings than expected - -**Possible Causes & Solutions**: - -```bash -# 1. Check if environment variables are set -env | grep var_ -# If you see var_* entries, those override your defaults -# Clear them: unset var_cpu var_ram (etc) - -# 2. Verify correct defaults are in files -cat /usr/local/community-scripts/default.vars -cat /usr/local/community-scripts/defaults/pihole.vars - -# 3. Check which mode you actually selected -# (Script output shows which defaults were applied) - -# 4. 
Check Proxmox logs for errors -sudo journalctl -u pve-daemon -n 50 -``` - ---- - -### "Variable not recognized" - -**Problem**: You set a variable that doesn't work - -**Solution**: -Only certain variables are allowed (security whitelist): - -``` -Allowed variables (starting with var_): -✓ var_cpu, var_ram, var_disk, var_unprivileged -✓ var_brg, var_gateway, var_mtu, var_vlan, var_net -✓ var_hostname, var_pw, var_timezone -✓ var_ssh, var_ssh_authorized_key -✓ var_fuse, var_tun, var_nesting, var_keyctl -✓ var_container_storage, var_template_storage -✓ var_tags, var_verbose -✓ var_apt_cacher, var_apt_cacher_ip -✓ var_protection, var_mount_fs - -✗ Other variables are NOT supported -``` - ---- - -## Best Practices - -### ✅ Do's - -✓ Use **App Defaults** when you want app-specific settings -✓ Use **User Defaults** for your global preferences -✓ Edit defaults files directly with `nano` (safe) -✓ Keep separate App Defaults for each app -✓ Back up your defaults regularly -✓ Use environment variables for temporary overrides - -### ❌ Don'ts - -✗ Don't use `source` on defaults files (security risk) -✗ Don't put sensitive passwords in defaults (use SSH keys) -✗ Don't modify defaults while installation is running -✗ Don't delete defaults.d while containers are being created -✗ Don't use special characters without escaping - ---- - -## Quick Reference - -### Defaults Locations - -| Type | Location | Example | -|------|----------|---------| -| User Defaults | `/usr/local/community-scripts/default.vars` | Global settings | -| App Defaults | `/usr/local/community-scripts/defaults/.vars` | PiHole-specific | -| Backup Dir | `/usr/local/community-scripts/defaults/` | All app defaults | - -### File Format - -```bash -# Comments start with # -var_name=value - -# No spaces around = -✓ var_cpu=4 -✗ var_cpu = 4 - -# String values don't need quotes -✓ var_hostname=mycontainer -✓ var_hostname='mycontainer' - -# Values with spaces need quotes -✓ var_tags="docker,production,testing" -✗ 
var_tags=docker,production,testing -``` - -### Command Reference - -```bash -# View defaults -cat /usr/local/community-scripts/default.vars - -# Edit defaults -sudo nano /usr/local/community-scripts/default.vars - -# List all app defaults -ls /usr/local/community-scripts/defaults/ - -# Backup your defaults -cp -r /usr/local/community-scripts/defaults/ ~/defaults-backup/ - -# Set temporary override -export var_cpu=8 -bash pihole-install.sh - -# Create custom defaults -sudo tee /usr/local/community-scripts/defaults/custom.vars << 'EOF' -var_cpu=4 -var_ram=2048 -EOF -``` - ---- - -## Getting Help - -### Need More Information? - -- 📖 [Main Documentation](../../docs/) -- 🐛 [Report Issues](https://github.com/community-scripts/ProxmoxVE/issues) -- 💬 [Discussions](https://github.com/community-scripts/ProxmoxVE/discussions) - -### Useful Commands - -```bash -# Check what variables are available -grep "var_" /path/to/app-install.sh | head -20 - -# Verify defaults syntax -cat /usr/local/community-scripts/default.vars - -# Monitor installation with defaults -bash pihole-install.sh 2>&1 | tee installation.log -``` - ---- - -## Document Information - -| Field | Value | -|-------|-------| -| Version | 1.0 | -| Last Updated | November 28, 2025 | -| Status | Current | -| License | MIT | - ---- - -**Happy configuring! 🚀** diff --git a/docs/guides/README.md b/docs/guides/README.md deleted file mode 100644 index 0623e2626..000000000 --- a/docs/guides/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# Configuration & Deployment Guides - -This directory contains comprehensive guides for configuring and deploying Proxmox VE containers using community-scripts. - -## 📚 Available Guides - -### [Configuration Reference](CONFIGURATION_REFERENCE.md) - -Complete reference for all configuration options, environment variables, and advanced settings available in the build system. 
- -**Topics covered:** - -- Container specifications (CPU, RAM, Disk) -- Network configuration (IPv4/IPv6, VLAN, MTU) -- Storage selection and management -- Privilege modes and features -- OS selection and versions - -### [Defaults System Guide](DEFAULTS_SYSTEM_GUIDE.md) - -Understanding and customizing default settings for container deployments. - -**Topics covered:** - -- Default system settings -- Per-script overrides -- Custom defaults configuration -- Environment variable precedence - -### [Unattended Deployments](UNATTENDED_DEPLOYMENTS.md) - -Automating container deployments without user interaction. - -**Topics covered:** - -- Environment variable configuration -- Batch deployments -- CI/CD integration -- Scripted installations -- Pre-configured templates - -## 🔗 Related Documentation - -- **[CT Scripts Guide](../ct/)** - Container script structure and usage -- **[Install Scripts Guide](../install/)** - Installation script internals -- **[API Documentation](../api/)** - API integration and endpoints -- **[Build Functions](../misc/build.func/)** - Build system functions reference -- **[Tools Functions](../misc/tools.func/)** - Utility functions reference - -## 💡 Quick Start - -For most users, start with the **Unattended Deployments** guide to learn how to automate your container setups. - -For advanced configuration options, refer to the **Configuration Reference**. - -## 🤝 Contributing - -If you'd like to improve these guides or add new ones, please see our [Contribution Guide](../contribution/). diff --git a/docs/guides/UNATTENDED_DEPLOYMENTS.md b/docs/guides/UNATTENDED_DEPLOYMENTS.md deleted file mode 100644 index d1b7e7463..000000000 --- a/docs/guides/UNATTENDED_DEPLOYMENTS.md +++ /dev/null @@ -1,1010 +0,0 @@ -# Unattended Deployments Guide - -Complete guide for automated, zero-interaction container deployments using community-scripts for Proxmox VE. 
- ---- - -## 🎯 What You'll Learn - -This comprehensive guide covers: -- ✅ Complete automation of container deployments -- ✅ Zero-interaction installations -- ✅ Batch deployments (multiple containers) -- ✅ Infrastructure as Code (Ansible, Terraform) -- ✅ CI/CD pipeline integration -- ✅ Error handling and rollback strategies -- ✅ Production-ready deployment scripts -- ✅ Security best practices - ---- - -## Table of Contents - -1. [Overview](#overview) -2. [Prerequisites](#prerequisites) -3. [Deployment Methods](#deployment-methods) -4. [Single Container Deployment](#single-container-deployment) -5. [Batch Deployments](#batch-deployments) -6. [Infrastructure as Code](#infrastructure-as-code) -7. [CI/CD Integration](#cicd-integration) -8. [Error Handling](#error-handling) -9. [Security Considerations](#security-considerations) - ---- - -## Overview - -Unattended deployments allow you to: -- ✅ Deploy containers without manual interaction -- ✅ Automate infrastructure provisioning -- ✅ Integrate with CI/CD pipelines -- ✅ Maintain consistent configurations -- ✅ Scale deployments across multiple nodes - ---- - -## Prerequisites - -### 1. Proxmox VE Access -```bash -# Verify you have root access -whoami # Should return: root - -# Check Proxmox version (8.0+ or 9.0-9.1 required) -pveversion -``` - -### 2. Network Connectivity -```bash -# Test GitHub access -curl -I https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/debian.sh - -# Test internet connectivity -ping -c 1 1.1.1.1 -``` - -### 3. 
Storage Available -```bash -# List available storage -pvesm status - -# Check free space -df -h -``` - ---- - -## Deployment Methods - -### Method Comparison - -| Method | Use Case | Complexity | Flexibility | -|--------|----------|------------|-------------| -| **Environment Variables** | Quick one-offs | Low | High | -| **App Defaults** | Repeat deployments | Low | Medium | -| **Shell Scripts** | Batch operations | Medium | High | -| **Ansible** | Infrastructure as Code | High | Very High | -| **Terraform** | Cloud-native IaC | High | Very High | - ---- - -## Single Container Deployment - -### Basic Unattended Deployment - -**Simplest form:** -```bash -var_hostname=myserver bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/debian.sh)" -``` - -### Complete Configuration Example - -```bash -#!/bin/bash -# deploy-single.sh - Deploy a single container with full configuration - -var_unprivileged=1 \ -var_cpu=4 \ -var_ram=4096 \ -var_disk=30 \ -var_hostname=production-app \ -var_os=debian \ -var_version=13 \ -var_brg=vmbr0 \ -var_net=dhcp \ -var_ipv6_method=none \ -var_ssh=yes \ -var_ssh_authorized_key="ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ... admin@workstation" \ -var_nesting=1 \ -var_tags=production,automated \ -var_protection=yes \ -var_verbose=no \ - bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/debian.sh)" - -echo "✓ Container deployed successfully" -``` - -### Using IP Range Scan for Automatic IP Assignment - -Instead of manually specifying static IPs, you can define an IP range. 
The system will automatically ping each IP and assign the first free one: - -```bash -#!/bin/bash -# deploy-with-ip-scan.sh - Auto-assign first free IP from range - -var_unprivileged=1 \ -var_cpu=4 \ -var_ram=4096 \ -var_hostname=web-server \ -var_net=192.168.1.100/24-192.168.1.150/24 \ -var_gateway=192.168.1.1 \ - bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/debian.sh)" - -# The script will: -# 1. Ping 192.168.1.100 - if responds, skip -# 2. Ping 192.168.1.101 - if responds, skip -# 3. Continue until first IP that doesn't respond -# 4. Assign that IP to the container -``` - -> **Note**: IP range format is `START_IP/CIDR-END_IP/CIDR`. Both sides must include the same CIDR notation. - -### Using App Defaults - -**Step 1: Create defaults once (interactive)** -```bash -bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/pihole.sh)" -# Select "Advanced Settings" → Configure → Save as "App Defaults" -``` - -**Step 2: Deploy unattended (uses saved defaults)** -```bash -#!/bin/bash -# deploy-with-defaults.sh - -# App defaults are loaded automatically -bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/pihole.sh)" -# Script will use /usr/local/community-scripts/defaults/pihole.vars -``` - -### Advanced Configuration Variables - -Beyond the basic resource settings, you can control advanced container features: - -| Variable | Description | Options | -|----------|-------------|---------| -| `var_os` | Operating system template | `debian`, `ubuntu`, `alpine` | -| `var_version` | OS version | `12`, `13` (Debian), `22.04`, `24.04` (Ubuntu) | -| `var_gpu` | Enable GPU passthrough | `yes`, `no` (Default: `no`) | -| `var_tun` | Enable TUN/TAP device | `yes`, `no` (Default: `no`) | -| `var_nesting` | Enable nesting | `1`, `0` (Default: `1`) | - -**Example with GPU and TUN:** -```bash -var_gpu=yes \ -var_tun=yes \ -var_hostname=transcoder \ - bash -c "$(curl 
-fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/plex.sh)" -``` - ---- - -## Batch Deployments - -### Deploy Multiple Containers - -#### Simple Loop - -```bash -#!/bin/bash -# batch-deploy-simple.sh - -apps=("thingsboard" "qui" "flatnotes") - -for app in "${apps[@]}"; do - echo "Deploying $app..." - var_hostname="$app-server" \ - var_cpu=2 \ - var_ram=2048 \ - bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/${app}.sh)" - - echo "✓ $app deployed" - sleep 5 # Wait between deployments -done -``` - -#### Advanced with Configuration Array - -```bash -#!/bin/bash -# batch-deploy-advanced.sh - Deploy multiple containers with individual configs - -declare -A CONTAINERS=( - ["beszel"]="1:512:8:vmbr0:monitoring" - ["qui"]="2:1024:10:vmbr0:torrent,ui" - ["thingsboard"]="6:8192:50:vmbr1:iot,industrial" - ["dockge"]="2:2048:10:vmbr0:docker,management" -) - -for app in "${!CONTAINERS[@]}"; do - # Parse configuration - IFS=':' read -r cpu ram disk bridge tags <<< "${CONTAINERS[$app]}" - - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - echo "Deploying: $app" - echo " CPU: $cpu cores" - echo " RAM: $ram MB" - echo " Disk: $disk GB" - echo " Bridge: $bridge" - echo " Tags: $tags" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - - # Deploy container - var_unprivileged=1 \ - var_cpu="$cpu" \ - var_ram="$ram" \ - var_disk="$disk" \ - var_hostname="$app" \ - var_brg="$bridge" \ - var_net=dhcp \ - var_ipv6_method=none \ - var_ssh=yes \ - var_tags="$tags,automated" \ - bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/${app}.sh)" 2>&1 | tee "deploy-${app}.log" - - if [ $? -eq 0 ]; then - echo "✓ $app deployed successfully" - else - echo "✗ $app deployment failed - check deploy-${app}.log" - fi - - sleep 5 -done - -echo "" -echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -echo "Batch deployment complete!" 
-echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -``` - -#### Parallel Deployment - -```bash -#!/bin/bash -# parallel-deploy.sh - Deploy multiple containers in parallel - -deploy_container() { - local app="$1" - local cpu="$2" - local ram="$3" - local disk="$4" - - echo "[$app] Starting deployment..." - var_cpu="$cpu" \ - var_ram="$ram" \ - var_disk="$disk" \ - var_hostname="$app" \ - var_net=dhcp \ - bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/${app}.sh)" \ - &> "deploy-${app}.log" - - echo "[$app] ✓ Completed" -} - -# Export function for parallel execution -export -f deploy_container - -# Deploy in parallel (max 3 at a time) -parallel -j 3 deploy_container ::: \ - "debian 2 2048 10" \ - "ubuntu 2 2048 10" \ - "alpine 1 1024 5" \ - "pihole 2 1024 8" \ - "docker 4 4096 30" - -echo "All deployments complete!" -``` - ---- - -## Infrastructure as Code - -### Ansible Playbook - -#### Basic Playbook - -```yaml ---- -# playbook-proxmox.yml -- name: Deploy ProxmoxVE Containers - hosts: proxmox_hosts - become: yes - tasks: - - name: Deploy Debian Container - shell: | - var_unprivileged=1 \ - var_cpu=2 \ - var_ram=2048 \ - var_disk=10 \ - var_hostname=debian-{{ inventory_hostname }} \ - var_net=dhcp \ - var_ssh=yes \ - var_tags=ansible,automated \ - bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/debian.sh)" - args: - executable: /bin/bash - register: deploy_result - - - name: Display deployment result - debug: - var: deploy_result.stdout_lines -``` - -#### Advanced Playbook with Variables - -```yaml ---- -# advanced-playbook.yml -- name: Deploy Multiple Container Types - hosts: proxmox - vars: - containers: - - name: pihole - cpu: 2 - ram: 1024 - disk: 8 - tags: "dns,network" - - name: homeassistant - cpu: 4 - ram: 4096 - disk: 20 - tags: "automation,ha" - - name: docker - cpu: 6 - ram: 8192 - disk: 50 - tags: "containers,docker" - - ssh_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') 
}}" - - tasks: - - name: Ensure community-scripts directory exists - file: - path: /usr/local/community-scripts/defaults - state: directory - mode: '0755' - - - name: Deploy containers - shell: | - var_unprivileged=1 \ - var_cpu={{ item.cpu }} \ - var_ram={{ item.ram }} \ - var_disk={{ item.disk }} \ - var_hostname={{ item.name }} \ - var_brg=vmbr0 \ - var_net=dhcp \ - var_ipv6_method=none \ - var_ssh=yes \ - var_ssh_authorized_key="{{ ssh_key }}" \ - var_tags="{{ item.tags }},ansible" \ - bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/{{ item.name }}.sh)" - args: - executable: /bin/bash - loop: "{{ containers }}" - register: deployment_results - - - name: Wait for containers to be ready - wait_for: - timeout: 60 - - - name: Report deployment status - debug: - msg: "Deployed {{ item.item.name }} - Status: {{ 'Success' if item.rc == 0 else 'Failed' }}" - loop: "{{ deployment_results.results }}" -``` - -Run with: -```bash -ansible-playbook -i inventory.ini advanced-playbook.yml -``` - -### Terraform Integration - -```hcl -# main.tf - Deploy containers via Terraform - -terraform { - required_providers { - proxmox = { - source = "telmate/proxmox" - version = "2.9.14" - } - } -} - -provider "proxmox" { - pm_api_url = "https://proxmox.example.com:8006/api2/json" - pm_api_token_id = "terraform@pam!terraform" - pm_api_token_secret = var.proxmox_token -} - -resource "null_resource" "deploy_container" { - for_each = var.containers - - provisioner "remote-exec" { - inline = [ - "var_unprivileged=1", - "var_cpu=${each.value.cpu}", - "var_ram=${each.value.ram}", - "var_disk=${each.value.disk}", - "var_hostname=${each.key}", - "var_net=dhcp", - "bash -c \"$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/${each.value.template}.sh)\"" - ] - - connection { - type = "ssh" - host = var.proxmox_host - user = "root" - private_key = file("~/.ssh/id_rsa") - } - } -} - -variable "containers" { - type = 
map(object({ - template = string - cpu = number - ram = number - disk = number - })) - - default = { - "pihole" = { - template = "pihole" - cpu = 2 - ram = 1024 - disk = 8 - } - "homeassistant" = { - template = "homeassistant" - cpu = 4 - ram = 4096 - disk = 20 - } - } -} -``` - ---- - -## CI/CD Integration - -### GitHub Actions - -```yaml -# .github/workflows/deploy-container.yml -name: Deploy Container to Proxmox - -on: - push: - branches: [main] - workflow_dispatch: - inputs: - container_type: - description: 'Container type to deploy' - required: true - type: choice - options: - - debian - - ubuntu - - docker - - pihole - -jobs: - deploy: - runs-on: ubuntu-latest - steps: - - name: Deploy to Proxmox - uses: appleboy/ssh-action@v0.1.10 - with: - host: ${{ secrets.PROXMOX_HOST }} - username: root - key: ${{ secrets.SSH_PRIVATE_KEY }} - script: | - var_unprivileged=1 \ - var_cpu=4 \ - var_ram=4096 \ - var_disk=30 \ - var_hostname=${{ github.event.inputs.container_type }}-ci \ - var_net=dhcp \ - var_ssh=yes \ - var_tags=ci-cd,automated \ - bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/${{ github.event.inputs.container_type }}.sh)" - - - name: Notify deployment status - if: success() - run: echo "✓ Container deployed successfully" -``` - -### GitLab CI - -```yaml -# .gitlab-ci.yml -stages: - - deploy - -deploy_container: - stage: deploy - image: alpine:latest - before_script: - - apk add --no-cache openssh-client curl bash - - eval $(ssh-agent -s) - - echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - - - mkdir -p ~/.ssh - - chmod 700 ~/.ssh - - ssh-keyscan $PROXMOX_HOST >> ~/.ssh/known_hosts - script: - - | - ssh root@$PROXMOX_HOST << 'EOF' - var_unprivileged=1 \ - var_cpu=4 \ - var_ram=4096 \ - var_disk=30 \ - var_hostname=gitlab-ci-container \ - var_net=dhcp \ - var_tags=gitlab-ci,automated \ - bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/debian.sh)" - EOF - only: - - main - 
when: manual -``` - ---- - -## Error Handling - -### Deployment Verification Script - -```bash -#!/bin/bash -# deploy-with-verification.sh - -APP="debian" -HOSTNAME="production-server" -MAX_RETRIES=3 -RETRY_COUNT=0 - -deploy_container() { - echo "Attempting deployment (Try $((RETRY_COUNT + 1))/$MAX_RETRIES)..." - - var_unprivileged=1 \ - var_cpu=4 \ - var_ram=4096 \ - var_disk=30 \ - var_hostname="$HOSTNAME" \ - var_net=dhcp \ - var_ssh=yes \ - bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/${APP}.sh)" 2>&1 | tee deploy.log - - return ${PIPESTATUS[0]} -} - -verify_deployment() { - echo "Verifying deployment..." - - # Check if container exists - if ! pct list | grep -q "$HOSTNAME"; then - echo "✗ Container not found in pct list" - return 1 - fi - - # Check if container is running - CTID=$(pct list | grep "$HOSTNAME" | awk '{print $1}') - STATUS=$(pct status "$CTID" | awk '{print $2}') - - if [ "$STATUS" != "running" ]; then - echo "✗ Container not running (Status: $STATUS)" - return 1 - fi - - # Check network connectivity - if ! pct exec "$CTID" -- ping -c 1 1.1.1.1 &>/dev/null; then - echo "⚠ Warning: No internet connectivity" - fi - - echo "✓ Deployment verified successfully" - echo " Container ID: $CTID" - echo " Status: $STATUS" - echo " IP: $(pct exec "$CTID" -- hostname -I)" - - return 0 -} - -# Main deployment loop with retry -while [ $RETRY_COUNT -lt $MAX_RETRIES ]; do - if deploy_container; then - if verify_deployment; then - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - echo "✓ Deployment successful!" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - exit 0 - else - echo "✗ Deployment verification failed" - fi - else - echo "✗ Deployment failed" - fi - - RETRY_COUNT=$((RETRY_COUNT + 1)) - - if [ $RETRY_COUNT -lt $MAX_RETRIES ]; then - echo "Retrying in 10 seconds..." 
- sleep 10 - fi -done - -echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -echo "✗ Deployment failed after $MAX_RETRIES attempts" -echo "Check deploy.log for details" -echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -exit 1 -``` - -### Rollback on Failure - -```bash -#!/bin/bash -# deploy-with-rollback.sh - -APP="debian" -HOSTNAME="test-server" -SNAPSHOT_NAME="pre-deployment" - -# Take snapshot of existing container (if exists) -backup_existing() { - EXISTING_CTID=$(pct list | grep "$HOSTNAME" | awk '{print $1}') - if [ -n "$EXISTING_CTID" ]; then - echo "Creating snapshot of existing container..." - pct snapshot "$EXISTING_CTID" "$SNAPSHOT_NAME" --description "Pre-deployment backup" - return 0 - fi - return 1 -} - -# Deploy new container -deploy() { - var_hostname="$HOSTNAME" \ - var_cpu=4 \ - var_ram=4096 \ - bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/${APP}.sh)" - return $? -} - -# Rollback to snapshot -rollback() { - local ctid="$1" - echo "Rolling back to snapshot..." - pct rollback "$ctid" "$SNAPSHOT_NAME" - pct delsnapshot "$ctid" "$SNAPSHOT_NAME" -} - -# Main execution -backup_existing -HAD_BACKUP=$? - -if deploy; then - echo "✓ Deployment successful" - [ $HAD_BACKUP -eq 0 ] && echo "You can remove the snapshot with: pct delsnapshot $SNAPSHOT_NAME" -else - echo "✗ Deployment failed" - if [ $HAD_BACKUP -eq 0 ]; then - read -p "Rollback to previous version? 
(y/N) " -n 1 -r - echo - if [[ $REPLY =~ ^[Yy]$ ]]; then - rollback "$EXISTING_CTID" - echo "✓ Rolled back successfully" - fi - fi - exit 1 -fi -``` - ---- - -## Security Considerations - -### Secure Deployment Script - -```bash -#!/bin/bash -# secure-deploy.sh - Production-ready secure deployment - -set -euo pipefail # Exit on error, undefined vars, pipe failures - -# Configuration -readonly APP="debian" -readonly HOSTNAME="secure-server" -readonly SSH_KEY_PATH="/root/.ssh/id_rsa.pub" -readonly LOG_FILE="/var/log/container-deployments.log" - -# Logging function -log() { - echo "[$(date +'%Y-%m-%d %H:%M:%S')] $*" | tee -a "$LOG_FILE" -} - -# Validate prerequisites -validate_environment() { - log "Validating environment..." - - # Check if running as root - if [ "$EUID" -ne 0 ]; then - log "ERROR: Must run as root" - exit 1 - fi - - # Check SSH key exists - if [ ! -f "$SSH_KEY_PATH" ]; then - log "ERROR: SSH key not found at $SSH_KEY_PATH" - exit 1 - fi - - # Check internet connectivity - if ! curl -s --max-time 5 https://github.com &>/dev/null; then - log "ERROR: No internet connectivity" - exit 1 - fi - - log "✓ Environment validated" -} - -# Secure deployment -deploy_secure() { - log "Starting secure deployment for $HOSTNAME..." 
- - SSH_KEY=$(cat "$SSH_KEY_PATH") - - var_unprivileged=1 \ - var_cpu=4 \ - var_ram=4096 \ - var_disk=30 \ - var_hostname="$HOSTNAME" \ - var_brg=vmbr0 \ - var_net=dhcp \ - var_ipv6_method=disable \ - var_ssh=yes \ - var_ssh_authorized_key="$SSH_KEY" \ - var_nesting=0 \ - var_keyctl=0 \ - var_fuse=0 \ - var_protection=yes \ - var_tags=production,secure,automated \ - var_verbose=no \ - bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/${APP}.sh)" 2>&1 | tee -a "$LOG_FILE" - - if [ ${PIPESTATUS[0]} -eq 0 ]; then - log "✓ Deployment successful" - return 0 - else - log "✗ Deployment failed" - return 1 - fi -} - -# Main execution -main() { - validate_environment - - if deploy_secure; then - log "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - log "Secure deployment completed successfully" - log "Container: $HOSTNAME" - log "Features: Unprivileged, SSH-only, Protected" - log "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - exit 0 - else - log "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - log "Deployment failed - check logs at $LOG_FILE" - log "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - exit 1 - fi -} - -main "$@" -``` - -### SSH Key Management - -```bash -#!/bin/bash -# deploy-with-ssh-keys.sh - Secure SSH key deployment - -# Load SSH keys from multiple sources -load_ssh_keys() { - local keys=() - - # Personal key - if [ -f ~/.ssh/id_rsa.pub ]; then - keys+=("$(cat ~/.ssh/id_rsa.pub)") - fi - - # Team keys - if [ -f /etc/ssh/authorized_keys.d/team ]; then - while IFS= read -r key; do - [ -n "$key" ] && keys+=("$key") - done < /etc/ssh/authorized_keys.d/team - fi - - # Join keys with newline - printf "%s\n" "${keys[@]}" -} - -# Deploy with multiple SSH keys -SSH_KEYS=$(load_ssh_keys) - -var_ssh=yes \ -var_ssh_authorized_key="$SSH_KEYS" \ -var_hostname=multi-key-server \ - bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/debian.sh)" -``` - ---- - -## Complete Production Example - -```bash -#!/bin/bash -# production-deploy.sh - 
Complete production deployment system - -set -euo pipefail - -#━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -# Configuration -#━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - -readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -readonly LOG_DIR="/var/log/proxmox-deployments" -readonly CONFIG_FILE="$SCRIPT_DIR/deployment-config.json" - -#━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -# Functions -#━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - -setup_logging() { - mkdir -p "$LOG_DIR" - exec 1> >(tee -a "$LOG_DIR/deployment-$(date +%Y%m%d-%H%M%S).log") - exec 2>&1 -} - -log_info() { echo "[INFO] $(date +'%H:%M:%S') - $*"; } -log_error() { echo "[ERROR] $(date +'%H:%M:%S') - $*" >&2; } -log_success() { echo "[SUCCESS] $(date +'%H:%M:%S') - $*"; } - -validate_prerequisites() { - log_info "Validating prerequisites..." - - [ "$EUID" -eq 0 ] || { log_error "Must run as root"; exit 1; } - command -v jq >/dev/null 2>&1 || { log_error "jq not installed"; exit 1; } - command -v curl >/dev/null 2>&1 || { log_error "curl not installed"; exit 1; } - - log_success "Prerequisites validated" -} - -deploy_from_config() { - local config_file="$1" - - if [ ! -f "$config_file" ]; then - log_error "Config file not found: $config_file" - return 1 - fi - - local container_count - container_count=$(jq '.containers | length' "$config_file") - - log_info "Deploying $container_count containers from config..." 
- - for i in $(seq 0 $((container_count - 1))); do - local name cpu ram disk app tags - - name=$(jq -r ".containers[$i].name" "$config_file") - cpu=$(jq -r ".containers[$i].cpu" "$config_file") - ram=$(jq -r ".containers[$i].ram" "$config_file") - disk=$(jq -r ".containers[$i].disk" "$config_file") - app=$(jq -r ".containers[$i].app" "$config_file") - tags=$(jq -r ".containers[$i].tags" "$config_file") - - log_info "Deploying container: $name ($app)" - - var_unprivileged=1 \ - var_cpu="$cpu" \ - var_ram="$ram" \ - var_disk="$disk" \ - var_hostname="$name" \ - var_net=dhcp \ - var_ssh=yes \ - var_tags="$tags,automated" \ - var_protection=yes \ - bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/${app}.sh)" - - if [ $? -eq 0 ]; then - log_success "Deployed: $name" - else - log_error "Failed to deploy: $name" - fi - - sleep 5 - done -} - -generate_report() { - log_info "Generating deployment report..." - - echo "" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - echo "DEPLOYMENT REPORT" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - echo "Time: $(date)" - echo "" - pct list - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" -} - -#━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -# Main -#━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - -main() { - setup_logging - log_info "Starting production deployment system" - - validate_prerequisites - deploy_from_config "$CONFIG_FILE" - generate_report - - log_success "Production deployment complete" -} - -main "$@" -``` - -**Example config file (deployment-config.json):** -```json -{ - "containers": [ - { - "name": "pihole", - "app": "pihole", - "cpu": 2, - "ram": 1024, - "disk": 8, - "tags": "dns,network,production" - }, - { - "name": "homeassistant", - "app": "homeassistant", - "cpu": 4, - "ram": 4096, - "disk": 20, - "tags": "automation,ha,production" - }, - { - "name": "docker-host", - "app": "docker", - "cpu": 8, - "ram": 16384, - "disk": 100, - "tags": 
"containers,docker,production" - } - ] -} -``` - ---- - -## See Also - -- [Defaults System Guide](DEFAULTS_GUIDE.md) -- [Configuration Reference](CONFIGURATION_REFERENCE.md) -- [Security Best Practices](SECURITY_GUIDE.md) -- [Network Configuration](NETWORK_GUIDE.md) diff --git a/docs/guides/USER_SUBMITTED_GUIDES.md b/docs/guides/USER_SUBMITTED_GUIDES.md deleted file mode 100644 index 795993950..000000000 --- a/docs/guides/USER_SUBMITTED_GUIDES.md +++ /dev/null @@ -1,44 +0,0 @@ - -

User Submitted Guides

- - In order to contribute a guide on installing with Proxmox VE Helper Scripts, you should open a pull request that adds the guide to the `USER_SUBMITTED_GUIDES.md` file. - -[Proxmox Automation with Proxmox Helper Scripts!](https://www.youtube.com/watch?v=kcpu4z5eSEU) - -[Installing Home Assistant OS using Proxmox 8](https://community.home-assistant.io/t/installing-home-assistant-os-using-proxmox-8/201835) - -[How To Separate Zigbee2MQTT From Home Assistant In Proxmox](https://smarthomescene.com/guides/how-to-separate-zigbee2mqtt-from-home-assistant-in-proxmox/) - -[How To Install Home Assistant On Proxmox: The Easy Way](https://smarthomescene.com/guides/how-to-install-home-assistant-on-proxmox-the-easy-way/) - -[Home Assistant: Installing InfluxDB (LXC)](https://www.derekseaman.com/2023/04/home-assistant-installing-influxdb-lxc.html) - -[Home Assistant: Proxmox Quick Start Guide](https://www.derekseaman.com/2023/10/home-assistant-proxmox-ve-8-0-quick-start-guide-2.html) - -[Home Assistant: Installing Grafana (LXC) with Let’s Encrypt SSL](https://www.derekseaman.com/2023/04/home-assistant-installing-grafana-lxc.html) - -[Proxmox: Plex LXC with Alder Lake Transcoding](https://www.derekseaman.com/2023/04/proxmox-plex-lxc-with-alder-lake-transcoding.html) - -[How To Backup Home Assistant In Proxmox](https://smarthomescene.com/guides/how-to-backup-home-assistant-in-proxmox/) - -[Running Frigate on Proxmox](https://www.homeautomationguy.io/blog/running-frigate-on-proxmox) - -[Frigate VM on Proxmox with PCIe Coral TPU](https://www.derekseaman.com/2023/06/home-assistant-frigate-vm-on-proxmox-with-pcie-coral-tpu.html) - -[Moving Home Assistant’s Database To MariaDB On Proxmox](https://smarthomescene.com/guides/moving-home-assistants-database-to-mariadb-on-proxmox/) - -[How-to: Proxmox VE 7.4 to 8.0 Upgrade](https://www.derekseaman.com/2023/06/how-to-proxmox-7-4-to-8-0-upgrade.html) - -[iGPU Transcoding In Proxmox with Jellyfin](https://www.youtube.com/watch?v=XAa_qpNmzZs) 
- -[Proxmox + NetData]() - -[Proxmox Homelab Series]() - -[The fastest installation of Docker and Portainer on Proxmox VE](https://lavr.site/en-fastest-install-docker-portainer-proxmox/) - -[How To Setup Proxmox Backuper Server Using Helper Scripts]() diff --git a/docs/install/DETAILED_GUIDE.md b/docs/install/DETAILED_GUIDE.md deleted file mode 100644 index 6ef159aa7..000000000 --- a/docs/install/DETAILED_GUIDE.md +++ /dev/null @@ -1,647 +0,0 @@ -# 🛠️ **Application Installation Scripts (install/AppName-install.sh)** - -**Modern Guide to Writing In-Container Installation Scripts** - -> **Updated**: December 2025 -> **Context**: Integrated with tools.func, error_handler.func, and install.func -> **Examples Used**: `/install/pihole-install.sh`, `/install/mealie-install.sh` - ---- - -## 📋 Table of Contents - -- [Overview](#overview) -- [Execution Context](#execution-context) -- [File Structure](#file-structure) -- [Complete Script Template](#complete-script-template) -- [Installation Phases](#installation-phases) -- [Function Reference](#function-reference) -- [Best Practices](#best-practices) -- [Real Examples](#real-examples) -- [Troubleshooting](#troubleshooting) -- [Contribution Checklist](#contribution-checklist) - ---- - -## Overview - -### Purpose - -Installation scripts (`install/AppName-install.sh`) **run inside the LXC container** and are responsible for: - -1. Setting up the container OS (updates, packages) -2. Installing application dependencies -3. Downloading and configuring the application -4. Setting up services and systemd units -5. Creating version tracking files for updates -6. Generating credentials/configurations -7. 
Final cleanup and validation - -### Execution Flow - -``` -ct/AppName.sh (Proxmox Host) - ↓ -build_container() - ↓ -pct exec CTID bash -c "$(cat install/AppName-install.sh)" - ↓ -install/AppName-install.sh (Inside Container) - ↓ -Container Ready with App Installed -``` - ---- - -## Execution Context - -### Environment Variables Available - -```bash -# From Proxmox/Container -CTID # Container ID (100, 101, etc.) -PCT_OSTYPE # OS type (alpine, debian, ubuntu) -HOSTNAME # Container hostname - -# From build.func -FUNCTIONS_FILE_PATH # Bash functions library (core.func + tools.func) -VERBOSE # Verbose mode (yes/no) -STD # Standard redirection variable (silent/empty) - -# From install.func -APP # Application name -NSAPP # Normalized app name (lowercase, no spaces) -METHOD # Installation method (ct/install) -RANDOM_UUID # Session UUID for telemetry -``` - ---- - -## File Structure - -### Minimal install/AppName-install.sh Template - -```bash -#!/usr/bin/env bash # [1] Shebang - -# [2] Copyright/Metadata -# Copyright (c) 2021-2026 community-scripts ORG -# Author: YourUsername -# License: MIT -# Source: https://example.com - -# [3] Load functions -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -color -verb_ip6 -catch_errors -setting_up_container -network_check -update_os - -# [4] Installation steps -msg_info "Installing Dependencies" -$STD apt-get install -y package1 package2 -msg_ok "Installed Dependencies" - -# [5] Final setup -motd_ssh -customize -cleanup_lxc -``` - ---- - -## Complete Script Template - -### Phase 1: Header & Initialization - -```bash -#!/usr/bin/env bash -# Copyright (c) 2021-2026 community-scripts ORG -# Author: YourUsername -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://github.com/application/repo - -# Load all available functions (from core.func + tools.func) -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -# Initialize environment -color # Setup ANSI colors and icons -verb_ip6 # Configure IPv6 (if 
needed) -catch_errors # Setup error traps -setting_up_container # Verify OS is ready -network_check # Verify internet connectivity -update_os # Update packages (apk/apt) -``` - -### Phase 2: Dependency Installation - -```bash -msg_info "Installing Dependencies" -$STD apt-get install -y \ - curl \ - wget \ - git \ - nano \ - build-essential \ - libssl-dev \ - python3-dev -msg_ok "Installed Dependencies" -``` - -### Phase 3: Tool Setup (Using tools.func) - -```bash -# Setup specific tool versions -NODE_VERSION="22" setup_nodejs -PHP_VERSION="8.4" setup_php -PYTHON_VERSION="3.12" setup_uv -``` - -### Phase 4: Application Download & Setup - -```bash -# Download from GitHub -RELEASE=$(curl -fsSL https://api.github.com/repos/user/repo/releases/latest | \ - grep "tag_name" | awk '{print substr($2, 2, length($2)-3)}') - -wget -q "https://github.com/user/repo/releases/download/v${RELEASE}/app-${RELEASE}.tar.gz" -cd /opt -tar -xzf app-${RELEASE}.tar.gz -rm -f app-${RELEASE}.tar.gz -``` - -### Phase 5: Configuration Files - -```bash -# Using cat << EOF (multiline) -cat <<'EOF' >/etc/nginx/sites-available/appname -server { - listen 80; - server_name _; - root /opt/appname/public; - index index.php index.html; -} -EOF - -# Using sed for replacements -sed -i -e "s|^DB_HOST=.*|DB_HOST=localhost|" \ - -e "s|^DB_USER=.*|DB_USER=appuser|" \ - /opt/appname/.env -``` - -### Phase 6: Database Setup (If Needed) - -```bash -msg_info "Setting up Database" - -DB_NAME="appname_db" -DB_USER="appuser" -DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13) - -# For MySQL/MariaDB -mysql -u root < /opt/${APP}_version.txt - -# Or with additional metadata -cat > /opt/${APP}_version.txt < /opt/${APP}_version.txt -``` - -6. **Handle Alpine vs Debian Differences** -```bash -# ✅ Good: Detect OS -if grep -qi 'alpine' /etc/os-release; then - apk add package -else - apt-get install -y package -fi -``` - -### ❌ DON'T: - -1. 
**Hardcode Versions** -```bash -# ❌ Bad: Won't auto-update -wget https://example.com/app-1.2.3.tar.gz -``` - -2. **Use Root Without Password** -```bash -# ❌ Bad: Security risk -mysql -u root -``` - -3. **Forget Error Handling** -```bash -# ❌ Bad: Silent failures -wget https://example.com/file -tar -xzf file -``` - -4. **Leave Temporary Files** -```bash -# ✅ Always cleanup -rm -rf /opt/app-${RELEASE}.tar.gz -``` - ---- - -## Real Examples - -### Example 1: Node.js Application - -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -color -catch_errors -setting_up_container -network_check -update_os - -msg_info "Installing Node.js" -NODE_VERSION="22" setup_nodejs -msg_ok "Node.js installed" - -msg_info "Installing Application" -cd /opt -RELEASE=$(curl -fsSL https://api.github.com/repos/user/repo/releases/latest | \ - grep "tag_name" | awk '{print substr($2, 2, length($2)-3)}') -wget -q "https://github.com/user/repo/releases/download/v${RELEASE}/app.tar.gz" -tar -xzf app.tar.gz -echo "${RELEASE}" > /opt/app_version.txt -msg_ok "Application installed" - -systemctl enable --now app -cleanup_lxc -``` - -### Example 2: PHP Application with Database - -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -color -catch_errors -setting_up_container -network_check -update_os - -PHP_VERSION="8.4" PHP_MODULE="bcmath,curl,pdo_mysql" setup_php -setup_mariadb # Uses distribution packages (recommended) -# Or for specific version: MARIADB_VERSION="11.4" setup_mariadb - -# Database setup -DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13) -mysql -u root < /opt/app/.env < .env < /opt/app_version.txt -``` - -### Phase 10: Final Cleanup -```bash -motd_ssh -customize -cleanup_lxc -``` - -## Contributing an Installation Script - -1. Create `ct/myapp.sh` (host script) -2. Create `install/myapp-install.sh` (container script) -3. Follow 10-phase pattern in [UPDATED_APP-install.md](../UPDATED_APP-install.md) -4. 
Test in actual container -5. Submit PR with both files - -## Common Tasks - -- **Create new installation script** → [UPDATED_APP-install.md](../UPDATED_APP-install.md) -- **Install Node.js/PHP/Database** → [misc/tools.func/](../misc/tools.func/) -- **Setup Alpine container** → [misc/alpine-install.func/](../misc/alpine-install.func/) -- **Debug installation errors** → [EXIT_CODES.md](../EXIT_CODES.md) -- **Use dev mode** → [DEV_MODE.md](../DEV_MODE.md) - -## Alpine vs Debian - -- **Debian-based** → Use `tools.func`, `install.func`, `systemctl` -- **Alpine-based** → Use `alpine-tools.func`, `alpine-install.func`, `rc-service` - ---- - -**Last Updated**: December 2025 -**Maintainers**: community-scripts team diff --git a/docs/misc/README.md b/docs/misc/README.md deleted file mode 100644 index 291ded111..000000000 --- a/docs/misc/README.md +++ /dev/null @@ -1,283 +0,0 @@ -# Misc Documentation - -This directory contains comprehensive documentation for all function libraries and components of the Proxmox Community Scripts project. Each section is organized as a dedicated subdirectory with detailed references, examples, and integration guides. 
- ---- - -## 🏗️ **Core Function Libraries** - -### 📁 [build.func/](./build.func/) -**Core LXC Container Orchestration** - Main orchestrator for Proxmox LXC container creation - -**Contents:** -- BUILD_FUNC_FLOWCHART.md - Visual execution flows and decision trees -- BUILD_FUNC_ARCHITECTURE.md - System architecture and design -- BUILD_FUNC_ENVIRONMENT_VARIABLES.md - Complete environment variable reference -- BUILD_FUNC_FUNCTIONS_REFERENCE.md - Alphabetical function reference -- BUILD_FUNC_EXECUTION_FLOWS.md - Detailed execution flows -- BUILD_FUNC_USAGE_EXAMPLES.md - Practical usage examples -- README.md - Overview and quick reference - -**Key Functions**: `variables()`, `start()`, `build_container()`, `build_defaults()`, `advanced_settings()` - ---- - -### 📁 [core.func/](./core.func/) -**System Utilities & Foundation** - Essential utility functions and system checks - -**Contents:** -- CORE_FLOWCHART.md - Visual execution flows -- CORE_FUNCTIONS_REFERENCE.md - Complete function reference -- CORE_INTEGRATION.md - Integration points -- CORE_USAGE_EXAMPLES.md - Practical examples -- README.md - Overview and quick reference - -**Key Functions**: `color()`, `msg_info()`, `msg_ok()`, `msg_error()`, `root_check()`, `pve_check()`, `parse_dev_mode()` - ---- - -### 📁 [error_handler.func/](./error_handler.func/) -**Error Handling & Signal Management** - Comprehensive error handling and signal trapping - -**Contents:** -- ERROR_HANDLER_FLOWCHART.md - Visual error handling flows -- ERROR_HANDLER_FUNCTIONS_REFERENCE.md - Function reference -- ERROR_HANDLER_INTEGRATION.md - Integration with other components -- ERROR_HANDLER_USAGE_EXAMPLES.md - Practical examples -- README.md - Overview and quick reference - -**Key Functions**: `catch_errors()`, `error_handler()`, `explain_exit_code()`, `signal_handler()` - ---- - -### 📁 [api.func/](./api.func/) -**Proxmox API Integration** - API communication and diagnostic reporting - -**Contents:** -- API_FLOWCHART.md - API communication flows 
-- API_FUNCTIONS_REFERENCE.md - Function reference -- API_INTEGRATION.md - Integration points -- API_USAGE_EXAMPLES.md - Practical examples -- README.md - Overview and quick reference - -**Key Functions**: `post_to_api()`, `post_update_to_api()`, `get_error_description()` - ---- - -## 📦 **Installation & Setup Function Libraries** - -### 📁 [install.func/](./install.func/) -**Container Installation Workflow** - Installation orchestration for container-internal setup - -**Contents:** -- INSTALL_FUNC_FLOWCHART.md - Installation workflow diagrams -- INSTALL_FUNC_FUNCTIONS_REFERENCE.md - Complete function reference -- INSTALL_FUNC_INTEGRATION.md - Integration with build and tools -- INSTALL_FUNC_USAGE_EXAMPLES.md - Practical examples -- README.md - Overview and quick reference - -**Key Functions**: `setting_up_container()`, `network_check()`, `update_os()`, `motd_ssh()`, `cleanup_lxc()` - ---- - -### 📁 [tools.func/](./tools.func/) -**Package & Tool Installation** - Robust package management and 30+ tool installation functions - -**Contents:** -- TOOLS_FUNC_FLOWCHART.md - Package management flows -- TOOLS_FUNC_FUNCTIONS_REFERENCE.md - 30+ function reference -- TOOLS_FUNC_INTEGRATION.md - Integration with install workflows -- TOOLS_FUNC_USAGE_EXAMPLES.md - Practical examples -- TOOLS_FUNC_ENVIRONMENT_VARIABLES.md - Configuration reference -- README.md - Overview and quick reference - -**Key Functions**: `setup_nodejs()`, `setup_php()`, `setup_mariadb()`, `setup_docker()`, `setup_deb822_repo()`, `pkg_install()`, `pkg_update()` - ---- - -### 📁 [alpine-install.func/](./alpine-install.func/) -**Alpine Container Setup** - Alpine Linux-specific installation functions - -**Contents:** -- ALPINE_INSTALL_FUNC_FLOWCHART.md - Alpine setup flows -- ALPINE_INSTALL_FUNC_FUNCTIONS_REFERENCE.md - Function reference -- ALPINE_INSTALL_FUNC_INTEGRATION.md - Integration points -- ALPINE_INSTALL_FUNC_USAGE_EXAMPLES.md - Practical examples -- README.md - Overview and quick reference - -**Key 
Functions**: `update_os()` (apk version), `verb_ip6()`, `motd_ssh()` (Alpine), `customize()` - ---- - -### 📁 [alpine-tools.func/](./alpine-tools.func/) -**Alpine Tool Installation** - Alpine-specific package and tool installation - -**Contents:** -- ALPINE_TOOLS_FUNC_FLOWCHART.md - Alpine package flows -- ALPINE_TOOLS_FUNC_FUNCTIONS_REFERENCE.md - Function reference -- ALPINE_TOOLS_FUNC_INTEGRATION.md - Integration with Alpine workflows -- ALPINE_TOOLS_FUNC_USAGE_EXAMPLES.md - Practical examples -- README.md - Overview and quick reference - -**Key Functions**: `apk_add()`, `apk_update()`, `apk_del()`, `add_community_repo()`, Alpine tool setup functions - ---- - -### 📁 [cloud-init.func/](./cloud-init.func/) -**VM Cloud-Init Configuration** - Cloud-init and VM provisioning functions - -**Contents:** -- CLOUD_INIT_FUNC_FLOWCHART.md - Cloud-init flows -- CLOUD_INIT_FUNC_FUNCTIONS_REFERENCE.md - Function reference -- CLOUD_INIT_FUNC_INTEGRATION.md - Integration points -- CLOUD_INIT_FUNC_USAGE_EXAMPLES.md - Practical examples -- README.md - Overview and quick reference - -**Key Functions**: `generate_cloud_init()`, `generate_user_data()`, `setup_ssh_keys()`, `setup_static_ip()` - ---- - -## 🔗 **Function Library Relationships** - -``` -┌─────────────────────────────────────────────┐ -│ Container Creation Flow │ -├─────────────────────────────────────────────┤ -│ │ -│ ct/AppName.sh │ -│ ↓ (sources) │ -│ build.func │ -│ ├─ variables() │ -│ ├─ build_container() │ -│ └─ advanced_settings() │ -│ ↓ (calls pct create with) │ -│ install/appname-install.sh │ -│ ↓ (sources) │ -│ ├─ core.func (colors, messaging) │ -│ ├─ error_handler.func (error trapping) │ -│ ├─ install.func (setup/network) │ -│ └─ tools.func (packages/tools) │ -│ │ -└─────────────────────────────────────────────┘ - -┌─────────────────────────────────────────────┐ -│ Alpine Container Flow │ -├─────────────────────────────────────────────┤ -│ │ -│ install/appname-install.sh (Alpine) │ -│ ↓ (sources) │ -│ ├─ 
core.func (colors) │ -│ ├─ error_handler.func (errors) │ -│ ├─ alpine-install.func (apk setup) │ -│ └─ alpine-tools.func (apk tools) │ -│ │ -└─────────────────────────────────────────────┘ - -┌─────────────────────────────────────────────┐ -│ VM Provisioning Flow │ -├─────────────────────────────────────────────┤ -│ │ -│ vm/OsName-vm.sh │ -│ ↓ (uses) │ -│ cloud-init.func │ -│ ├─ generate_cloud_init() │ -│ ├─ setup_ssh_keys() │ -│ └─ configure_network() │ -│ │ -└─────────────────────────────────────────────┘ -``` - ---- - -## 📊 **Documentation Quick Stats** - -| Library | Files | Functions | Status | -|---------|:---:|:---:|:---:| -| build.func | 7 | 50+ | ✅ Complete | -| core.func | 5 | 20+ | ✅ Complete | -| error_handler.func | 5 | 10+ | ✅ Complete | -| api.func | 5 | 5+ | ✅ Complete | -| install.func | 5 | 8+ | ✅ Complete | -| tools.func | 6 | 30+ | ✅ Complete | -| alpine-install.func | 5 | 6+ | ✅ Complete | -| alpine-tools.func | 5 | 15+ | ✅ Complete | -| cloud-init.func | 5 | 12+ | ✅ Complete | - -**Total**: 9 function libraries, 48 documentation files, 150+ functions - ---- - -## 🚀 **Getting Started** - -### For Container Creation Scripts -Start with: **[build.func/](./build.func/)** → **[tools.func/](./tools.func/)** → **[install.func/](./install.func/)** - -### For Alpine Containers -Start with: **[alpine-install.func/](./alpine-install.func/)** → **[alpine-tools.func/](./alpine-tools.func/)** - -### For VM Provisioning -Start with: **[cloud-init.func/](./cloud-init.func/)** - -### For Troubleshooting -Start with: **[error_handler.func/](./error_handler.func/)** → **[EXIT_CODES.md](../EXIT_CODES.md)** - ---- - -## 📚 **Related Top-Level Documentation** - -- **[CONTRIBUTION_GUIDE.md](../CONTRIBUTION_GUIDE.md)** - How to contribute to ProxmoxVE -- **[UPDATED_APP-ct.md](../UPDATED_APP-ct.md)** - Container script guide -- **[UPDATED_APP-install.md](../UPDATED_APP-install.md)** - Installation script guide -- 
**[DEFAULTS_SYSTEM_GUIDE.md](../DEFAULTS_SYSTEM_GUIDE.md)** - Configuration system -- **[TECHNICAL_REFERENCE.md](../TECHNICAL_REFERENCE.md)** - Architecture reference -- **[EXIT_CODES.md](../EXIT_CODES.md)** - Complete exit code reference -- **[DEV_MODE.md](../DEV_MODE.md)** - Development debugging modes -- **[CHANGELOG_MISC.md](../CHANGELOG_MISC.md)** - Change history - ---- - -## 🔄 **Standardized Documentation Structure** - -Each function library follows the same documentation pattern: - -``` -function-library/ -├── README.md # Quick reference & overview -├── FUNCTION_LIBRARY_FLOWCHART.md # Visual execution flows -├── FUNCTION_LIBRARY_FUNCTIONS_REFERENCE.md # Alphabetical reference -├── FUNCTION_LIBRARY_INTEGRATION.md # Integration points -├── FUNCTION_LIBRARY_USAGE_EXAMPLES.md # Practical examples -└── [FUNCTION_LIBRARY_ENVIRONMENT_VARIABLES.md] # (if applicable) -``` - -**Advantages**: -- ✅ Consistent navigation across all libraries -- ✅ Quick reference sections in each README -- ✅ Visual flowcharts for understanding -- ✅ Complete function references -- ✅ Real-world usage examples -- ✅ Integration guides for connecting libraries - ---- - -## 📝 **Documentation Standards** - -All documentation follows these standards: - -1. **README.md** - Quick overview, key features, quick reference -2. **FLOWCHART.md** - ASCII flowcharts and visual diagrams -3. **FUNCTIONS_REFERENCE.md** - Every function with full details -4. **INTEGRATION.md** - How this library connects to others -5. **USAGE_EXAMPLES.md** - Copy-paste ready examples -6. 
**ENVIRONMENT_VARIABLES.md** - (if applicable) Configuration reference - ---- - -## ✅ **Last Updated**: December 2025 -**Maintainers**: community-scripts team -**License**: MIT -**Status**: All 9 libraries fully documented and standardized - ---- - -*This directory contains specialized documentation for specific components of the Proxmox Community Scripts project.* diff --git a/docs/misc/alpine-install.func/ALPINE_INSTALL_FUNC_FLOWCHART.md b/docs/misc/alpine-install.func/ALPINE_INSTALL_FUNC_FLOWCHART.md deleted file mode 100644 index 072d613fc..000000000 --- a/docs/misc/alpine-install.func/ALPINE_INSTALL_FUNC_FLOWCHART.md +++ /dev/null @@ -1,29 +0,0 @@ -# alpine-install.func Flowchart - -Alpine container initialization flow (apk-based, OpenRC init system). - -## Alpine Container Setup Flow - -``` -Alpine Container Started - ↓ -setting_up_container() - ↓ -verb_ip6() [optional - IPv6] - ↓ -update_os() [apk update/upgrade] - ↓ -network_check() - ↓ -Application Installation - ↓ -motd_ssh() - ↓ -customize() - ↓ -cleanup_lxc() - ↓ -Complete ✓ -``` - -**Last Updated**: December 2025 diff --git a/docs/misc/alpine-install.func/ALPINE_INSTALL_FUNC_FUNCTIONS_REFERENCE.md b/docs/misc/alpine-install.func/ALPINE_INSTALL_FUNC_FUNCTIONS_REFERENCE.md deleted file mode 100644 index 9f2cc28df..000000000 --- a/docs/misc/alpine-install.func/ALPINE_INSTALL_FUNC_FUNCTIONS_REFERENCE.md +++ /dev/null @@ -1,30 +0,0 @@ -# alpine-install.func Functions Reference - -Alpine Linux-specific installation functions (apk-based, OpenRC). - -## Core Functions - -### setting_up_container() -Initialize Alpine container setup. - -### update_os() -Update Alpine packages via `apk update && apk upgrade`. - -### verb_ip6() -Enable IPv6 on Alpine with persistent configuration. - -### network_check() -Verify network connectivity in Alpine. - -### motd_ssh() -Configure SSH daemon and MOTD on Alpine. - -### customize() -Apply Alpine-specific customizations. - -### cleanup_lxc() -Final cleanup (Alpine-specific). 
- ---- - -**Last Updated**: December 2025 diff --git a/docs/misc/alpine-install.func/ALPINE_INSTALL_FUNC_INTEGRATION.md b/docs/misc/alpine-install.func/ALPINE_INSTALL_FUNC_INTEGRATION.md deleted file mode 100644 index 8fe0f2cd3..000000000 --- a/docs/misc/alpine-install.func/ALPINE_INSTALL_FUNC_INTEGRATION.md +++ /dev/null @@ -1,14 +0,0 @@ -# alpine-install.func Integration Guide - -Integration of alpine-install.func with Alpine container workflows. - -## Alpine-Specific Integration - -Alpine containers use: -- `apk` instead of `apt-get` -- `OpenRC` instead of `systemd` -- Alpine-specific package names - ---- - -**Last Updated**: December 2025 diff --git a/docs/misc/alpine-install.func/ALPINE_INSTALL_FUNC_USAGE_EXAMPLES.md b/docs/misc/alpine-install.func/ALPINE_INSTALL_FUNC_USAGE_EXAMPLES.md deleted file mode 100644 index 15ff05630..000000000 --- a/docs/misc/alpine-install.func/ALPINE_INSTALL_FUNC_USAGE_EXAMPLES.md +++ /dev/null @@ -1,24 +0,0 @@ -# alpine-install.func Usage Examples - -Basic examples for Alpine container installation. - -### Example: Basic Alpine Setup - -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -setting_up_container -update_os - -# Install Alpine packages -apk add --no-cache curl wget git - -motd_ssh -customize -cleanup_lxc -``` - ---- - -**Last Updated**: December 2025 diff --git a/docs/misc/alpine-install.func/README.md b/docs/misc/alpine-install.func/README.md deleted file mode 100644 index dedb033d6..000000000 --- a/docs/misc/alpine-install.func/README.md +++ /dev/null @@ -1,273 +0,0 @@ -# alpine-install.func Documentation - -## Overview - -The `alpine-install.func` file provides Alpine Linux-specific installation and configuration functions for LXC containers. It complements the standard `install.func` with Alpine-specific operations using the apk package manager instead of apt. 
- -## Purpose and Use Cases - -- **Alpine Container Setup**: Initialize Alpine Linux containers with proper configuration -- **IPv6 Management**: Enable or disable IPv6 in Alpine with persistent configuration -- **Network Verification**: Verify connectivity in Alpine environments -- **SSH Configuration**: Setup SSH daemon on Alpine -- **Auto-Login Setup**: Configure passwordless root login for Alpine containers -- **Package Management**: Safe apk operations with error handling - -## Quick Reference - -### Key Function Groups -- **Initialization**: `setting_up_container()` - Alpine setup message -- **Network**: `verb_ip6()`, `network_check()` - IPv6 and connectivity -- **OS Configuration**: `update_os()` - Alpine package updates -- **SSH/MOTD**: `motd_ssh()` - SSH and login message setup -- **Container Customization**: `customize()`, `cleanup_lxc()` - Final setup - -### Dependencies -- **External**: `apk`, `curl`, `wget`, `ping` -- **Internal**: Uses functions from `core.func`, `error_handler.func` - -### Integration Points -- Used by: Alpine-based install scripts (alpine.sh, alpine-ntfy.sh, etc.) -- Uses: Environment variables from build.func -- Provides: Alpine-specific installation and management services - -## Documentation Files - -### 📊 [ALPINE_INSTALL_FUNC_FLOWCHART.md](./ALPINE_INSTALL_FUNC_FLOWCHART.md) -Visual execution flows showing Alpine container initialization and setup workflows. - -### 📚 [ALPINE_INSTALL_FUNC_FUNCTIONS_REFERENCE.md](./ALPINE_INSTALL_FUNC_FUNCTIONS_REFERENCE.md) -Complete alphabetical reference of all functions with parameters and usage details. - -### 💡 [ALPINE_INSTALL_FUNC_USAGE_EXAMPLES.md](./ALPINE_INSTALL_FUNC_USAGE_EXAMPLES.md) -Practical examples showing how to use Alpine installation functions. - -### 🔗 [ALPINE_INSTALL_FUNC_INTEGRATION.md](./ALPINE_INSTALL_FUNC_INTEGRATION.md) -How alpine-install.func integrates with standard install workflows. 
- -## Key Features - -### Alpine-Specific Functions -- **apk Package Manager**: Alpine package operations (instead of apt-get) -- **OpenRC Support**: Alpine uses OpenRC init instead of systemd -- **Lightweight Setup**: Minimal dependencies appropriate for Alpine -- **IPv6 Configuration**: Persistent IPv6 settings via `/etc/network/interfaces` - -### Network & Connectivity -- **IPv6 Toggle**: Enable/disable with persistent configuration -- **Connectivity Check**: Verify internet access in Alpine -- **DNS Verification**: Resolve domain names correctly -- **Retry Logic**: Automatic recovery from transient failures - -### SSH & Auto-Login -- **SSH Daemon**: Setup and start sshd on Alpine -- **Root Keys**: Configure root SSH access -- **Auto-Login**: Optional automatic login without password -- **MOTD**: Custom login message on Alpine - -## Function Categories - -### 🔹 Core Functions -- `setting_up_container()` - Alpine container setup message -- `update_os()` - Update Alpine packages via apk -- `verb_ip6()` - Enable/disable IPv6 persistently -- `network_check()` - Verify network connectivity - -### 🔹 SSH & Configuration Functions -- `motd_ssh()` - Configure SSH daemon on Alpine -- `customize()` - Apply Alpine-specific customizations -- `cleanup_lxc()` - Final cleanup - -### 🔹 Service Management (OpenRC) -- `rc-update` - Enable/disable services for Alpine -- `rc-service` - Start/stop services on Alpine -- Service configuration files in `/etc/init.d/` - -## Differences from Debian Install - -| Feature | Debian (install.func) | Alpine (alpine-install.func) | -|---------|:---:|:---:| -| Package Manager | apt-get | apk | -| Init System | systemd | OpenRC | -| SSH Service | systemctl | rc-service | -| Config Files | /etc/systemd/ | /etc/init.d/ | -| Network Config | /etc/network/ or Netplan | /etc/network/interfaces | -| IPv6 Setup | netplan files | /etc/network/interfaces | -| Auto-Login | getty override | `/etc/inittab` or shell config | -| Size | ~200MB | ~100MB | - -## 
Execution Flow for Alpine - -``` -Alpine Container Started - ↓ -source $FUNCTIONS_FILE_PATH - ↓ -setting_up_container() ← Alpine setup message - ↓ -update_os() ← apk update - ↓ -verb_ip6() ← IPv6 configuration (optional) - ↓ -network_check() ← Verify connectivity - ↓ -[Application-Specific Installation] - ↓ -motd_ssh() ← Configure SSH/MOTD -customize() ← Apply customizations - ↓ -cleanup_lxc() ← Final cleanup - ↓ -Alpine Installation Complete -``` - -## Common Usage Patterns - -### Basic Alpine Setup -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -setting_up_container -update_os - -# Install Alpine-specific packages -apk add --no-cache curl wget git - -# ... application installation ... - -motd_ssh -customize -cleanup_lxc -``` - -### With IPv6 Enabled -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -setting_up_container -verb_ip6 -update_os -network_check - -# ... application installation ... - -motd_ssh -customize -cleanup_lxc -``` - -### Installing Services -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -setting_up_container -update_os - -# Install via apk -apk add --no-cache nginx - -# Enable and start service on Alpine -rc-update add nginx -rc-service nginx start - -motd_ssh -customize -cleanup_lxc -``` - -## Best Practices - -### ✅ DO -- Use `apk add --no-cache` to reduce image size -- Enable IPv6 if application needs it (`verb_ip6`) -- Use `rc-service` for service management on Alpine -- Check `/etc/network/interfaces` for IPv6 persistence -- Test network connectivity before critical operations -- Use `$STD` for output suppression in production - -### ❌ DON'T -- Use `apt-get` commands (Alpine doesn't have apt) -- Use `systemctl` (Alpine uses OpenRC, not systemd) -- Use `service` command (it may not exist on Alpine) -- Assume systemd exists on Alpine -- Forget to add `--no-cache` flag to `apk add` -- Hardcode paths from Debian (different on Alpine) - -## Alpine-Specific 
Considerations - -### Package Names -Some packages have different names on Alpine: -```bash -# Debian → Alpine -# curl → curl (same) -# wget → wget (same) -# python3 → python3 (same) -# libpq5 → postgresql-client -# libmariadb3 → mariadb-client -``` - -### Service Management -```bash -# Debian (systemd) → Alpine (OpenRC) -systemctl start nginx → rc-service nginx start -systemctl enable nginx → rc-update add nginx -systemctl status nginx → rc-service nginx status -``` - -### Network Configuration -```bash -# Debian (Netplan) → Alpine (/etc/network/interfaces) -/etc/netplan/01-*.yaml → /etc/network/interfaces -netplan apply → Configure directly in interfaces - -# Enable IPv6 persistently on Alpine: -# Add to /etc/network/interfaces: -# iface eth0 inet6 static -# address -``` - -## Troubleshooting - -### "apk command not found" -- This is Alpine Linux, not Debian -- Install packages with `apk add` instead of `apt-get install` -- Example: `apk add --no-cache curl wget` - -### "IPv6 not persisting after reboot" -- IPv6 must be configured in `/etc/network/interfaces` -- The `verb_ip6()` function handles this automatically -- Verify: `cat /etc/network/interfaces` - -### "Service won't start on Alpine" -- Alpine uses OpenRC, not systemd -- Use `rc-service nginx start` instead of `systemctl start nginx` -- Enable service: `rc-update add nginx` -- Check logs: `/var/log/` or `rc-service nginx status` - -### "Container too large" -- Alpine should be much smaller than Debian -- Verify using `apk add --no-cache` (removes package cache) -- Example: `apk add --no-cache nginx` (not `apk add nginx`) - -## Related Documentation - -- **[alpine-tools.func/](../alpine-tools.func/)** - Alpine tool installation -- **[install.func/](../install.func/)** - Standard installation functions -- **[core.func/](../core.func/)** - Utility functions -- **[error_handler.func/](../error_handler.func/)** - Error handling -- **[UPDATED_APP-install.md](../../UPDATED_APP-install.md)** - Application script 
guide - -## Recent Updates - -### Version 2.0 (Dec 2025) -- ✅ Enhanced IPv6 persistence configuration -- ✅ Improved OpenRC service management -- ✅ Better apk error handling -- ✅ Added Alpine-specific best practices documentation -- ✅ Streamlined SSH setup for Alpine - ---- - -**Last Updated**: December 2025 -**Maintainers**: community-scripts team -**License**: MIT diff --git a/docs/misc/alpine-tools.func/ALPINE_TOOLS_FUNC_FLOWCHART.md b/docs/misc/alpine-tools.func/ALPINE_TOOLS_FUNC_FLOWCHART.md deleted file mode 100644 index 9c5f9235b..000000000 --- a/docs/misc/alpine-tools.func/ALPINE_TOOLS_FUNC_FLOWCHART.md +++ /dev/null @@ -1,25 +0,0 @@ -# alpine-tools.func Flowchart - -Alpine tool installation and package management flow. - -## Tool Installation on Alpine - -``` -apk_update() - ↓ -add_community_repo() [optional] - ↓ -apk_add PACKAGES - ↓ -Tool Installation - ↓ -rc-service start - ↓ -rc-update add [enable at boot] - ↓ -Complete ✓ -``` - ---- - -**Last Updated**: December 2025 diff --git a/docs/misc/alpine-tools.func/ALPINE_TOOLS_FUNC_FUNCTIONS_REFERENCE.md b/docs/misc/alpine-tools.func/ALPINE_TOOLS_FUNC_FUNCTIONS_REFERENCE.md deleted file mode 100644 index 8d653e97b..000000000 --- a/docs/misc/alpine-tools.func/ALPINE_TOOLS_FUNC_FUNCTIONS_REFERENCE.md +++ /dev/null @@ -1,31 +0,0 @@ -# alpine-tools.func Functions Reference - -Alpine-specific tool installation functions. - -## Core Functions - -### apk_update() -Update Alpine package lists. - -### apk_add(PACKAGES) -Install Alpine packages. - -### apk_del(PACKAGES) -Remove Alpine packages. - -### add_community_repo() -Enable Alpine community repository. - -### add_testing_repo() -Enable Alpine testing repository. 
- -### Alpine Tool Functions -- `setup_nodejs()` - Alpine Node.js -- `setup_php()` - Alpine PHP -- `setup_mariadb()` - Alpine MariaDB -- `setup_postgresql()` - Alpine PostgreSQL -- (+ more Alpine-specific setups) - ---- - -**Last Updated**: December 2025 diff --git a/docs/misc/alpine-tools.func/ALPINE_TOOLS_FUNC_INTEGRATION.md b/docs/misc/alpine-tools.func/ALPINE_TOOLS_FUNC_INTEGRATION.md deleted file mode 100644 index ac30a73be..000000000 --- a/docs/misc/alpine-tools.func/ALPINE_TOOLS_FUNC_INTEGRATION.md +++ /dev/null @@ -1,7 +0,0 @@ -# alpine-tools.func Integration Guide - -Alpine tool installation integration with container workflows. - ---- - -**Last Updated**: December 2025 diff --git a/docs/misc/alpine-tools.func/ALPINE_TOOLS_FUNC_USAGE_EXAMPLES.md b/docs/misc/alpine-tools.func/ALPINE_TOOLS_FUNC_USAGE_EXAMPLES.md deleted file mode 100644 index e2b88204d..000000000 --- a/docs/misc/alpine-tools.func/ALPINE_TOOLS_FUNC_USAGE_EXAMPLES.md +++ /dev/null @@ -1,19 +0,0 @@ -# alpine-tools.func Usage Examples - -Examples for Alpine tool installation. - -### Example: Alpine Setup with Tools - -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -apk_update -setup_nodejs "20" -setup_php "8.3" -setup_mariadb "11" -``` - ---- - -**Last Updated**: December 2025 diff --git a/docs/misc/alpine-tools.func/README.md b/docs/misc/alpine-tools.func/README.md deleted file mode 100644 index 20ca097a7..000000000 --- a/docs/misc/alpine-tools.func/README.md +++ /dev/null @@ -1,297 +0,0 @@ -# alpine-tools.func Documentation - -## Overview - -The `alpine-tools.func` file provides Alpine Linux-specific tool installation functions for package and service management within Alpine LXC containers. It complements `tools.func` with Alpine-specific implementations using the apk package manager. 
- -## Purpose and Use Cases - -- **Alpine Tool Installation**: Install services and tools using apk on Alpine -- **Package Management**: Safe apk operations with error handling -- **Service Setup**: Install and configure common services on Alpine -- **Dependency Management**: Handle Alpine-specific package dependencies -- **Repository Management**: Setup and manage Alpine package repositories - -## Quick Reference - -### Key Function Groups -- **Package Operations**: Alpine-specific apk commands with error handling -- **Service Installation**: Install databases, web servers, tools on Alpine -- **Repository Setup**: Configure Alpine community and testing repositories -- **Tool Setup**: Install development tools and utilities - -### Dependencies -- **External**: `apk`, `curl`, `wget` -- **Internal**: Uses functions from `core.func`, `error_handler.func` - -### Integration Points -- Used by: Alpine-based application install scripts -- Uses: Environment variables from build.func -- Provides: Alpine package and tool installation services - -## Documentation Files - -### 📊 [ALPINE_TOOLS_FUNC_FLOWCHART.md](./ALPINE_TOOLS_FUNC_FLOWCHART.md) -Visual execution flows for package operations and tool installation on Alpine. - -### 📚 [ALPINE_TOOLS_FUNC_FUNCTIONS_REFERENCE.md](./ALPINE_TOOLS_FUNC_FUNCTIONS_REFERENCE.md) -Complete alphabetical reference of all Alpine tool functions. - -### 💡 [ALPINE_TOOLS_FUNC_USAGE_EXAMPLES.md](./ALPINE_TOOLS_FUNC_USAGE_EXAMPLES.md) -Practical examples for common Alpine installation patterns. - -### 🔗 [ALPINE_TOOLS_FUNC_INTEGRATION.md](./ALPINE_TOOLS_FUNC_INTEGRATION.md) -How alpine-tools.func integrates with Alpine installation workflows. 
- -## Key Features - -### Alpine Package Management -- **apk Add**: Safe package installation with error handling -- **apk Update**: Update package lists with retry logic -- **apk Del**: Remove packages and dependencies -- **Repository Configuration**: Add community and testing repos - -### Alpine Tool Coverage -- **Web Servers**: nginx, lighttpd -- **Databases**: mariadb, postgresql, sqlite -- **Development**: gcc, make, git, node.js (via apk) -- **Services**: sshd, docker, podman -- **Utilities**: curl, wget, htop, vim - -### Error Handling -- **Retry Logic**: Automatic recovery from transient failures -- **Dependency Resolution**: Handle missing dependencies -- **Lock Management**: Wait for apk locks to release -- **Error Reporting**: Clear error messages - -## Function Categories - -### 🔹 Package Management -- `apk_update()` - Update Alpine packages with retry -- `apk_add()` - Install packages safely -- `apk_del()` - Remove packages completely - -### 🔹 Repository Functions -- `add_community_repo()` - Enable community repositories -- `add_testing_repo()` - Enable testing repositories -- `setup_apk_repo()` - Configure custom apk repositories - -### 🔹 Service Installation Functions -- `setup_nginx()` - Install and configure nginx -- `setup_mariadb()` - Install MariaDB on Alpine -- `setup_postgresql()` - Install PostgreSQL -- `setup_docker()` - Install Docker on Alpine -- `setup_nodejs()` - Install Node.js from Alpine repos - -### 🔹 Development Tools -- `setup_build_tools()` - Install gcc, make, build-essential -- `setup_git()` - Install git version control -- `setup_python()` - Install Python 3 and pip - -## Alpine vs Debian Package Differences - -| Package | Debian | Alpine | -|---------|:---:|:---:| -| nginx | `apt-get install nginx` | `apk add nginx` | -| mariadb | `apt-get install mariadb-server` | `apk add mariadb` | -| PostgreSQL | `apt-get install postgresql` | `apk add postgresql` | -| Node.js | `apt-get install nodejs npm` | `apk add nodejs npm` | -| 
Docker | Special setup | `apk add docker` | -| Python | `apt-get install python3 python3-pip` | `apk add python3 py3-pip` | - -## Common Usage Patterns - -### Basic Alpine Tool Installation -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -# Update package lists -apk_update - -# Install nginx -apk_add nginx - -# Start service -rc-service nginx start -rc-update add nginx -``` - -### With Community Repository -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -# Enable community repo for more packages -add_community_repo - -# Update and install -apk_update -apk_add postgresql postgresql-client - -# Start service -rc-service postgresql start -``` - -### Development Environment -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -# Install build tools -setup_build_tools -setup_git -setup_nodejs "20" - -# Install application -git clone https://example.com/app -cd app -npm install -``` - -## Best Practices - -### ✅ DO -- Always use `apk add --no-cache` to keep images small -- Call `apk_update()` before installing packages -- Use community repository for more packages (`add_community_repo`) -- Handle apk locks gracefully with retry logic -- Use `$STD` variable for output control -- Check if tool already installed before reinstalling - -### ❌ DON'T -- Use `apt-get` commands (Alpine doesn't have apt) -- Install packages without `--no-cache` flag -- Hardcode Alpine-specific paths -- Mix Alpine and Debian commands -- Forget to enable services with `rc-update` -- Use `systemctl` (Alpine has OpenRC, not systemd) - -## Alpine Repository Configuration - -### Default Repositories -Alpine comes with main repository enabled by default. Additional repos: - -```bash -# Community repository (apk add php, go, rust, etc.) 
-add_community_repo - -# Testing repository (bleeding edge packages) -add_testing_repo -``` - -### Repository Locations -```bash -/etc/apk/repositories # Main repo list -/etc/apk/keys/ # GPG keys for repos -/var/cache/apk/ # Package cache -``` - -## Package Size Optimization - -Alpine is designed for small container images: - -```bash -# DON'T: Leaves package cache (increases image size) -apk add nginx - -# DO: Remove cache to reduce size -apk add --no-cache nginx - -# Expected sizes: -# Alpine base: ~5MB -# Alpine + nginx: ~10-15MB -# Debian base: ~75MB -# Debian + nginx: ~90-95MB -``` - -## Service Management on Alpine - -### Using OpenRC -```bash -# Start service immediately -rc-service nginx start - -# Stop service -rc-service nginx stop - -# Restart service -rc-service nginx restart - -# Enable at boot -rc-update add nginx - -# Disable at boot -rc-update del nginx - -# List enabled services -rc-update show -``` - -## Troubleshooting - -### "apk: lock is held by PID" -```bash -# Alpine apk database is locked (another process using apk) -# Wait a moment -sleep 5 -apk_update - -# Or manually: -rm /var/lib/apk/lock 2>/dev/null || true -apk update -``` - -### "Package not found" -```bash -# May be in community or testing repository -add_community_repo -apk_update -apk_add package-name -``` - -### "Repository not responding" -```bash -# Alpine repo may be slow or unreachable -# Try updating again with retry logic -apk_update # Built-in retry logic - -# Or manually retry -sleep 10 -apk update -``` - -### "Service fails to start" -```bash -# Check service status on Alpine -rc-service nginx status - -# View logs -tail /var/log/nginx/error.log - -# Verify configuration -nginx -t -``` - -## Related Documentation - -- **[alpine-install.func/](../alpine-install.func/)** - Alpine installation functions -- **[tools.func/](../tools.func/)** - Debian/standard tool installation -- **[core.func/](../core.func/)** - Utility functions -- 
**[error_handler.func/](../error_handler.func/)** - Error handling -- **[UPDATED_APP-install.md](../../UPDATED_APP-install.md)** - Application script guide - -## Recent Updates - -### Version 2.0 (Dec 2025) -- ✅ Enhanced apk error handling and retry logic -- ✅ Improved repository management -- ✅ Better service management with OpenRC -- ✅ Added Alpine-specific optimization guidance -- ✅ Enhanced package cache management - ---- - -**Last Updated**: December 2025 -**Maintainers**: community-scripts team -**License**: MIT diff --git a/docs/misc/api.func/API_FLOWCHART.md b/docs/misc/api.func/API_FLOWCHART.md deleted file mode 100644 index a46cd56e9..000000000 --- a/docs/misc/api.func/API_FLOWCHART.md +++ /dev/null @@ -1,342 +0,0 @@ -# api.func Execution Flowchart - -## Main API Communication Flow - -``` -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ API Communication Initialization │ -│ Entry point when api.func functions are called by installation scripts │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Prerequisites Check │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Prerequisites Validation │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Check curl │ │ Check │ │ Check │ │ │ -│ │ │ Availability │ │ Diagnostics │ │ Random UUID │ │ │ -│ │ │ │ │ Setting │ │ │ │ -│ │ │ • command -v │ │ • DIAGNOSTICS │ │ • RANDOM_UUID │ │ -│ │ │ curl │ │ = "yes" │ │ not empty │ │ -│ │ │ • Return if │ │ • Return if │ │ • Return if │ │ -│ │ │ not found │ │ disabled │ │ not set │ │ -│ │ │ │ │ │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ 
-└─────────────────────────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Data Collection │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ System Information Gathering │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Get PVE │ │ Collect │ │ Prepare JSON │ │ │ -│ │ │ Version │ │ Environment │ │ Payload │ │ -│ │ │ │ │ Variables │ │ │ │ -│ │ │ • pveversion │ │ • CT_TYPE │ │ • Create JSON │ │ -│ │ │ command │ │ • DISK_SIZE │ │ structure │ │ -│ │ │ • Parse version │ │ • CORE_COUNT │ │ • Include all │ │ -│ │ │ • Extract │ │ • RAM_SIZE │ │ variables │ │ -│ │ │ major.minor │ │ • var_os │ │ • Format for API │ │ -│ │ │ │ │ • var_version │ │ │ │ -│ │ │ │ │ • NSAPP │ │ │ │ -│ │ │ │ │ • METHOD │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ API Request Execution │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ HTTP Request Processing │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Prepare │ │ Execute │ │ Handle │ │ │ -│ │ │ Request │ │ HTTP Request │ │ Response │ │ -│ │ │ │ │ │ │ │ │ -│ │ │ • Set API URL │ │ • curl -s -w │ │ • Capture HTTP │ │ -│ │ │ • Set headers │ │ "%{http_code}" │ │ status code │ │ -│ │ │ • Set payload │ │ • POST request │ │ • Store response │ │ -│ │ │ • Content-Type │ │ • JSON data │ │ • Handle errors │ │ -│ │ │ application/ │ │ • Follow │ │ gracefully │ │ -│ │ │ json │ │ redirects │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ 
└─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ -``` - -## LXC API Reporting Flow - -``` -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ POST_TO_API() Flow │ -│ Send LXC container installation data to API │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ LXC Data Preparation │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ LXC-Specific Data Collection │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Set LXC │ │ Include LXC │ │ Set Status │ │ │ -│ │ │ Type │ │ Variables │ │ Information │ │ -│ │ │ │ │ │ │ │ │ -│ │ │ • ct_type: 1 │ │ • DISK_SIZE │ │ • status: │ │ -│ │ │ • type: "lxc" │ │ • CORE_COUNT │ │ "installing" │ │ -│ │ │ • Include all │ │ • RAM_SIZE │ │ • Include all │ │ -│ │ │ LXC data │ │ • var_os │ │ tracking data │ │ -│ │ │ │ │ • var_version │ │ │ │ -│ │ │ │ │ • DISABLEIP6 │ │ │ │ -│ │ │ │ │ • NSAPP │ │ │ │ -│ │ │ │ │ • METHOD │ │ │ │ -│ │ │ │ │ • pve_version │ │ │ │ -│ │ │ │ │ • random_id │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ JSON Payload Creation │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ JSON Structure Generation │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Create JSON │ │ Validate │ │ Format for │ │ │ -│ │ │ Structure │ │ Data │ │ API Request │ │ -│ │ │ │ │ │ │ │ │ -│ │ │ • 
Use heredoc │ │ • Check all │ │ • Ensure proper │ │ -│ │ │ syntax │ │ variables │ │ JSON format │ │ -│ │ │ • Include all │ │ are set │ │ • Escape special │ │ -│ │ │ required │ │ • Validate │ │ characters │ │ -│ │ │ fields │ │ data types │ │ • Set content │ │ -│ │ │ • Format │ │ • Handle │ │ type │ │ -│ │ │ properly │ │ missing │ │ │ │ -│ │ │ │ │ values │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ -``` - -## VM API Reporting Flow - -``` -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ POST_TO_API_VM() Flow │ -│ Send VM installation data to API │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ VM Data Preparation │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ VM-Specific Data Collection │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Check │ │ Set VM │ │ Process Disk │ │ │ -│ │ │ Diagnostics │ │ Type │ │ Size │ │ -│ │ │ File │ │ │ │ │ │ -│ │ │ │ │ • ct_type: 2 │ │ • Remove 'G' │ │ -│ │ │ • Check file │ │ • type: "vm" │ │ suffix │ │ -│ │ │ existence │ │ • Include all │ │ • Convert to │ │ -│ │ │ • Read │ │ VM data │ │ numeric value │ │ -│ │ │ DIAGNOSTICS │ │ │ │ • Store in │ │ -│ │ │ setting │ │ │ │ DISK_SIZE_API │ │ -│ │ │ • Parse value │ │ │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ VM JSON Payload 
Creation │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ VM-Specific JSON Structure │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Include VM │ │ Set VM │ │ Format VM │ │ │ -│ │ │ Variables │ │ Status │ │ Data for API │ │ -│ │ │ │ │ │ │ │ │ -│ │ │ • DISK_SIZE_API │ │ • status: │ │ • Ensure proper │ │ -│ │ │ • CORE_COUNT │ │ "installing" │ │ JSON format │ │ -│ │ │ • RAM_SIZE │ │ • Include all │ │ • Handle VM- │ │ -│ │ │ • var_os │ │ tracking │ │ specific data │ │ -│ │ │ • var_version │ │ information │ │ • Set appropriate │ │ -│ │ │ • NSAPP │ │ │ │ content type │ │ -│ │ │ • METHOD │ │ │ │ │ │ -│ │ │ • pve_version │ │ │ │ │ │ -│ │ │ • random_id │ │ │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ -``` - -## Status Update Flow - -``` -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ POST_UPDATE_TO_API() Flow │ -│ Send installation completion status to API │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Update Prevention Check │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Duplicate Update Prevention │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Check │ │ Set Flag │ │ Return Early │ │ │ -│ │ │ POST_UPDATE_ │ │ if First │ │ if Already │ │ -│ │ │ DONE │ │ Update │ │ Updated │ │ -│ │ │ │ │ │ │ │ │ -│ │ │ • Check if │ │ • Set │ │ • Return 0 │ │ -│ │ │ already │ │ POST_UPDATE_ │ │ • Skip API call │ │ -│ │ │ updated │ │ DONE=true │ │ • Prevent │ │ -│ │ │ • Prevent │ │ • Continue │ │ duplicate │ │ -│ │ │ duplicate │ │ 
with update │ │ requests │ │ -│ │ │ requests │ │ │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Status and Error Processing │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Status Determination │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Determine │ │ Get Error │ │ Prepare Status │ │ │ -│ │ │ Status │ │ Description │ │ Data │ │ -│ │ │ │ │ │ │ │ │ -│ │ │ • status: │ │ • Call │ │ • Include status │ │ -│ │ │ "success" or │ │ get_error_ │ │ • Include error │ │ -│ │ │ "failed" │ │ description() │ │ description │ │ -│ │ │ • Set exit │ │ • Get human- │ │ • Include random │ │ -│ │ │ code based │ │ readable │ │ ID for tracking │ │ -│ │ │ on status │ │ error message │ │ │ │ -│ │ │ • Default to │ │ • Handle │ │ │ │ -│ │ │ error if │ │ unknown │ │ │ │ -│ │ │ not set │ │ errors │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Status Update API Request │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Status Update Payload Creation │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Create │ │ Send Status │ │ Mark Update │ │ │ -│ │ │ Status JSON │ │ Update │ │ Complete │ │ -│ │ │ │ │ │ │ │ │ -│ │ │ • Include │ │ • POST to │ │ • Set │ │ -│ │ │ status │ │ updatestatus │ │ POST_UPDATE_ │ │ -│ │ │ • Include │ │ 
endpoint │ │ DONE=true │ │ -│ │ │ error │ │ • Include JSON │ │ • Prevent further │ │ -│ │ │ description │ │ payload │ │ updates │ │ -│ │ │ • Include │ │ • Handle │ │ • Complete │ │ -│ │ │ random_id │ │ response │ │ process │ │ -│ │ │ │ │ gracefully │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ -``` - -## Error Description Flow - -``` -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ GET_ERROR_DESCRIPTION() Flow │ -│ Convert numeric exit codes to human-readable explanations │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Error Code Classification │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Error Code Categories │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ General │ │ Network │ │ LXC-Specific │ │ │ -│ │ │ System │ │ Errors │ │ Errors │ │ -│ │ │ Errors │ │ │ │ │ │ -│ │ │ │ │ • 18: Connection│ │ • 100-101: LXC │ │ -│ │ │ • 0-9: Basic │ │ failed │ │ install errors │ │ -│ │ │ errors │ │ • 22: Invalid │ │ • 200-209: LXC │ │ -│ │ │ • 126-128: │ │ argument │ │ creation errors │ │ -│ │ │ Command │ │ • 28: No space │ │ │ │ -│ │ │ errors │ │ • 35: Timeout │ │ │ │ -│ │ │ • 129-143: │ │ • 56: TLS error │ │ │ │ -│ │ │ Signal │ │ • 60: SSL cert │ │ │ │ -│ │ │ errors │ │ error │ │ │ │ -│ │ │ • 152: Resource │ │ │ │ │ │ -│ │ │ limit │ │ │ │ │ │ -│ │ │ • 255: Unknown │ │ │ │ │ │ -│ │ │ critical │ │ │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ 
-└─────────────────────────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Error Message Return │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Error Message Formatting │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Match Error │ │ Return │ │ Default Case │ │ │ -│ │ │ Code │ │ Description │ │ │ │ -│ │ │ │ │ │ │ │ │ -│ │ │ • Use case │ │ • Return │ │ • Return "Unknown │ │ -│ │ │ statement │ │ human- │ │ error code │ │ -│ │ │ • Match │ │ readable │ │ (exit_code)" │ │ -│ │ │ specific │ │ message │ │ • Handle │ │ -│ │ │ codes │ │ • Include │ │ unrecognized │ │ -│ │ │ • Handle │ │ context │ │ codes │ │ -│ │ │ ranges │ │ information │ │ • Provide fallback │ │ -│ │ │ │ │ │ │ message │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ -``` - -## Integration Points - -### With Installation Scripts -- **build.func**: Sends LXC installation data -- **vm-core.func**: Sends VM installation data -- **install.func**: Reports installation status -- **alpine-install.func**: Reports Alpine installation data - -### With Error Handling -- **error_handler.func**: Provides error explanations -- **core.func**: Uses error descriptions in silent execution -- **Diagnostic reporting**: Tracks error patterns - -### External Dependencies -- **curl**: HTTP client for API communication -- **Community Scripts API**: External API endpoint -- **Network connectivity**: Required for API communication diff --git a/docs/misc/api.func/API_FUNCTIONS_REFERENCE.md b/docs/misc/api.func/API_FUNCTIONS_REFERENCE.md deleted file mode 100644 index 98477ad87..000000000 --- a/docs/misc/api.func/API_FUNCTIONS_REFERENCE.md +++ 
/dev/null @@ -1,433 +0,0 @@ -# api.func Functions Reference - -## Overview - -This document provides a comprehensive alphabetical reference of all functions in `api.func`, including parameters, dependencies, usage examples, and error handling. - -## Function Categories - -### Error Description Functions - -#### `get_error_description()` -**Purpose**: Convert numeric exit codes to human-readable explanations -**Parameters**: -- `$1` - Exit code to explain -**Returns**: Human-readable error explanation string -**Side Effects**: None -**Dependencies**: None -**Environment Variables Used**: None - -**Supported Exit Codes**: -- **General System**: 0-9, 18, 22, 28, 35, 56, 60, 125-128, 129-143, 152, 255 -- **LXC-Specific**: 100-101, 200-209 -- **Docker**: 125 - -**Usage Example**: -```bash -error_msg=$(get_error_description 127) -echo "Error 127: $error_msg" -# Output: Error 127: Command not found: Incorrect path or missing dependency. -``` - -**Error Code Examples**: -```bash -get_error_description 0 # " " (space) -get_error_description 1 # "General error: An unspecified error occurred." -get_error_description 127 # "Command not found: Incorrect path or missing dependency." -get_error_description 200 # "LXC creation failed." -get_error_description 255 # "Unknown critical error, often due to missing permissions or broken scripts." 
-``` - -### API Communication Functions - -#### `post_to_api()` -**Purpose**: Send LXC container installation data to community-scripts.org API -**Parameters**: None (uses environment variables) -**Returns**: None -**Side Effects**: -- Sends HTTP POST request to API -- Stores response in RESPONSE variable -- Requires curl command and network connectivity -**Dependencies**: `curl` command -**Environment Variables Used**: `DIAGNOSTICS`, `RANDOM_UUID`, `CT_TYPE`, `DISK_SIZE`, `CORE_COUNT`, `RAM_SIZE`, `var_os`, `var_version`, `DISABLEIP6`, `NSAPP`, `METHOD` - -**Prerequisites**: -- `curl` command must be available -- `DIAGNOSTICS` must be set to "yes" -- `RANDOM_UUID` must be set and not empty - -**API Endpoint**: `https://api.community-scripts.org/dev/upload` - -**JSON Payload Structure**: -```json -{ - "ct_type": 1, - "type": "lxc", - "disk_size": 8, - "core_count": 2, - "ram_size": 2048, - "os_type": "debian", - "os_version": "12", - "disableip6": "true", - "nsapp": "plex", - "method": "install", - "pve_version": "8.0", - "status": "installing", - "random_id": "uuid-string" -} -``` - -**Usage Example**: -```bash -export DIAGNOSTICS="yes" -export RANDOM_UUID="$(uuidgen)" -export CT_TYPE=1 -export DISK_SIZE=8 -export CORE_COUNT=2 -export RAM_SIZE=2048 -export var_os="debian" -export var_version="12" -export NSAPP="plex" -export METHOD="install" - -post_to_api -``` - -#### `post_to_api_vm()` -**Purpose**: Send VM installation data to community-scripts.org API -**Parameters**: None (uses environment variables) -**Returns**: None -**Side Effects**: -- Sends HTTP POST request to API -- Stores response in RESPONSE variable -- Requires curl command and network connectivity -**Dependencies**: `curl` command, diagnostics file -**Environment Variables Used**: `DIAGNOSTICS`, `RANDOM_UUID`, `DISK_SIZE`, `CORE_COUNT`, `RAM_SIZE`, `var_os`, `var_version`, `NSAPP`, `METHOD` - -**Prerequisites**: -- `/usr/local/community-scripts/diagnostics` file must exist -- `DIAGNOSTICS` must be 
set to "yes" in diagnostics file -- `curl` command must be available -- `RANDOM_UUID` must be set and not empty - -**API Endpoint**: `https://api.community-scripts.org/dev/upload` - -**JSON Payload Structure**: -```json -{ - "ct_type": 2, - "type": "vm", - "disk_size": 8, - "core_count": 2, - "ram_size": 2048, - "os_type": "debian", - "os_version": "12", - "disableip6": "", - "nsapp": "plex", - "method": "install", - "pve_version": "8.0", - "status": "installing", - "random_id": "uuid-string" -} -``` - -**Usage Example**: -```bash -# Create diagnostics file -echo "DIAGNOSTICS=yes" > /usr/local/community-scripts/diagnostics - -export RANDOM_UUID="$(uuidgen)" -export DISK_SIZE="8G" -export CORE_COUNT=2 -export RAM_SIZE=2048 -export var_os="debian" -export var_version="12" -export NSAPP="plex" -export METHOD="install" - -post_to_api_vm -``` - -#### `post_update_to_api()` -**Purpose**: Send installation completion status to community-scripts.org API -**Parameters**: -- `$1` - Status ("success" or "failed", default: "failed") -- `$2` - Exit code (default: 1) -**Returns**: None -**Side Effects**: -- Sends HTTP POST request to API -- Sets POST_UPDATE_DONE=true to prevent duplicates -- Stores response in RESPONSE variable -**Dependencies**: `curl` command, `get_error_description()` -**Environment Variables Used**: `DIAGNOSTICS`, `RANDOM_UUID` - -**Prerequisites**: -- `curl` command must be available -- `DIAGNOSTICS` must be set to "yes" -- `RANDOM_UUID` must be set and not empty -- POST_UPDATE_DONE must be false (prevents duplicates) - -**API Endpoint**: `https://api.community-scripts.org/dev/upload/updatestatus` - -**JSON Payload Structure**: -```json -{ - "status": "success", - "error": "Error description from get_error_description()", - "random_id": "uuid-string" -} -``` - -**Usage Example**: -```bash -export DIAGNOSTICS="yes" -export RANDOM_UUID="$(uuidgen)" - -# Report successful installation -post_update_to_api "success" 0 - -# Report failed installation 
-post_update_to_api "failed" 127 -``` - -## Function Call Hierarchy - -### API Communication Flow -``` -post_to_api() -├── Check curl availability -├── Check DIAGNOSTICS setting -├── Check RANDOM_UUID -├── Get PVE version -├── Create JSON payload -└── Send HTTP POST request - -post_to_api_vm() -├── Check diagnostics file -├── Check curl availability -├── Check DIAGNOSTICS setting -├── Check RANDOM_UUID -├── Process disk size -├── Get PVE version -├── Create JSON payload -└── Send HTTP POST request - -post_update_to_api() -├── Check POST_UPDATE_DONE flag -├── Check curl availability -├── Check DIAGNOSTICS setting -├── Check RANDOM_UUID -├── Determine status and exit code -├── Get error description -├── Create JSON payload -├── Send HTTP POST request -└── Set POST_UPDATE_DONE=true -``` - -### Error Description Flow -``` -get_error_description() -├── Match exit code -├── Return appropriate description -└── Handle unknown codes -``` - -## Error Code Reference - -### General System Errors -| Code | Description | -|------|-------------| -| 0 | (space) | -| 1 | General error: An unspecified error occurred. | -| 2 | Incorrect shell usage or invalid command arguments. | -| 3 | Unexecuted function or invalid shell condition. | -| 4 | Error opening a file or invalid path. | -| 5 | I/O error: An input/output failure occurred. | -| 6 | No such device or address. | -| 7 | Insufficient memory or resource exhaustion. | -| 8 | Non-executable file or invalid file format. | -| 9 | Failed child process execution. | -| 18 | Connection to a remote server failed. | -| 22 | Invalid argument or faulty network connection. | -| 28 | No space left on device. | -| 35 | Timeout while establishing a connection. | -| 56 | Faulty TLS connection. | -| 60 | SSL certificate error. | - -### Command Execution Errors -| Code | Description | -|------|-------------| -| 125 | Docker error: Container could not start. | -| 126 | Command not executable: Incorrect permissions or missing dependencies. 
| -| 127 | Command not found: Incorrect path or missing dependency. | -| 128 | Invalid exit signal, e.g., incorrect Git command. | - -### Signal Errors -| Code | Description | -|------|-------------| -| 129 | Signal 1 (SIGHUP): Process terminated due to hangup. | -| 130 | Signal 2 (SIGINT): Manual termination via Ctrl+C. | -| 132 | Signal 4 (SIGILL): Illegal machine instruction. | -| 133 | Signal 5 (SIGTRAP): Debugging error or invalid breakpoint signal. | -| 134 | Signal 6 (SIGABRT): Program aborted itself. | -| 135 | Signal 7 (SIGBUS): Memory error, invalid memory address. | -| 137 | Signal 9 (SIGKILL): Process forcibly terminated (OOM-killer or 'kill -9'). | -| 139 | Signal 11 (SIGSEGV): Segmentation fault, possibly due to invalid pointer access. | -| 141 | Signal 13 (SIGPIPE): Pipe closed unexpectedly. | -| 143 | Signal 15 (SIGTERM): Process terminated normally. | -| 152 | Signal 24 (SIGXCPU): CPU time limit exceeded. | - -### LXC-Specific Errors -| Code | Description | -|------|-------------| -| 100 | LXC install error: Unexpected error in create_lxc.sh. | -| 101 | LXC install error: No network connection detected. | -| 200 | LXC creation failed. | -| 201 | LXC error: Invalid Storage class. | -| 202 | User aborted menu in create_lxc.sh. | -| 203 | CTID not set in create_lxc.sh. | -| 204 | PCT_OSTYPE not set in create_lxc.sh. | -| 205 | CTID cannot be less than 100 in create_lxc.sh. | -| 206 | CTID already in use in create_lxc.sh. | -| 207 | Template not found in create_lxc.sh. | -| 208 | Error downloading template in create_lxc.sh. | -| 209 | Container creation failed, but template is intact in create_lxc.sh. | - -### Other Errors -| Code | Description | -|------|-------------| -| 255 | Unknown critical error, often due to missing permissions or broken scripts. | -| * | Unknown error code (exit_code). 
| - -## Environment Variable Dependencies - -### Required Variables -- **`DIAGNOSTICS`**: Enable/disable diagnostic reporting ("yes"/"no") -- **`RANDOM_UUID`**: Unique identifier for tracking - -### Optional Variables -- **`CT_TYPE`**: Container type (1 for LXC, 2 for VM) -- **`DISK_SIZE`**: Disk size in GB (or GB with 'G' suffix for VM) -- **`CORE_COUNT`**: Number of CPU cores -- **`RAM_SIZE`**: RAM size in MB -- **`var_os`**: Operating system type -- **`var_version`**: OS version -- **`DISABLEIP6`**: IPv6 disable setting -- **`NSAPP`**: Namespace application name -- **`METHOD`**: Installation method - -### Internal Variables -- **`POST_UPDATE_DONE`**: Prevents duplicate status updates -- **`API_URL`**: Community scripts API endpoint -- **`JSON_PAYLOAD`**: API request payload -- **`RESPONSE`**: API response -- **`DISK_SIZE_API`**: Processed disk size for VM API - -## Error Handling Patterns - -### API Communication Errors -- All API functions handle curl failures gracefully -- Network errors don't block installation process -- Missing prerequisites cause early return -- Duplicate updates are prevented - -### Error Description Errors -- Unknown error codes return generic message -- All error codes are handled with case statement -- Fallback message includes the actual error code - -### Prerequisites Validation -- Check curl availability before API calls -- Validate DIAGNOSTICS setting -- Ensure RANDOM_UUID is set -- Check for duplicate updates - -## Integration Examples - -### With build.func -```bash -#!/usr/bin/env bash -source core.func -source api.func -source build.func - -# Set up API reporting -export DIAGNOSTICS="yes" -export RANDOM_UUID="$(uuidgen)" - -# Report installation start -post_to_api - -# Container creation... -# ... build.func code ... - -# Report completion -if [[ $? -eq 0 ]]; then - post_update_to_api "success" 0 -else - post_update_to_api "failed" $? 
-fi -``` - -### With vm-core.func -```bash -#!/usr/bin/env bash -source core.func -source api.func -source vm-core.func - -# Set up API reporting -export DIAGNOSTICS="yes" -export RANDOM_UUID="$(uuidgen)" - -# Report VM installation start -post_to_api_vm - -# VM creation... -# ... vm-core.func code ... - -# Report completion -post_update_to_api "success" 0 -``` - -### With error_handler.func -```bash -#!/usr/bin/env bash -source core.func -source error_handler.func -source api.func - -# Use error descriptions -error_code=127 -error_msg=$(get_error_description $error_code) -echo "Error $error_code: $error_msg" - -# Report error to API -post_update_to_api "failed" $error_code -``` - -## Best Practices - -### API Usage -1. Always check prerequisites before API calls -2. Use unique identifiers for tracking -3. Handle API failures gracefully -4. Don't block installation on API failures - -### Error Reporting -1. Use appropriate error codes -2. Provide meaningful error descriptions -3. Report both success and failure cases -4. Prevent duplicate status updates - -### Diagnostic Reporting -1. Respect user privacy settings -2. Only send data when diagnostics enabled -3. Use anonymous tracking identifiers -4. Include relevant system information - -### Error Handling -1. Handle unknown error codes gracefully -2. Provide fallback error messages -3. Include error code in unknown error messages -4. Use consistent error message format diff --git a/docs/misc/api.func/API_INTEGRATION.md b/docs/misc/api.func/API_INTEGRATION.md deleted file mode 100644 index f325dace2..000000000 --- a/docs/misc/api.func/API_INTEGRATION.md +++ /dev/null @@ -1,643 +0,0 @@ -# api.func Integration Guide - -## Overview - -This document describes how `api.func` integrates with other components in the Proxmox Community Scripts project, including dependencies, data flow, and API surface. 
- -## Dependencies - -### External Dependencies - -#### Required Commands -- **`curl`**: HTTP client for API communication -- **`uuidgen`**: Generate unique identifiers (optional, can use other methods) - -#### Optional Commands -- **None**: No other external command dependencies - -### Internal Dependencies - -#### Environment Variables from Other Scripts -- **build.func**: Provides container creation variables -- **vm-core.func**: Provides VM creation variables -- **core.func**: Provides system information variables -- **Installation scripts**: Provide application-specific variables - -## Integration Points - -### With build.func - -#### LXC Container Reporting -```bash -# build.func uses api.func for container reporting -source core.func -source api.func -source build.func - -# Set up API reporting -export DIAGNOSTICS="yes" -export RANDOM_UUID="$(uuidgen)" - -# Container creation with API reporting -create_container() { - # Set container parameters - export CT_TYPE=1 - export DISK_SIZE="$var_disk" - export CORE_COUNT="$var_cpu" - export RAM_SIZE="$var_ram" - export var_os="$var_os" - export var_version="$var_version" - export NSAPP="$APP" - export METHOD="install" - - # Report installation start - post_to_api - - # Container creation using build.func - # ... build.func container creation logic ... - - # Report completion - if [[ $? -eq 0 ]]; then - post_update_to_api "success" 0 - else - post_update_to_api "failed" $? 
- fi -} -``` - -#### Error Reporting Integration -```bash -# build.func uses api.func for error reporting -handle_container_error() { - local exit_code=$1 - local error_msg=$(get_error_description $exit_code) - - echo "Container creation failed: $error_msg" - post_update_to_api "failed" $exit_code -} -``` - -### With vm-core.func - -#### VM Installation Reporting -```bash -# vm-core.func uses api.func for VM reporting -source core.func -source api.func -source vm-core.func - -# Set up VM API reporting -mkdir -p /usr/local/community-scripts -echo "DIAGNOSTICS=yes" > /usr/local/community-scripts/diagnostics - -export RANDOM_UUID="$(uuidgen)" - -# VM creation with API reporting -create_vm() { - # Set VM parameters - export DISK_SIZE="${var_disk}G" - export CORE_COUNT="$var_cpu" - export RAM_SIZE="$var_ram" - export var_os="$var_os" - export var_version="$var_version" - export NSAPP="$APP" - export METHOD="install" - - # Report VM installation start - post_to_api_vm - - # VM creation using vm-core.func - # ... vm-core.func VM creation logic ... - - # Report completion - post_update_to_api "success" 0 -} -``` - -### With core.func - -#### System Information Integration -```bash -# core.func provides system information for api.func -source core.func -source api.func - -# Get system information for API reporting -get_system_info_for_api() { - # Get PVE version using core.func utilities - local pve_version=$(pveversion | awk -F'[/ ]' '{print $2}') - - # Set API parameters - export var_os="$var_os" - export var_version="$var_version" - - # Use core.func error handling with api.func reporting - if silent apt-get update; then - post_update_to_api "success" 0 - else - post_update_to_api "failed" $? 
- fi -} -``` - -### With error_handler.func - -#### Error Description Integration -```bash -# error_handler.func uses api.func for error descriptions -source core.func -source error_handler.func -source api.func - -# Enhanced error handler with API reporting -enhanced_error_handler() { - local exit_code=${1:-$?} - local command=${2:-${BASH_COMMAND:-unknown}} - - # Get error description from api.func - local error_msg=$(get_error_description $exit_code) - - # Display error information - echo "Error $exit_code: $error_msg" - echo "Command: $command" - - # Report error to API - export DIAGNOSTICS="yes" - export RANDOM_UUID="$(uuidgen)" - post_update_to_api "failed" $exit_code - - # Use standard error handler - error_handler $exit_code $command -} -``` - -### With install.func - -#### Installation Process Reporting -```bash -# install.func uses api.func for installation reporting -source core.func -source api.func -source install.func - -# Installation with API reporting -install_package_with_reporting() { - local package="$1" - - # Set up API reporting - export DIAGNOSTICS="yes" - export RANDOM_UUID="$(uuidgen)" - export NSAPP="$package" - export METHOD="install" - - # Report installation start - post_to_api - - # Package installation using install.func - if install_package "$package"; then - echo "$package installed successfully" - post_update_to_api "success" 0 - return 0 - else - local exit_code=$? 
- local error_msg=$(get_error_description $exit_code) - echo "$package installation failed: $error_msg" - post_update_to_api "failed" $exit_code - return $exit_code - fi -} -``` - -### With alpine-install.func - -#### Alpine Installation Reporting -```bash -# alpine-install.func uses api.func for Alpine reporting -source core.func -source api.func -source alpine-install.func - -# Alpine installation with API reporting -install_alpine_with_reporting() { - local app="$1" - - # Set up API reporting - export DIAGNOSTICS="yes" - export RANDOM_UUID="$(uuidgen)" - export NSAPP="$app" - export METHOD="install" - export var_os="alpine" - - # Report Alpine installation start - post_to_api - - # Alpine installation using alpine-install.func - if install_alpine_app "$app"; then - echo "Alpine $app installed successfully" - post_update_to_api "success" 0 - return 0 - else - local exit_code=$? - local error_msg=$(get_error_description $exit_code) - echo "Alpine $app installation failed: $error_msg" - post_update_to_api "failed" $exit_code - return $exit_code - fi -} -``` - -### With alpine-tools.func - -#### Alpine Tools Reporting -```bash -# alpine-tools.func uses api.func for Alpine tools reporting -source core.func -source api.func -source alpine-tools.func - -# Alpine tools with API reporting -run_alpine_tool_with_reporting() { - local tool="$1" - - # Set up API reporting - export DIAGNOSTICS="yes" - export RANDOM_UUID="$(uuidgen)" - export NSAPP="alpine-tools" - export METHOD="tool" - - # Report tool execution start - post_to_api - - # Run Alpine tool using alpine-tools.func - if run_alpine_tool "$tool"; then - echo "Alpine tool $tool executed successfully" - post_update_to_api "success" 0 - return 0 - else - local exit_code=$? 
- local error_msg=$(get_error_description $exit_code) - echo "Alpine tool $tool failed: $error_msg" - post_update_to_api "failed" $exit_code - return $exit_code - fi -} -``` - -### With passthrough.func - -#### Hardware Passthrough Reporting -```bash -# passthrough.func uses api.func for hardware reporting -source core.func -source api.func -source passthrough.func - -# Hardware passthrough with API reporting -configure_passthrough_with_reporting() { - local hardware_type="$1" - - # Set up API reporting - export DIAGNOSTICS="yes" - export RANDOM_UUID="$(uuidgen)" - export NSAPP="passthrough" - export METHOD="hardware" - - # Report passthrough configuration start - post_to_api - - # Configure passthrough using passthrough.func - if configure_passthrough "$hardware_type"; then - echo "Hardware passthrough configured successfully" - post_update_to_api "success" 0 - return 0 - else - local exit_code=$? - local error_msg=$(get_error_description $exit_code) - echo "Hardware passthrough failed: $error_msg" - post_update_to_api "failed" $exit_code - return $exit_code - fi -} -``` - -### With tools.func - -#### Maintenance Operations Reporting -```bash -# tools.func uses api.func for maintenance reporting -source core.func -source api.func -source tools.func - -# Maintenance operations with API reporting -run_maintenance_with_reporting() { - local operation="$1" - - # Set up API reporting - export DIAGNOSTICS="yes" - export RANDOM_UUID="$(uuidgen)" - export NSAPP="maintenance" - export METHOD="tool" - - # Report maintenance start - post_to_api - - # Run maintenance using tools.func - if run_maintenance_operation "$operation"; then - echo "Maintenance operation $operation completed successfully" - post_update_to_api "success" 0 - return 0 - else - local exit_code=$? 
- local error_msg=$(get_error_description $exit_code) - echo "Maintenance operation $operation failed: $error_msg" - post_update_to_api "failed" $exit_code - return $exit_code - fi -} -``` - -## Data Flow - -### Input Data - -#### Environment Variables from Other Scripts -- **`CT_TYPE`**: Container type (1 for LXC, 2 for VM) -- **`DISK_SIZE`**: Disk size in GB -- **`CORE_COUNT`**: Number of CPU cores -- **`RAM_SIZE`**: RAM size in MB -- **`var_os`**: Operating system type -- **`var_version`**: OS version -- **`DISABLEIP6`**: IPv6 disable setting -- **`NSAPP`**: Namespace application name -- **`METHOD`**: Installation method -- **`DIAGNOSTICS`**: Enable/disable diagnostic reporting -- **`RANDOM_UUID`**: Unique identifier for tracking - -#### Function Parameters -- **Exit codes**: Passed to `get_error_description()` and `post_update_to_api()` -- **Status information**: Passed to `post_update_to_api()` -- **API endpoints**: Hardcoded in functions - -#### System Information -- **PVE version**: Retrieved from `pveversion` command -- **Disk size processing**: Processed for VM API (removes 'G' suffix) -- **Error codes**: Retrieved from command exit codes - -### Processing Data - -#### API Request Preparation -- **JSON payload creation**: Format data for API consumption -- **Data validation**: Ensure required fields are present -- **Error handling**: Handle missing or invalid data -- **Content type setting**: Set appropriate HTTP headers - -#### Error Processing -- **Error code mapping**: Map numeric codes to descriptions -- **Error message formatting**: Format error descriptions -- **Unknown error handling**: Handle unrecognized error codes -- **Fallback messages**: Provide default error messages - -#### API Communication -- **HTTP request preparation**: Prepare curl commands -- **Response handling**: Capture HTTP response codes -- **Error handling**: Handle network and API errors -- **Duplicate prevention**: Prevent duplicate status updates - -### Output Data - -#### API 
Communication -- **HTTP requests**: Sent to community-scripts.org API -- **Response codes**: Captured from API responses -- **Error information**: Reported to API -- **Status updates**: Sent to API - -#### Error Information -- **Error descriptions**: Human-readable error messages -- **Error codes**: Mapped to descriptions -- **Context information**: Error context and details -- **Fallback messages**: Default error messages - -#### System State -- **POST_UPDATE_DONE**: Prevents duplicate updates -- **RESPONSE**: Stores API response -- **JSON_PAYLOAD**: Stores formatted API data -- **API_URL**: Stores API endpoint - -## API Surface - -### Public Functions - -#### Error Description -- **`get_error_description()`**: Convert exit codes to explanations -- **Parameters**: Exit code to explain -- **Returns**: Human-readable explanation string -- **Usage**: Called by other functions and scripts - -#### API Communication -- **`post_to_api()`**: Send LXC installation data -- **`post_to_api_vm()`**: Send VM installation data -- **`post_update_to_api()`**: Send status updates -- **Parameters**: Status and exit code (for updates) -- **Returns**: None -- **Usage**: Called by installation scripts - -### Internal Functions - -#### None -- All functions in api.func are public -- No internal helper functions -- Direct implementation of all functionality - -### Global Variables - -#### Configuration Variables -- **`DIAGNOSTICS`**: Diagnostic reporting setting -- **`RANDOM_UUID`**: Unique tracking identifier -- **`POST_UPDATE_DONE`**: Duplicate update prevention - -#### Data Variables -- **`CT_TYPE`**: Container type -- **`DISK_SIZE`**: Disk size -- **`CORE_COUNT`**: CPU core count -- **`RAM_SIZE`**: RAM size -- **`var_os`**: Operating system -- **`var_version`**: OS version -- **`DISABLEIP6`**: IPv6 setting -- **`NSAPP`**: Application namespace -- **`METHOD`**: Installation method - -#### Internal Variables -- **`API_URL`**: API endpoint URL -- **`JSON_PAYLOAD`**: API request payload 
-- **`RESPONSE`**: API response -- **`DISK_SIZE_API`**: Processed disk size for VM API - -## Integration Patterns - -### Standard Integration Pattern - -```bash -#!/usr/bin/env bash -# Standard integration pattern - -# 1. Source core.func first -source core.func - -# 2. Source api.func -source api.func - -# 3. Set up API reporting -export DIAGNOSTICS="yes" -export RANDOM_UUID="$(uuidgen)" - -# 4. Set application parameters -export NSAPP="$APP" -export METHOD="install" - -# 5. Report installation start -post_to_api - -# 6. Perform installation -# ... installation logic ... - -# 7. Report completion -post_update_to_api "success" 0 -``` - -### Minimal Integration Pattern - -```bash -#!/usr/bin/env bash -# Minimal integration pattern - -source api.func - -# Basic error reporting -export DIAGNOSTICS="yes" -export RANDOM_UUID="$(uuidgen)" - -# Report failure -post_update_to_api "failed" 127 -``` - -### Advanced Integration Pattern - -```bash -#!/usr/bin/env bash -# Advanced integration pattern - -source core.func -source api.func -source error_handler.func - -# Set up comprehensive API reporting -export DIAGNOSTICS="yes" -export RANDOM_UUID="$(uuidgen)" -export CT_TYPE=1 -export DISK_SIZE=8 -export CORE_COUNT=2 -export RAM_SIZE=2048 -export var_os="debian" -export var_version="12" -export METHOD="install" - -# Enhanced error handling with API reporting -enhanced_error_handler() { - local exit_code=${1:-$?} - local command=${2:-${BASH_COMMAND:-unknown}} - - local error_msg=$(get_error_description $exit_code) - echo "Error $exit_code: $error_msg" - - post_update_to_api "failed" $exit_code - error_handler $exit_code $command -} - -trap 'enhanced_error_handler' ERR - -# Advanced operations with API reporting -post_to_api -# ... operations ... 
-post_update_to_api "success" 0 -``` - -## Error Handling Integration - -### Automatic Error Reporting -- **Error Descriptions**: Provides human-readable error messages -- **API Integration**: Reports errors to community-scripts.org API -- **Error Tracking**: Tracks error patterns for project improvement -- **Diagnostic Data**: Contributes to anonymous usage analytics - -### Manual Error Reporting -- **Custom Error Codes**: Use appropriate error codes for different scenarios -- **Error Context**: Provide context information for errors -- **Status Updates**: Report both success and failure cases -- **Error Analysis**: Analyze error patterns and trends - -### API Communication Errors -- **Network Failures**: Handle API communication failures gracefully -- **Missing Prerequisites**: Check prerequisites before API calls -- **Duplicate Prevention**: Prevent duplicate status updates -- **Error Recovery**: Handle API errors without blocking installation - -## Performance Considerations - -### API Communication Overhead -- **Minimal Impact**: API calls add minimal overhead -- **Asynchronous**: API calls don't block installation process -- **Error Handling**: API failures don't affect installation -- **Optional**: API reporting is optional and can be disabled - -### Memory Usage -- **Minimal Footprint**: API functions use minimal memory -- **Variable Reuse**: Global variables reused across functions -- **No Memory Leaks**: Proper cleanup prevents memory leaks -- **Efficient Processing**: Efficient JSON payload creation - -### Execution Speed -- **Fast API Calls**: Quick API communication -- **Efficient Error Processing**: Fast error code processing -- **Minimal Delay**: Minimal delay in API operations -- **Non-blocking**: API calls don't block installation - -## Security Considerations - -### Data Privacy -- **Anonymous Reporting**: Only anonymous data is sent -- **No Sensitive Data**: No sensitive information is transmitted -- **User Control**: Users can disable diagnostic 
reporting -- **Data Minimization**: Only necessary data is sent - -### API Security -- **HTTPS**: API communication uses secure protocols -- **Data Validation**: API data is validated before sending -- **Error Handling**: API errors are handled securely -- **No Credentials**: No authentication credentials are sent - -### Network Security -- **Secure Communication**: Uses secure HTTP protocols -- **Error Handling**: Network errors are handled gracefully -- **No Data Leakage**: No sensitive data is leaked -- **Secure Endpoints**: Uses trusted API endpoints - -## Future Integration Considerations - -### Extensibility -- **New API Endpoints**: Easy to add new API endpoints -- **Additional Data**: Easy to add new data fields -- **Error Codes**: Easy to add new error code descriptions -- **API Versions**: Easy to support new API versions - -### Compatibility -- **API Versioning**: Compatible with different API versions -- **Data Format**: Compatible with different data formats -- **Error Codes**: Compatible with different error code systems -- **Network Protocols**: Compatible with different network protocols - -### Performance -- **Optimization**: API communication can be optimized -- **Caching**: API responses can be cached -- **Batch Operations**: Multiple operations can be batched -- **Async Processing**: API calls can be made asynchronous diff --git a/docs/misc/api.func/API_USAGE_EXAMPLES.md b/docs/misc/api.func/API_USAGE_EXAMPLES.md deleted file mode 100644 index c8f6a5cdc..000000000 --- a/docs/misc/api.func/API_USAGE_EXAMPLES.md +++ /dev/null @@ -1,794 +0,0 @@ -# api.func Usage Examples - -## Overview - -This document provides practical usage examples for `api.func` functions, covering common scenarios, integration patterns, and best practices. 
- -## Basic API Setup - -### Standard API Initialization - -```bash -#!/usr/bin/env bash -# Standard API setup for LXC containers - -source api.func - -# Set up diagnostic reporting -export DIAGNOSTICS="yes" -export RANDOM_UUID="$(uuidgen)" - -# Set container parameters -export CT_TYPE=1 -export DISK_SIZE=8 -export CORE_COUNT=2 -export RAM_SIZE=2048 -export var_os="debian" -export var_version="12" -export NSAPP="plex" -export METHOD="install" - -# Report installation start -post_to_api - -# Your installation code here -# ... installation logic ... - -# Report completion -if [[ $? -eq 0 ]]; then - post_update_to_api "success" 0 -else - post_update_to_api "failed" $? -fi -``` - -### VM API Setup - -```bash -#!/usr/bin/env bash -# API setup for VMs - -source api.func - -# Create diagnostics file for VM -mkdir -p /usr/local/community-scripts -echo "DIAGNOSTICS=yes" > /usr/local/community-scripts/diagnostics - -# Set up VM parameters -export RANDOM_UUID="$(uuidgen)" -export DISK_SIZE="20G" -export CORE_COUNT=4 -export RAM_SIZE=4096 -export var_os="ubuntu" -export var_version="22.04" -export NSAPP="nextcloud" -export METHOD="install" - -# Report VM installation start -post_to_api_vm - -# Your VM installation code here -# ... VM creation logic ... 
- -# Report completion -post_update_to_api "success" 0 -``` - -## Error Description Examples - -### Basic Error Explanation - -```bash -#!/usr/bin/env bash -source api.func - -# Explain common error codes -echo "Error 0: '$(get_error_description 0)'" -echo "Error 1: $(get_error_description 1)" -echo "Error 127: $(get_error_description 127)" -echo "Error 200: $(get_error_description 200)" -echo "Error 255: $(get_error_description 255)" -``` - -### Error Code Testing - -```bash -#!/usr/bin/env bash -source api.func - -# Test all error codes -test_error_codes() { - local codes=(0 1 2 127 128 130 137 139 143 200 203 205 255) - - for code in "${codes[@]}"; do - echo "Code $code: $(get_error_description $code)" - done -} - -test_error_codes -``` - -### Error Handling with Descriptions - -```bash -#!/usr/bin/env bash -source api.func - -# Function with error handling -run_command_with_error_handling() { - local command="$1" - local description="$2" - - echo "Running: $description" - - if $command; then - echo "Success: $description" - return 0 - else - local exit_code=$? - local error_msg=$(get_error_description $exit_code) - echo "Error $exit_code: $error_msg" - return $exit_code - fi -} - -# Usage -run_command_with_error_handling "apt-get update" "Package list update" -run_command_with_error_handling "nonexistent_command" "Test command" -``` - -## API Communication Examples - -### LXC Installation Reporting - -```bash -#!/usr/bin/env bash -source api.func - -# Complete LXC installation with API reporting -install_lxc_with_reporting() { - local app="$1" - local ctid="$2" - - # Set up API reporting - export DIAGNOSTICS="yes" - export RANDOM_UUID="$(uuidgen)" - export CT_TYPE=1 - export DISK_SIZE=10 - export CORE_COUNT=2 - export RAM_SIZE=2048 - export var_os="debian" - export var_version="12" - export NSAPP="$app" - export METHOD="install" - - # Report installation start - post_to_api - - # Installation process - echo "Installing $app container (ID: $ctid)..." 
- - # Simulate installation - sleep 2 - - # Check if installation succeeded - if [[ $? -eq 0 ]]; then - echo "Installation completed successfully" - post_update_to_api "success" 0 - return 0 - else - echo "Installation failed" - post_update_to_api "failed" $? - return 1 - fi -} - -# Install multiple containers -install_lxc_with_reporting "plex" "100" -install_lxc_with_reporting "nextcloud" "101" -install_lxc_with_reporting "nginx" "102" -``` - -### VM Installation Reporting - -```bash -#!/usr/bin/env bash -source api.func - -# Complete VM installation with API reporting -install_vm_with_reporting() { - local app="$1" - local vmid="$2" - - # Create diagnostics file - mkdir -p /usr/local/community-scripts - echo "DIAGNOSTICS=yes" > /usr/local/community-scripts/diagnostics - - # Set up API reporting - export RANDOM_UUID="$(uuidgen)" - export DISK_SIZE="20G" - export CORE_COUNT=4 - export RAM_SIZE=4096 - export var_os="ubuntu" - export var_version="22.04" - export NSAPP="$app" - export METHOD="install" - - # Report VM installation start - post_to_api_vm - - # VM installation process - echo "Installing $app VM (ID: $vmid)..." - - # Simulate VM creation - sleep 3 - - # Check if VM creation succeeded - if [[ $? -eq 0 ]]; then - echo "VM installation completed successfully" - post_update_to_api "success" 0 - return 0 - else - echo "VM installation failed" - post_update_to_api "failed" $? 
- return 1 - fi -} - -# Install multiple VMs -install_vm_with_reporting "nextcloud" "200" -install_vm_with_reporting "wordpress" "201" -``` - -## Status Update Examples - -### Success Reporting - -```bash -#!/usr/bin/env bash -source api.func - -# Report successful installation -report_success() { - local operation="$1" - - export DIAGNOSTICS="yes" - export RANDOM_UUID="$(uuidgen)" - - echo "Reporting successful $operation" - post_update_to_api "success" 0 -} - -# Usage -report_success "container installation" -report_success "package installation" -report_success "service configuration" -``` - -### Failure Reporting - -```bash -#!/usr/bin/env bash -source api.func - -# Report failed installation -report_failure() { - local operation="$1" - local exit_code="$2" - - export DIAGNOSTICS="yes" - export RANDOM_UUID="$(uuidgen)" - - local error_msg=$(get_error_description $exit_code) - echo "Reporting failed $operation: $error_msg" - post_update_to_api "failed" $exit_code -} - -# Usage -report_failure "container creation" 200 -report_failure "package installation" 127 -report_failure "service start" 1 -``` - -### Conditional Status Reporting - -```bash -#!/usr/bin/env bash -source api.func - -# Conditional status reporting -report_installation_status() { - local operation="$1" - local exit_code="$2" - - export DIAGNOSTICS="yes" - export RANDOM_UUID="$(uuidgen)" - - if [[ $exit_code -eq 0 ]]; then - echo "Reporting successful $operation" - post_update_to_api "success" 0 - else - local error_msg=$(get_error_description $exit_code) - echo "Reporting failed $operation: $error_msg" - post_update_to_api "failed" $exit_code - fi -} - -# Usage -report_installation_status "container creation" 0 -report_installation_status "package installation" 127 -``` - -## Advanced Usage Examples - -### Batch Installation with API Reporting - -```bash -#!/usr/bin/env bash -source api.func - -# Batch installation with comprehensive API reporting -batch_install_with_reporting() { - local 
apps=("plex" "nextcloud" "nginx" "mysql") - local ctids=(100 101 102 103) - - # Set up API reporting - export DIAGNOSTICS="yes" - export RANDOM_UUID="$(uuidgen)" - export CT_TYPE=1 - export DISK_SIZE=8 - export CORE_COUNT=2 - export RAM_SIZE=2048 - export var_os="debian" - export var_version="12" - export METHOD="install" - - local success_count=0 - local failure_count=0 - - for i in "${!apps[@]}"; do - local app="${apps[$i]}" - local ctid="${ctids[$i]}" - - echo "Installing $app (ID: $ctid)..." - - # Set app-specific parameters - export NSAPP="$app" - - # Report installation start - post_to_api - - # Simulate installation - if install_app "$app" "$ctid"; then - echo "$app installed successfully" - post_update_to_api "success" 0 - ((success_count++)) - else - echo "$app installation failed" - post_update_to_api "failed" $? - ((failure_count++)) - fi - - echo "---" - done - - echo "Batch installation completed: $success_count successful, $failure_count failed" -} - -# Mock installation function -install_app() { - local app="$1" - local ctid="$2" - - # Simulate installation - sleep 1 - - # Simulate occasional failures - if [[ $((RANDOM % 10)) -eq 0 ]]; then - return 1 - fi - - return 0 -} - -batch_install_with_reporting -``` - -### Error Analysis and Reporting - -```bash -#!/usr/bin/env bash -source api.func - -# Analyze and report errors -analyze_and_report_errors() { - local log_file="$1" - - export DIAGNOSTICS="yes" - export RANDOM_UUID="$(uuidgen)" - - if [[ ! 
-f "$log_file" ]]; then - echo "Log file not found: $log_file" - return 1 - fi - - # Extract error codes from log - local error_codes=$(grep -o 'exit code [0-9]\+' "$log_file" | grep -o '[0-9]\+' | sort -u) - - if [[ -z "$error_codes" ]]; then - echo "No errors found in log" - post_update_to_api "success" 0 - return 0 - fi - - echo "Found error codes: $error_codes" - - # Report each unique error - for code in $error_codes; do - local error_msg=$(get_error_description $code) - echo "Error $code: $error_msg" - post_update_to_api "failed" $code - done -} - -# Usage -analyze_and_report_errors "/var/log/installation.log" -``` - -### API Health Check - -```bash -#!/usr/bin/env bash -source api.func - -# Check API connectivity and functionality -check_api_health() { - echo "Checking API health..." - - # Test prerequisites - if ! command -v curl >/dev/null 2>&1; then - echo "ERROR: curl not available" - return 1 - fi - - # Test error description function - local test_error=$(get_error_description 127) - if [[ -z "$test_error" ]]; then - echo "ERROR: Error description function not working" - return 1 - fi - - echo "Error description test: $test_error" - - # Test API connectivity (without sending data) - local api_url="https://api.community-scripts.org/dev/upload" - if curl -s --head "$api_url" >/dev/null 2>&1; then - echo "API endpoint is reachable" - else - echo "WARNING: API endpoint not reachable" - fi - - echo "API health check completed" -} - -check_api_health -``` - -## Integration Examples - -### With build.func - -```bash -#!/usr/bin/env bash -# Integration with build.func - -source core.func -source api.func -source build.func - -# Set up API reporting -export DIAGNOSTICS="yes" -export RANDOM_UUID="$(uuidgen)" - -# Container creation with API reporting -create_container_with_reporting() { - local app="$1" - local ctid="$2" - - # Set container parameters - export APP="$app" - export CTID="$ctid" - export var_hostname="${app}-server" - export var_os="debian" - export 
var_version="12" - export var_cpu="2" - export var_ram="2048" - export var_disk="10" - export var_net="vmbr0" - export var_gateway="192.168.1.1" - export var_ip="192.168.1.$ctid" - export var_template_storage="local" - export var_container_storage="local" - - # Report installation start - post_to_api - - # Create container using build.func - if source build.func; then - echo "Container $app created successfully" - post_update_to_api "success" 0 - return 0 - else - echo "Container $app creation failed" - post_update_to_api "failed" $? - return 1 - fi -} - -# Create containers -create_container_with_reporting "plex" "100" -create_container_with_reporting "nextcloud" "101" -``` - -### With vm-core.func - -```bash -#!/usr/bin/env bash -# Integration with vm-core.func - -source core.func -source api.func -source vm-core.func - -# Set up VM API reporting -mkdir -p /usr/local/community-scripts -echo "DIAGNOSTICS=yes" > /usr/local/community-scripts/diagnostics - -export RANDOM_UUID="$(uuidgen)" - -# VM creation with API reporting -create_vm_with_reporting() { - local app="$1" - local vmid="$2" - - # Set VM parameters - export APP="$app" - export VMID="$vmid" - export var_hostname="${app}-vm" - export var_os="ubuntu" - export var_version="22.04" - export var_cpu="4" - export var_ram="4096" - export var_disk="20" - - # Report VM installation start - post_to_api_vm - - # Create VM using vm-core.func - if source vm-core.func; then - echo "VM $app created successfully" - post_update_to_api "success" 0 - return 0 - else - echo "VM $app creation failed" - post_update_to_api "failed" $? 
- return 1 - fi -} - -# Create VMs -create_vm_with_reporting "nextcloud" "200" -create_vm_with_reporting "wordpress" "201" -``` - -### With error_handler.func - -```bash -#!/usr/bin/env bash -# Integration with error_handler.func - -source core.func -source error_handler.func -source api.func - -# Enhanced error handling with API reporting -enhanced_error_handler() { - local exit_code=${1:-$?} - local command=${2:-${BASH_COMMAND:-unknown}} - - # Get error description from api.func - local error_msg=$(get_error_description $exit_code) - - # Display error information - echo "Error $exit_code: $error_msg" - echo "Command: $command" - - # Report error to API - export DIAGNOSTICS="yes" - export RANDOM_UUID="$(uuidgen)" - post_update_to_api "failed" $exit_code - - # Use standard error handler - error_handler $exit_code $command -} - -# Set up enhanced error handling -trap 'enhanced_error_handler' ERR - -# Test enhanced error handling -nonexistent_command -``` - -## Best Practices Examples - -### Comprehensive API Integration - -```bash -#!/usr/bin/env bash -# Comprehensive API integration example - -source core.func -source api.func - -# Set up comprehensive API reporting -setup_api_reporting() { - # Enable diagnostics - export DIAGNOSTICS="yes" - export RANDOM_UUID="$(uuidgen)" - - # Set common parameters - export CT_TYPE=1 - export DISK_SIZE=8 - export CORE_COUNT=2 - export RAM_SIZE=2048 - export var_os="debian" - export var_version="12" - export METHOD="install" - - echo "API reporting configured" -} - -# Installation with comprehensive reporting -install_with_comprehensive_reporting() { - local app="$1" - local ctid="$2" - - # Set up API reporting - setup_api_reporting - export NSAPP="$app" - - # Report installation start - post_to_api - - # Installation process - echo "Installing $app..." - - # Simulate installation steps - local steps=("Downloading" "Installing" "Configuring" "Starting") - for step in "${steps[@]}"; do - echo "$step $app..." 
- sleep 1 - done - - # Check installation result - if [[ $? -eq 0 ]]; then - echo "$app installation completed successfully" - post_update_to_api "success" 0 - return 0 - else - echo "$app installation failed" - post_update_to_api "failed" $? - return 1 - fi -} - -# Install multiple applications -apps=("plex" "nextcloud" "nginx" "mysql") -ctids=(100 101 102 103) - -for i in "${!apps[@]}"; do - install_with_comprehensive_reporting "${apps[$i]}" "${ctids[$i]}" - echo "---" -done -``` - -### Error Recovery with API Reporting - -```bash -#!/usr/bin/env bash -source api.func - -# Error recovery with API reporting -retry_with_api_reporting() { - local operation="$1" - local max_attempts=3 - local attempt=1 - - export DIAGNOSTICS="yes" - export RANDOM_UUID="$(uuidgen)" - - while [[ $attempt -le $max_attempts ]]; do - echo "Attempt $attempt of $max_attempts: $operation" - - if $operation; then - echo "Operation succeeded on attempt $attempt" - post_update_to_api "success" 0 - return 0 - else - local exit_code=$? - local error_msg=$(get_error_description $exit_code) - echo "Attempt $attempt failed: $error_msg" - - post_update_to_api "failed" $exit_code - - ((attempt++)) - - if [[ $attempt -le $max_attempts ]]; then - echo "Retrying in 5 seconds..." 
- sleep 5 - fi - fi - done - - echo "Operation failed after $max_attempts attempts" - return 1 -} - -# Usage -retry_with_api_reporting "apt-get update" -retry_with_api_reporting "apt-get install -y package" -``` - -### API Reporting with Logging - -```bash -#!/usr/bin/env bash -source api.func - -# API reporting with detailed logging -install_with_logging_and_api() { - local app="$1" - local log_file="/var/log/${app}_installation.log" - - # Set up API reporting - export DIAGNOSTICS="yes" - export RANDOM_UUID="$(uuidgen)" - export NSAPP="$app" - - # Start logging - exec > >(tee -a "$log_file") - exec 2>&1 - - echo "Starting $app installation at $(date)" - - # Report installation start - post_to_api - - # Installation process - echo "Installing $app..." - - # Simulate installation - if install_app "$app"; then - echo "$app installation completed successfully at $(date)" - post_update_to_api "success" 0 - return 0 - else - local exit_code=$? - local error_msg=$(get_error_description $exit_code) - echo "$app installation failed at $(date): $error_msg" - post_update_to_api "failed" $exit_code - return $exit_code - fi -} - -# Mock installation function -install_app() { - local app="$1" - echo "Installing $app..." - sleep 2 - return 0 -} - -# Install with logging and API reporting -install_with_logging_and_api "plex" -``` diff --git a/docs/misc/api.func/README.md b/docs/misc/api.func/README.md deleted file mode 100644 index 6cf90d23d..000000000 --- a/docs/misc/api.func/README.md +++ /dev/null @@ -1,199 +0,0 @@ -# api.func Documentation - -## Overview - -The `api.func` file provides Proxmox API integration and diagnostic reporting functionality for the Community Scripts project. It handles API communication, error reporting, and status updates to the community-scripts.org API. 
- -## Purpose and Use Cases - -- **API Communication**: Send installation and status data to community-scripts.org API -- **Diagnostic Reporting**: Report installation progress and errors for analytics -- **Error Description**: Provide detailed error code explanations -- **Status Updates**: Track installation success/failure status -- **Analytics**: Contribute anonymous usage data for project improvement - -## Quick Reference - -### Key Function Groups -- **Error Handling**: `get_error_description()` - Convert exit codes to human-readable messages -- **API Communication**: `post_to_api()`, `post_to_api_vm()` - Send installation data -- **Status Updates**: `post_update_to_api()` - Report installation completion status - -### Dependencies -- **External**: `curl` command for HTTP requests -- **Internal**: Uses environment variables from other scripts - -### Integration Points -- Used by: All installation scripts for diagnostic reporting -- Uses: Environment variables from build.func and other scripts -- Provides: API communication and error reporting services - -## Documentation Files - -### 📊 [API_FLOWCHART.md](./API_FLOWCHART.md) -Visual execution flows showing API communication processes and error handling. - -### 📚 [API_FUNCTIONS_REFERENCE.md](./API_FUNCTIONS_REFERENCE.md) -Complete alphabetical reference of all functions with parameters, dependencies, and usage details. - -### 💡 [API_USAGE_EXAMPLES.md](./API_USAGE_EXAMPLES.md) -Practical examples showing how to use API functions and common patterns. - -### 🔗 [API_INTEGRATION.md](./API_INTEGRATION.md) -How api.func integrates with other components and provides API services. 
- -## Key Features - -### Error Code Descriptions -- **Comprehensive Coverage**: 50+ error codes with detailed explanations -- **LXC-Specific Errors**: Container creation and management errors -- **System Errors**: General system and network errors -- **Signal Errors**: Process termination and signal errors - -### API Communication -- **LXC Reporting**: Send LXC container installation data -- **VM Reporting**: Send VM installation data -- **Status Updates**: Report installation success/failure -- **Diagnostic Data**: Anonymous usage analytics - -### Diagnostic Integration -- **Optional Reporting**: Only sends data when diagnostics enabled -- **Privacy Respect**: Respects user privacy settings -- **Error Tracking**: Tracks installation errors for improvement -- **Usage Analytics**: Contributes to project statistics - -## Common Usage Patterns - -### Basic API Setup -```bash -#!/usr/bin/env bash -# Basic API setup - -source api.func - -# Set up diagnostic reporting -export DIAGNOSTICS="yes" -export RANDOM_UUID="$(uuidgen)" - -# Report installation start -post_to_api -``` - -### Error Reporting -```bash -#!/usr/bin/env bash -source api.func - -# Get error description -error_msg=$(get_error_description 127) -echo "Error 127: $error_msg" -# Output: Error 127: Command not found: Incorrect path or missing dependency. 
-``` - -### Status Updates -```bash -#!/usr/bin/env bash -source api.func - -# Report successful installation -post_update_to_api "success" 0 - -# Report failed installation -post_update_to_api "failed" 127 -``` - -## Environment Variables - -### Required Variables -- `DIAGNOSTICS`: Enable/disable diagnostic reporting ("yes"/"no") -- `RANDOM_UUID`: Unique identifier for tracking - -### Optional Variables -- `CT_TYPE`: Container type (1 for LXC, 2 for VM) -- `DISK_SIZE`: Disk size in GB -- `CORE_COUNT`: Number of CPU cores -- `RAM_SIZE`: RAM size in MB -- `var_os`: Operating system type -- `var_version`: OS version -- `DISABLEIP6`: IPv6 disable setting -- `NSAPP`: Namespace application name -- `METHOD`: Installation method - -### Internal Variables -- `POST_UPDATE_DONE`: Prevents duplicate status updates -- `API_URL`: Community scripts API endpoint -- `JSON_PAYLOAD`: API request payload -- `RESPONSE`: API response - -## Error Code Categories - -### General System Errors -- **0-9**: Basic system errors -- **18, 22, 28, 35**: Network and I/O errors -- **56, 60**: TLS/SSL errors -- **125-128**: Command execution errors -- **129-143**: Signal errors -- **152**: Resource limit errors -- **255**: Unknown critical errors - -### LXC-Specific Errors -- **100-101**: LXC installation errors -- **200-209**: LXC creation and management errors - -### Docker Errors -- **125**: Docker container start errors - -## Best Practices - -### Diagnostic Reporting -1. Always check if diagnostics are enabled -2. Respect user privacy settings -3. Use unique identifiers for tracking -4. Report both success and failure cases - -### Error Handling -1. Use appropriate error codes -2. Provide meaningful error descriptions -3. Handle API communication failures gracefully -4. Don't block installation on API failures - -### API Usage -1. Check for curl availability -2. Handle network failures gracefully -3. Use appropriate HTTP methods -4. 
Include all required data - -## Troubleshooting - -### Common Issues -1. **API Communication Fails**: Check network connectivity and curl availability -2. **Diagnostics Not Working**: Verify DIAGNOSTICS setting and RANDOM_UUID -3. **Missing Error Descriptions**: Check error code coverage -4. **Duplicate Updates**: POST_UPDATE_DONE prevents duplicates - -### Debug Mode -Enable diagnostic reporting for debugging: -```bash -export DIAGNOSTICS="yes" -export RANDOM_UUID="$(uuidgen)" -``` - -### API Testing -Test API communication: -```bash -source api.func -export DIAGNOSTICS="yes" -export RANDOM_UUID="test-$(date +%s)" -post_to_api -``` - -## Related Documentation - -- [core.func](../core.func/) - Core utilities and error handling -- [error_handler.func](../error_handler.func/) - Error handling utilities -- [build.func](../build.func/) - Container creation with API integration -- [tools.func](../tools.func/) - Extended utilities with API integration - ---- - -*This documentation covers the api.func file which provides API communication and diagnostic reporting for all Proxmox Community Scripts.* diff --git a/docs/misc/build.func/BUILD_FUNC_ADVANCED_SETTINGS.md b/docs/misc/build.func/BUILD_FUNC_ADVANCED_SETTINGS.md deleted file mode 100644 index aac584988..000000000 --- a/docs/misc/build.func/BUILD_FUNC_ADVANCED_SETTINGS.md +++ /dev/null @@ -1,164 +0,0 @@ -# Advanced Settings Wizard Reference - -## Overview - -The Advanced Settings wizard provides a 28-step interactive configuration for LXC container creation. It allows users to customize every aspect of the container while inheriting sensible defaults from the CT script. 
- -## Key Features - -- **Inherit App Defaults**: All `var_*` values from CT scripts pre-populate wizard fields -- **Back Navigation**: Press Cancel/Back to return to previous step -- **App Default Hints**: Each dialog shows `(App default: X)` to indicate script defaults -- **Full Customization**: Every configurable option is accessible - -## Wizard Steps - -| Step | Title | Variable(s) | Description | -| ---- | ------------------------ | --------------------------------- | ----------------------------------------------------- | -| 1 | Container Type | `var_unprivileged` | Privileged (0) or Unprivileged (1) container | -| 2 | Root Password | `var_pw` | Set password or use automatic login | -| 3 | Container ID | `var_ctid` | Unique container ID (auto-suggested) | -| 4 | Hostname | `var_hostname` | Container hostname | -| 5 | Disk Size | `var_disk` | Disk size in GB | -| 6 | CPU Cores | `var_cpu` | Number of CPU cores | -| 7 | RAM Size | `var_ram` | RAM size in MiB | -| 8 | Network Bridge | `var_brg` | Network bridge (vmbr0, etc.) 
| -| 9 | IPv4 Configuration | `var_net`, `var_gateway` | DHCP or static IP with gateway | -| 10 | IPv6 Configuration | `var_ipv6_method` | Auto, DHCP, Static, or None | -| 11 | MTU Size | `var_mtu` | Network MTU (default: 1500) | -| 12 | DNS Search Domain | `var_searchdomain` | DNS search domain | -| 13 | DNS Server | `var_ns` | Custom DNS server IP | -| 14 | MAC Address | `var_mac` | Custom MAC address (auto-generated if empty) | -| 15 | VLAN Tag | `var_vlan` | VLAN tag ID | -| 16 | Tags | `var_tags` | Container tags (comma/semicolon separated) | -| 17 | SSH Settings | `var_ssh` | SSH key selection and root access | -| 18 | FUSE Support | `var_fuse` | Enable FUSE for rclone, mergerfs, AppImage | -| 19 | TUN/TAP Support | `var_tun` | Enable for VPN apps (WireGuard, OpenVPN, Tailscale) | -| 20 | Nesting Support | `var_nesting` | Enable for Docker, LXC in LXC, Podman | -| 21 | GPU Passthrough | `var_gpu` | Auto-detect and pass through Intel/AMD/NVIDIA GPUs | -| 22 | Keyctl Support | `var_keyctl` | Enable for Docker, systemd-networkd | -| 23 | APT Cacher Proxy | `var_apt_cacher`, `var_apt_cacher_ip` | Use apt-cacher-ng for faster downloads | -| 24 | Container Timezone | `var_timezone` | Set timezone (e.g., Europe/Berlin) | -| 25 | Container Protection | `var_protection` | Prevent accidental deletion | -| 26 | Device Node Creation | `var_mknod` | Allow mknod (experimental, kernel 5.3+) | -| 27 | Mount Filesystems | `var_mount_fs` | Allow specific mounts: nfs, cifs, fuse, etc. 
| -| 28 | Verbose Mode & Confirm | `var_verbose` | Enable verbose output + final confirmation | - -## Default Value Inheritance - -The wizard inherits defaults from multiple sources: - -```text -CT Script (var_*) → default.vars → app.vars → User Input -``` - -### Example: VPN Container (alpine-wireguard.sh) - -```bash -# CT script sets: -var_tun="${var_tun:-1}" # TUN enabled by default - -# In Advanced Settings Step 19: -# Dialog shows: "(App default: 1)" and pre-selects "Yes" -``` - -### Example: Media Server (jellyfin.sh) - -```bash -# CT script sets: -var_gpu="${var_gpu:-yes}" # GPU enabled by default - -# In Advanced Settings Step 21: -# Dialog shows: "(App default: yes)" and pre-selects "Yes" -``` - -## Feature Matrix - -| Feature | Variable | When to Enable | -| ----------------- | ---------------- | --------------------------------------------------- | -| FUSE | `var_fuse` | rclone, mergerfs, AppImage, SSHFS | -| TUN/TAP | `var_tun` | WireGuard, OpenVPN, Tailscale, VPN containers | -| Nesting | `var_nesting` | Docker, Podman, LXC-in-LXC, systemd-nspawn | -| GPU Passthrough | `var_gpu` | Plex, Jellyfin, Emby, Frigate, Ollama, ComfyUI | -| Keyctl | `var_keyctl` | Docker (unprivileged), systemd-networkd | -| Protection | `var_protection` | Production containers, prevent accidental deletion | -| Mknod | `var_mknod` | Device node creation (experimental) | -| Mount FS | `var_mount_fs` | NFS mounts, CIFS shares, custom filesystems | -| APT Cacher | `var_apt_cacher` | Speed up downloads with local apt-cacher-ng | - -## Confirmation Summary - -Step 28 displays a comprehensive summary before creation: - -```text -Container Type: Unprivileged -Container ID: 100 -Hostname: jellyfin - -Resources: - Disk: 8 GB - CPU: 2 cores - RAM: 2048 MiB - -Network: - Bridge: vmbr0 - IPv4: dhcp - IPv6: auto - -Features: - FUSE: no | TUN: no - Nesting: Enabled | Keyctl: Disabled - GPU: yes | Protection: No - -Advanced: - Timezone: Europe/Berlin - APT Cacher: no - Verbose: no -``` - -## 
Usage Examples - -### Skip to Advanced Settings - -```bash -# Run script, select "Advanced" from menu -bash -c "$(curl -fsSL https://...jellyfin.sh)" -# Then select option 3 "Advanced" -``` - -### Pre-set Defaults via Environment - -```bash -# Set defaults before running -export var_cpu=4 -export var_ram=4096 -export var_gpu=yes -bash -c "$(curl -fsSL https://...jellyfin.sh)" -# Advanced settings will inherit these values -``` - -### Non-Interactive with All Options - -```bash -# Set all variables for fully automated deployment -export var_unprivileged=1 -export var_cpu=2 -export var_ram=2048 -export var_disk=8 -export var_net=dhcp -export var_fuse=no -export var_tun=no -export var_gpu=yes -export var_nesting=1 -export var_protection=no -export var_verbose=no -bash -c "$(curl -fsSL https://...jellyfin.sh)" -``` - -## Notes - -- **Cancel at Step 1**: Exits the script entirely -- **Cancel at Steps 2-28**: Goes back to previous step -- **Empty fields**: Use default value -- **Keyctl**: Automatically enabled for unprivileged containers -- **Nesting**: Enabled by default (required for many apps) diff --git a/docs/misc/build.func/BUILD_FUNC_ARCHITECTURE.md b/docs/misc/build.func/BUILD_FUNC_ARCHITECTURE.md deleted file mode 100644 index 1d9c5ed22..000000000 --- a/docs/misc/build.func/BUILD_FUNC_ARCHITECTURE.md +++ /dev/null @@ -1,410 +0,0 @@ -# build.func Architecture Guide - -## Overview - -This document provides a high-level architectural overview of `build.func`, including module dependencies, data flow, integration points, and system architecture. 
- -## High-Level Architecture - -``` -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Proxmox Host System │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ build.func │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────────────┐ │ │ -│ │ │ Entry Point │ │ Configuration │ │ Container Creation │ │ │ -│ │ │ │ │ │ │ │ │ │ -│ │ │ • start() │ │ • variables() │ │ • build_container() │ │ │ -│ │ │ • install_ │ │ • base_ │ │ • create_lxc_container() │ │ │ -│ │ │ script() │ │ settings() │ │ • configure_gpu_ │ │ │ -│ │ │ • advanced_ │ │ • select_ │ │ passthrough() │ │ │ -│ │ │ settings() │ │ storage() │ │ • fix_gpu_gids() │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Module Dependencies │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────────────┐ │ │ -│ │ │ core.func │ │ error_handler. 
│ │ api.func │ │ │ -│ │ │ │ │ func │ │ │ │ │ -│ │ │ • Basic │ │ • Error │ │ • Proxmox API │ │ │ -│ │ │ utilities │ │ handling │ │ interactions │ │ │ -│ │ │ • Common │ │ • Error │ │ • Container │ │ │ -│ │ │ functions │ │ recovery │ │ management │ │ │ -│ │ │ • System │ │ • Cleanup │ │ • Status │ │ │ -│ │ │ utilities │ │ functions │ │ monitoring │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────────────┘ │ │ -│ │ │ │ -│ │ ┌─────────────────────────────────────────────────────────────────────────┐ │ │ -│ │ │ tools.func │ │ │ -│ │ │ │ │ │ -│ │ │ • Additional utilities │ │ │ -│ │ │ • Helper functions │ │ │ -│ │ │ • System tools │ │ │ -│ │ └─────────────────────────────────────────────────────────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ -``` - -## Module Dependencies - -### Core Dependencies - -``` -build.func Dependencies: -├── core.func -│ ├── Basic system utilities -│ ├── Common functions -│ ├── System information -│ └── File operations -├── error_handler.func -│ ├── Error handling -│ ├── Error recovery -│ ├── Cleanup functions -│ └── Error logging -├── api.func -│ ├── Proxmox API interactions -│ ├── Container management -│ ├── Status monitoring -│ └── Configuration updates -└── tools.func - ├── Additional utilities - ├── Helper functions - ├── System tools - └── Custom functions -``` - -### Dependency Flow - -``` -Dependency Flow: -├── build.func -│ ├── Sources core.func -│ ├── Sources error_handler.func -│ ├── Sources api.func -│ └── Sources tools.func -├── core.func -│ ├── Basic utilities -│ └── System functions -├── error_handler.func -│ ├── Error management -│ └── Recovery functions -├── api.func -│ ├── Proxmox integration -│ └── Container operations -└── tools.func - ├── Additional tools - └── Helper functions -``` - -## Data Flow Architecture - -### Configuration Data Flow - -``` 
-Configuration Data Flow: -├── Environment Variables -│ ├── Hard environment variables -│ ├── App-specific .vars -│ ├── Global default.vars -│ └── Built-in defaults -├── Variable Resolution -│ ├── Apply precedence chain -│ ├── Validate settings -│ └── Resolve conflicts -├── Configuration Storage -│ ├── Memory variables -│ ├── Temporary files -│ └── Persistent storage -└── Configuration Usage - ├── Container creation - ├── Feature configuration - └── Settings persistence -``` - -### Container Data Flow - -``` -Container Data Flow: -├── Input Data -│ ├── Configuration variables -│ ├── Resource specifications -│ ├── Network settings -│ └── Storage requirements -├── Processing -│ ├── Validation -│ ├── Conflict resolution -│ ├── Resource allocation -│ └── Configuration generation -├── Container Creation -│ ├── LXC container creation -│ ├── Network configuration -│ ├── Storage setup -│ └── Feature configuration -└── Output - ├── Container status - ├── Access information - ├── Configuration files - └── Log files -``` - -## Integration Architecture - -### With Proxmox System - -``` -Proxmox Integration: -├── Proxmox Host -│ ├── LXC container management -│ ├── Storage management -│ ├── Network management -│ └── Resource management -├── Proxmox API -│ ├── Container operations -│ ├── Configuration updates -│ ├── Status monitoring -│ └── Error handling -├── Proxmox Configuration -│ ├── /etc/pve/lxc/.conf -│ ├── Storage configuration -│ ├── Network configuration -│ └── Resource configuration -└── Proxmox Services - ├── Container services - ├── Network services - ├── Storage services - └── Monitoring services -``` - -### With Install Scripts - -``` -Install Script Integration: -├── build.func -│ ├── Creates container -│ ├── Configures basic settings -│ ├── Starts container -│ └── Provides access -├── Install Scripts -│ ├── -install.sh -│ ├── Downloads application -│ ├── Configures application -│ └── Sets up services -├── Container -│ ├── Running application -│ ├── Configured 
services -│ ├── Network access -│ └── Storage access -└── Integration Points - ├── Container creation - ├── Network configuration - ├── Storage setup - └── Service configuration -``` - -## System Architecture Components - -### Core Components - -``` -System Components: -├── Entry Point -│ ├── start() function -│ ├── Context detection -│ ├── Environment capture -│ └── Workflow routing -├── Configuration Management -│ ├── Variable resolution -│ ├── Settings persistence -│ ├── Default management -│ └── Validation -├── Container Creation -│ ├── LXC container creation -│ ├── Network configuration -│ ├── Storage setup -│ └── Feature configuration -├── Hardware Integration -│ ├── GPU passthrough -│ ├── USB passthrough -│ ├── Storage management -│ └── Network management -└── Error Handling - ├── Error detection - ├── Error recovery - ├── Cleanup functions - └── User notification -``` - -### User Interface Components - -``` -UI Components: -├── Menu System -│ ├── Installation mode selection -│ ├── Configuration menus -│ ├── Storage selection -│ └── GPU configuration -├── Interactive Elements -│ ├── Whiptail menus -│ ├── User prompts -│ ├── Confirmation dialogs -│ └── Error messages -├── Non-Interactive Mode -│ ├── Environment variable driven -│ ├── Silent execution -│ ├── Automated configuration -│ └── Error handling -└── Output - ├── Status messages - ├── Progress indicators - ├── Completion information - └── Access details -``` - -## Security Architecture - -### Security Considerations - -``` -Security Architecture: -├── Container Security -│ ├── Unprivileged containers (default) -│ ├── Privileged containers (when needed) -│ ├── Resource limits -│ └── Access controls -├── Network Security -│ ├── Network isolation -│ ├── VLAN support -│ ├── Firewall integration -│ └── Access controls -├── Storage Security -│ ├── Storage isolation -│ ├── Access controls -│ ├── Encryption support -│ └── Backup integration -├── GPU Security -│ ├── Device isolation -│ ├── Permission management 
-│ ├── Access controls -│ └── Security validation -└── API Security - ├── Authentication - ├── Authorization - ├── Input validation - └── Error handling -``` - -## Performance Architecture - -### Performance Considerations - -``` -Performance Architecture: -├── Execution Optimization -│ ├── Parallel operations -│ ├── Efficient algorithms -│ ├── Minimal user interaction -│ └── Optimized validation -├── Resource Management -│ ├── Memory efficiency -│ ├── CPU optimization -│ ├── Disk usage optimization -│ └── Network efficiency -├── Caching -│ ├── Configuration caching -│ ├── Template caching -│ ├── Storage caching -│ └── GPU detection caching -└── Monitoring - ├── Performance monitoring - ├── Resource monitoring - ├── Error monitoring - └── Status monitoring -``` - -## Deployment Architecture - -### Deployment Scenarios - -``` -Deployment Scenarios: -├── Single Container -│ ├── Individual application -│ ├── Standard configuration -│ ├── Basic networking -│ └── Standard storage -├── Multiple Containers -│ ├── Application stack -│ ├── Shared networking -│ ├── Shared storage -│ └── Coordinated deployment -├── High Availability -│ ├── Redundant containers -│ ├── Load balancing -│ ├── Failover support -│ └── Monitoring integration -└── Development Environment - ├── Development containers - ├── Testing containers - ├── Staging containers - └── Production containers -``` - -## Maintenance Architecture - -### Maintenance Components - -``` -Maintenance Architecture: -├── Updates -│ ├── Container updates -│ ├── Application updates -│ ├── Configuration updates -│ └── Security updates -├── Monitoring -│ ├── Container monitoring -│ ├── Resource monitoring -│ ├── Performance monitoring -│ └── Error monitoring -├── Backup -│ ├── Configuration backup -│ ├── Container backup -│ ├── Storage backup -│ └── Recovery procedures -└── Troubleshooting - ├── Error diagnosis - ├── Log analysis - ├── Performance analysis - └── Recovery procedures -``` - -## Future Architecture Considerations - 
-### Scalability - -``` -Scalability Considerations: -├── Horizontal Scaling -│ ├── Multiple containers -│ ├── Load balancing -│ ├── Distributed deployment -│ └── Resource distribution -├── Vertical Scaling -│ ├── Resource scaling -│ ├── Performance optimization -│ ├── Capacity planning -│ └── Resource management -├── Automation -│ ├── Automated deployment -│ ├── Automated scaling -│ ├── Automated monitoring -│ └── Automated recovery -└── Integration - ├── External systems - ├── Cloud integration - ├── Container orchestration - └── Service mesh -``` diff --git a/docs/misc/build.func/BUILD_FUNC_ENVIRONMENT_VARIABLES.md b/docs/misc/build.func/BUILD_FUNC_ENVIRONMENT_VARIABLES.md deleted file mode 100644 index d0c7bd94c..000000000 --- a/docs/misc/build.func/BUILD_FUNC_ENVIRONMENT_VARIABLES.md +++ /dev/null @@ -1,294 +0,0 @@ -# build.func Environment Variables Reference - -## Overview - -This document provides a comprehensive reference of all environment variables used in `build.func`, organized by category and usage context. 
- -## Variable Categories - -### Core Container Variables - -| Variable | Description | Default | Set In | Used In | -| --------- | -------------------------------------------- | --------- | ----------- | ------------------ | -| `APP` | Application name (e.g., "plex", "nextcloud") | - | Environment | Throughout | -| `NSAPP` | Namespace application name | `$APP` | Environment | Throughout | -| `CTID` | Container ID | - | Environment | Container creation | -| `CT_TYPE` | Container type ("install" or "update") | "install" | Environment | Entry point | -| `CT_NAME` | Container name | `$APP` | Environment | Container creation | - -### Operating System Variables - -| Variable | Description | Default | Set In | Used In | -| -------------- | -------------------------- | -------------- | --------------- | ------------------ | -| `var_os` | Operating system selection | "debian" | base_settings() | OS selection | -| `var_version` | OS version | "12" | base_settings() | Template selection | -| `var_template` | Template name | Auto-generated | base_settings() | Template download | - -### Resource Configuration Variables - -| Variable | Description | Default | Set In | Used In | -| ------------ | ----------------------- | ----------- | --------------- | ------------------ | -| `var_cpu` | CPU cores | "2" | base_settings() | Container creation | -| `var_ram` | RAM in MB | "2048" | base_settings() | Container creation | -| `var_disk` | Disk size in GB | "8" | base_settings() | Container creation | -| `DISK_SIZE` | Disk size (alternative) | `$var_disk` | Environment | Container creation | -| `CORE_COUNT` | CPU cores (alternative) | `$var_cpu` | Environment | Container creation | -| `RAM_SIZE` | RAM size (alternative) | `$var_ram` | Environment | Container creation | - -### Network Configuration Variables - -| Variable | Description | Default | Set In | Used In | -| ------------- | ------------------------------- | -------------- | --------------- | -------------- | -| `var_net` | 
Network interface | "vmbr0" | base_settings() | Network config | -| `var_bridge` | Bridge interface | "vmbr0" | base_settings() | Network config | -| `var_gateway` | Gateway IP | "192.168.1.1" | base_settings() | Network config | -| `var_ip` | Container IP address | - | User input | Network config | -| `var_ipv6` | IPv6 address | - | User input | Network config | -| `var_vlan` | VLAN ID | - | User input | Network config | -| `var_mtu` | MTU size | "1500" | base_settings() | Network config | -| `var_mac` | MAC address | Auto-generated | base_settings() | Network config | -| `NET` | Network interface (alternative) | `$var_net` | Environment | Network config | -| `BRG` | Bridge interface (alternative) | `$var_bridge` | Environment | Network config | -| `GATE` | Gateway IP (alternative) | `$var_gateway` | Environment | Network config | -| `IPV6_METHOD` | IPv6 configuration method | "none" | Environment | Network config | -| `VLAN` | VLAN ID (alternative) | `$var_vlan` | Environment | Network config | -| `MTU` | MTU size (alternative) | `$var_mtu` | Environment | Network config | -| `MAC` | MAC address (alternative) | `$var_mac` | Environment | Network config | - -### Storage Configuration Variables - -| Variable | Description | Default | Set In | Used In | -| ----------------------- | ------------------------------- | ------------------------ | ---------------- | ----------------- | -| `var_template_storage` | Storage for templates | - | select_storage() | Template storage | -| `var_container_storage` | Storage for container disks | - | select_storage() | Container storage | -| `TEMPLATE_STORAGE` | Template storage (alternative) | `$var_template_storage` | Environment | Template storage | -| `CONTAINER_STORAGE` | Container storage (alternative) | `$var_container_storage` | Environment | Container storage | - -### Feature Flags - -| Variable | Description | Default | Set In | Used In | -| ---------------- | ------------------------------ | ------- | 
------------------------------- | ------------------ | -| `var_fuse` | Enable FUSE support | "no" | CT script / Advanced Settings | Container features | -| `var_tun` | Enable TUN/TAP support | "no" | CT script / Advanced Settings | Container features | -| `var_nesting` | Enable nesting support | "1" | CT script / Advanced Settings | Container features | -| `var_keyctl` | Enable keyctl support | "0" | CT script / Advanced Settings | Container features | -| `var_mknod` | Allow device node creation | "0" | CT script / Advanced Settings | Container features | -| `var_mount_fs` | Allowed filesystem mounts | "" | CT script / Advanced Settings | Container features | -| `var_protection` | Enable container protection | "no" | CT script / Advanced Settings | Container creation | -| `var_timezone` | Container timezone | "" | CT script / Advanced Settings | Container creation | -| `var_verbose` | Enable verbose output | "no" | Environment / Advanced Settings | Logging | -| `var_ssh` | Enable SSH key provisioning | "no" | CT script / Advanced Settings | SSH setup | -| `ENABLE_FUSE` | FUSE flag (internal) | "no" | Advanced Settings | Container creation | -| `ENABLE_TUN` | TUN/TAP flag (internal) | "no" | Advanced Settings | Container creation | -| `ENABLE_NESTING` | Nesting flag (internal) | "1" | Advanced Settings | Container creation | -| `ENABLE_KEYCTL` | Keyctl flag (internal) | "0" | Advanced Settings | Container creation | -| `ENABLE_MKNOD` | Mknod flag (internal) | "0" | Advanced Settings | Container creation | -| `PROTECT_CT` | Protection flag (internal) | "no" | Advanced Settings | Container creation | -| `CT_TIMEZONE` | Timezone setting (internal) | "" | Advanced Settings | Container creation | -| `VERBOSE` | Verbose mode flag | "no" | Environment | Logging | -| `SSH` | SSH access flag | "no" | Advanced Settings | SSH setup | - -### APT Cacher Configuration - -| Variable | Description | Default | Set In | Used In | -| ------------------ | ------------------------ | 
------- | ----------------------------- | ------------------- | -| `var_apt_cacher` | Enable APT cacher proxy | "no" | CT script / Advanced Settings | Package management | -| `var_apt_cacher_ip`| APT cacher server IP | "" | CT script / Advanced Settings | Package management | -| `APT_CACHER` | APT cacher flag | "no" | Advanced Settings | Container creation | -| `APT_CACHER_IP` | APT cacher IP (internal) | "" | Advanced Settings | Container creation | - -### GPU Passthrough Variables - -| Variable | Description | Default | Set In | Used In | -| ------------ | ------------------------------- | ------- | ------------------------------------------- | ------------------ | -| `var_gpu` | Enable GPU passthrough | "no" | CT script / Environment / Advanced Settings | GPU passthrough | -| `ENABLE_GPU` | GPU passthrough flag (internal) | "no" | Advanced Settings | Container creation | - -**Note**: GPU passthrough is controlled via `var_gpu`. Apps that benefit from GPU acceleration (media servers, AI/ML, transcoding) have `var_gpu=yes` as default in their CT scripts. 
- -**Apps with GPU enabled by default**: - -- Media: jellyfin, plex, emby, channels, ersatztv, tunarr, immich -- Transcoding: tdarr, unmanic, fileflows -- AI/ML: ollama, openwebui -- NVR: frigate - -**Usage Examples**: - -```bash -# Disable GPU for a specific installation -var_gpu=no bash -c "$(curl -fsSL https://...jellyfin.sh)" - -# Enable GPU for apps without default GPU support -var_gpu=yes bash -c "$(curl -fsSL https://...debian.sh)" - -# Set in default.vars for all apps -echo "var_gpu=yes" >> /usr/local/community-scripts/default.vars -``` - -### API and Diagnostics Variables - -| Variable | Description | Default | Set In | Used In | -| ------------- | ------------------------ | --------- | ----------- | ----------------- | -| `DIAGNOSTICS` | Enable diagnostics mode | "false" | Environment | Diagnostics | -| `METHOD` | Installation method | "install" | Environment | Installation flow | -| `RANDOM_UUID` | Random UUID for tracking | - | Environment | Logging | -| `API_TOKEN` | Proxmox API token | - | Environment | API calls | -| `API_USER` | Proxmox API user | - | Environment | API calls | - -### Settings Persistence Variables - -| Variable | Description | Default | Set In | Used In | -| ------------------- | -------------------------- | ------------------------------------------------- | ----------- | -------------------- | -| `SAVE_DEFAULTS` | Save settings as defaults | "false" | User input | Settings persistence | -| `SAVE_APP_DEFAULTS` | Save app-specific defaults | "false" | User input | Settings persistence | -| `DEFAULT_VARS_FILE` | Path to default.vars | "/usr/local/community-scripts/default.vars" | Environment | Settings persistence | -| `APP_DEFAULTS_FILE` | Path to app.vars | "/usr/local/community-scripts/defaults/$APP.vars" | Environment | Settings persistence | - -## Variable Precedence Chain - -Variables are resolved in the following order (highest to lowest priority): - -1. **Hard Environment Variables**: Set before script execution -2. 
**App-specific .vars file**: `/usr/local/community-scripts/defaults/.vars` -3. **Global default.vars file**: `/usr/local/community-scripts/default.vars` -4. **Built-in defaults**: Set in `base_settings()` function - -## Critical Variables for Non-Interactive Use - -For silent/non-interactive execution, these variables must be set: - -```bash -# Core container settings -export APP="plex" -export CTID="100" -export var_hostname="plex-server" - -# OS selection -export var_os="debian" -export var_version="12" - -# Resource allocation -export var_cpu="4" -export var_ram="4096" -export var_disk="20" - -# Network configuration -export var_net="vmbr0" -export var_gateway="192.168.1.1" -export var_ip="192.168.1.100" - -# Storage selection -export var_template_storage="local" -export var_container_storage="local" - -# Feature flags -export ENABLE_FUSE="true" -export ENABLE_TUN="true" -export SSH="true" -``` - -## Environment Variable Usage Patterns - -### 1. Container Creation - -```bash -# Basic container creation -export APP="nextcloud" -export CTID="101" -export var_hostname="nextcloud-server" -export var_os="debian" -export var_version="12" -export var_cpu="2" -export var_ram="2048" -export var_disk="10" -export var_net="vmbr0" -export var_gateway="192.168.1.1" -export var_ip="192.168.1.101" -export var_template_storage="local" -export var_container_storage="local" -``` - -### 2. GPU Passthrough - -```bash -# Enable GPU passthrough -export GPU_APPS="plex,jellyfin,emby" -export var_gpu="intel" -export ENABLE_PRIVILEGED="true" -``` - -### 3. Advanced Network Configuration - -```bash -# VLAN and IPv6 configuration -export var_vlan="100" -export var_ipv6="2001:db8::100" -export IPV6_METHOD="static" -export var_mtu="9000" -``` - -### 4. Storage Configuration - -```bash -# Custom storage locations -export var_template_storage="nfs-storage" -export var_container_storage="ssd-storage" -``` - -## Variable Validation - -The script validates variables at several points: - -1. 
**Container ID validation**: Must be unique and within valid range -2. **IP address validation**: Must be valid IPv4/IPv6 format -3. **Storage validation**: Must exist and support required content types -4. **Resource validation**: Must be within reasonable limits -5. **Network validation**: Must be valid network configuration - -## Common Variable Combinations - -### Development Container - -```bash -export APP="dev-container" -export CTID="200" -export var_hostname="dev-server" -export var_os="ubuntu" -export var_version="22.04" -export var_cpu="4" -export var_ram="4096" -export var_disk="20" -export ENABLE_NESTING="true" -export ENABLE_PRIVILEGED="true" -``` - -### Media Server with GPU - -```bash -export APP="plex" -export CTID="300" -export var_hostname="plex-server" -export var_os="debian" -export var_version="12" -export var_cpu="6" -export var_ram="8192" -export var_disk="50" -export GPU_APPS="plex" -export var_gpu="nvidia" -export ENABLE_PRIVILEGED="true" -``` - -### Lightweight Service - -```bash -export APP="nginx" -export CTID="400" -export var_hostname="nginx-proxy" -export var_os="alpine" -export var_version="3.18" -export var_cpu="1" -export var_ram="512" -export var_disk="2" -export ENABLE_UNPRIVILEGED="true" -``` diff --git a/docs/misc/build.func/BUILD_FUNC_EXECUTION_FLOWS.md b/docs/misc/build.func/BUILD_FUNC_EXECUTION_FLOWS.md deleted file mode 100644 index 47a0035e2..000000000 --- a/docs/misc/build.func/BUILD_FUNC_EXECUTION_FLOWS.md +++ /dev/null @@ -1,413 +0,0 @@ -# build.func Execution Flows - -## Overview - -This document details the execution flows for different installation modes and scenarios in `build.func`, including variable precedence, decision trees, and workflow patterns. - -## Installation Modes - -### 1. 
Default Install Flow - -**Purpose**: Uses built-in defaults with minimal user interaction -**Use Case**: Quick container creation with standard settings - -``` -Default Install Flow: -├── start() -│ ├── Detect execution context -│ ├── Capture hard environment variables -│ └── Set CT_TYPE="install" -├── install_script() -│ ├── Display installation mode menu -│ ├── User selects "Default Install" -│ └── Proceed with defaults -├── variables() -│ ├── base_settings() # Set built-in defaults -│ ├── Load app.vars (if exists) -│ ├── Load default.vars (if exists) -│ └── Apply variable precedence -├── build_container() -│ ├── validate_settings() -│ ├── check_conflicts() -│ └── create_lxc_container() -└── default_var_settings() - └── Offer to save as defaults -``` - -**Key Characteristics**: -- Minimal user prompts -- Uses built-in defaults -- Fast execution -- Suitable for standard deployments - -### 2. Advanced Install Flow - -**Purpose**: Full interactive configuration via whiptail menus -**Use Case**: Custom container configuration with full control - -``` -Advanced Install Flow: -├── start() -│ ├── Detect execution context -│ ├── Capture hard environment variables -│ └── Set CT_TYPE="install" -├── install_script() -│ ├── Display installation mode menu -│ ├── User selects "Advanced Install" -│ └── Proceed with advanced configuration -├── variables() -│ ├── base_settings() # Set built-in defaults -│ ├── Load app.vars (if exists) -│ ├── Load default.vars (if exists) -│ └── Apply variable precedence -├── advanced_settings() -│ ├── OS Selection Menu -│ ├── Resource Configuration Menu -│ ├── Network Configuration Menu -│ ├── select_storage() -│ │ ├── resolve_storage_preselect() -│ │ └── choose_and_set_storage_for_file() -│ ├── GPU Configuration Menu -│ │ └── detect_gpu_devices() -│ └── Feature Flags Menu -├── build_container() -│ ├── validate_settings() -│ ├── check_conflicts() -│ └── create_lxc_container() -└── default_var_settings() - └── Offer to save as defaults -``` - 
-**Key Characteristics**: -- Full interactive configuration -- Whiptail menus for all options -- Complete control over settings -- Suitable for custom deployments - -### 3. My Defaults Flow - -**Purpose**: Loads settings from global default.vars file -**Use Case**: Using previously saved global defaults - -``` -My Defaults Flow: -├── start() -│ ├── Detect execution context -│ ├── Capture hard environment variables -│ └── Set CT_TYPE="install" -├── install_script() -│ ├── Display installation mode menu -│ ├── User selects "My Defaults" -│ └── Proceed with loaded defaults -├── variables() -│ ├── base_settings() # Set built-in defaults -│ ├── Load app.vars (if exists) -│ ├── Load default.vars # Load global defaults -│ └── Apply variable precedence -├── build_container() -│ ├── validate_settings() -│ ├── check_conflicts() -│ └── create_lxc_container() -└── default_var_settings() - └── Offer to save as defaults -``` - -**Key Characteristics**: -- Uses global default.vars file -- Minimal user interaction -- Consistent with previous settings -- Suitable for repeated deployments - -### 4. 
App Defaults Flow - -**Purpose**: Loads settings from app-specific .vars file -**Use Case**: Using previously saved app-specific defaults - -``` -App Defaults Flow: -├── start() -│ ├── Detect execution context -│ ├── Capture hard environment variables -│ └── Set CT_TYPE="install" -├── install_script() -│ ├── Display installation mode menu -│ ├── User selects "App Defaults" -│ └── Proceed with app-specific defaults -├── variables() -│ ├── base_settings() # Set built-in defaults -│ ├── Load app.vars # Load app-specific defaults -│ ├── Load default.vars (if exists) -│ └── Apply variable precedence -├── build_container() -│ ├── validate_settings() -│ ├── check_conflicts() -│ └── create_lxc_container() -└── default_var_settings() - └── Offer to save as defaults -``` - -**Key Characteristics**: -- Uses app-specific .vars file -- Minimal user interaction -- App-optimized settings -- Suitable for app-specific deployments - -## Variable Precedence Chain - -### Precedence Order (Highest to Lowest) - -1. **Hard Environment Variables**: Set before script execution -2. **App-specific .vars file**: `/usr/local/community-scripts/defaults/.vars` -3. **Global default.vars file**: `/usr/local/community-scripts/default.vars` -4. **Built-in defaults**: Set in `base_settings()` function - -### Variable Resolution Process - -``` -Variable Resolution: -├── Capture hard environment variables at start() -├── Load built-in defaults in base_settings() -├── Load global default.vars (if exists) -├── Load app-specific .vars (if exists) -└── Apply precedence chain - ├── Hard env vars override all - ├── App.vars override default.vars and built-ins - ├── Default.vars override built-ins - └── Built-ins are fallback defaults -``` - -## Storage Selection Logic - -### Storage Resolution Flow - -``` -Storage Selection: -├── Check if storage is preselected -│ ├── var_template_storage set? → Validate and use -│ └── var_container_storage set? 
→ Validate and use -├── Count available storage options -│ ├── Only 1 option → Auto-select -│ └── Multiple options → Prompt user -├── User selection via whiptail -│ ├── Template storage selection -│ └── Container storage selection -└── Validate selected storage - ├── Check availability - ├── Check content type support - └── Proceed with selection -``` - -### Storage Validation - -``` -Storage Validation: -├── Check storage exists -├── Check storage is online -├── Check content type support -│ ├── Template storage: vztmpl support -│ └── Container storage: rootdir support -├── Check available space -└── Validate permissions -``` - -## GPU Passthrough Flow - -### GPU Detection and Configuration - -``` -GPU Passthrough Flow: -├── detect_gpu_devices() -│ ├── Scan for Intel GPUs -│ │ ├── Check i915 driver -│ │ └── Detect devices -│ ├── Scan for AMD GPUs -│ │ ├── Check AMDGPU driver -│ │ └── Detect devices -│ └── Scan for NVIDIA GPUs -│ ├── Check NVIDIA driver -│ ├── Detect devices -│ └── Check CUDA support -├── Check GPU passthrough eligibility -│ ├── Is app in GPU_APPS list? -│ ├── Is container privileged? -│ └── Proceed if eligible -├── GPU selection logic -│ ├── Single GPU type → Auto-select -│ └── Multiple GPU types → Prompt user -├── configure_gpu_passthrough() -│ ├── Add GPU device entries -│ ├── Configure permissions -│ └── Update container config -└── fix_gpu_gids() - ├── Update GPU group IDs - └── Configure access permissions -``` - -### GPU Eligibility Check - -``` -GPU Eligibility: -├── Check app support -│ ├── Is APP in GPU_APPS list? -│ └── Proceed if supported -├── Check container privileges -│ ├── Is ENABLE_PRIVILEGED="true"? -│ └── Proceed if privileged -└── Check hardware availability - ├── Are GPUs detected? 
- └── Proceed if available -``` - -## Network Configuration Flow - -### Network Setup Process - -``` -Network Configuration: -├── Basic network settings -│ ├── var_net (network interface) -│ ├── var_bridge (bridge interface) -│ └── var_gateway (gateway IP) -├── IP configuration -│ ├── var_ip (IPv4 address) -│ ├── var_ipv6 (IPv6 address) -│ └── IPV6_METHOD (IPv6 method) -├── Advanced network settings -│ ├── var_vlan (VLAN ID) -│ ├── var_mtu (MTU size) -│ └── var_mac (MAC address) -└── Network validation - ├── Check IP format - ├── Check gateway reachability - └── Validate network configuration -``` - -## Container Creation Flow - -### LXC Container Creation Process - -``` -Container Creation: -├── create_lxc_container() -│ ├── Create basic container -│ ├── Configure network -│ ├── Set up storage -│ ├── Configure features -│ ├── Set resource limits -│ ├── Configure startup -│ └── Start container -├── Post-creation configuration -│ ├── Wait for network -│ ├── Configure GPU (if enabled) -│ ├── Set up SSH keys -│ └── Run post-install scripts -└── Finalization - ├── Display container info - ├── Show access details - └── Provide next steps -``` - -## Error Handling Flows - -### Validation Error Flow - -``` -Validation Error Flow: -├── validate_settings() -│ ├── Check configuration validity -│ └── Return error if invalid -├── check_conflicts() -│ ├── Check for conflicts -│ └── Return error if conflicts found -├── Error handling -│ ├── Display error message -│ ├── cleanup_on_error() -│ └── Exit with error code -└── User notification - ├── Show error details - └── Suggest fixes -``` - -### Storage Error Flow - -``` -Storage Error Flow: -├── Storage selection fails -├── Retry storage selection -│ ├── Show available options -│ └── Allow user to retry -├── Storage validation fails -│ ├── Show validation errors -│ └── Allow user to fix -└── Fallback to default storage - ├── Use fallback storage - └── Continue with creation -``` - -### GPU Error Flow - -``` -GPU Error Flow: -├── 
GPU detection fails -├── Fall back to no GPU -│ ├── Disable GPU passthrough -│ └── Continue without GPU -├── GPU configuration fails -│ ├── Show configuration errors -│ └── Allow user to retry -└── GPU permission errors - ├── Fix GPU permissions - └── Retry configuration -``` - -## Integration Flows - -### With Install Scripts - -``` -Install Script Integration: -├── build.func creates container -├── Container starts successfully -├── Install script execution -│ ├── Download and install app -│ ├── Configure app settings -│ └── Set up services -└── Post-installation configuration - ├── Verify installation - ├── Configure access - └── Display completion info -``` - -### With Proxmox API - -``` -Proxmox API Integration: -├── API authentication -├── Container creation via API -├── Configuration updates via API -├── Status monitoring via API -└── Error handling via API -``` - -## Performance Considerations - -### Execution Time Optimization - -``` -Performance Optimization: -├── Parallel operations where possible -├── Minimal user interaction in default mode -├── Efficient storage selection -├── Optimized GPU detection -└── Streamlined validation -``` - -### Resource Usage - -``` -Resource Usage: -├── Minimal memory footprint -├── Efficient disk usage -├── Optimized network usage -└── Minimal CPU overhead -``` diff --git a/docs/misc/build.func/BUILD_FUNC_FLOWCHART.md b/docs/misc/build.func/BUILD_FUNC_FLOWCHART.md deleted file mode 100644 index e406f46fd..000000000 --- a/docs/misc/build.func/BUILD_FUNC_FLOWCHART.md +++ /dev/null @@ -1,244 +0,0 @@ -# build.func Execution Flowchart - -## Main Execution Flow - -``` -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ START() │ -│ Entry point when build.func is sourced or executed │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Check Environment │ -│ • 
Detect if running on Proxmox host vs inside container │ -│ • Capture hard environment variables │ -│ • Set CT_TYPE based on context │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Determine Action │ -│ • If CT_TYPE="update" → update_script() │ -│ • If CT_TYPE="install" → install_script() │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ INSTALL_SCRIPT() │ -│ Main container creation workflow │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Installation Mode Selection │ -│ │ -│ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────┐ │ -│ │ Default │ │ Advanced │ │ My Defaults │ │ App Defaults│ │ -│ │ Install │ │ Install │ │ │ │ │ │ -│ │ │ │ │ │ │ │ │ │ -│ │ • Use built-in │ │ • Full whiptail │ │ • Load from │ │ • Load from │ │ -│ │ defaults │ │ menus │ │ default.vars │ │ app.vars │ │ -│ │ • Minimal │ │ • Interactive │ │ • Override │ │ • App- │ │ -│ │ prompts │ │ configuration │ │ built-ins │ │ specific │ │ -│ └─────────────────┘ └─────────────────┘ └─────────────────┘ └─────────────┘ │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ VARIABLES() │ -│ • Load variable precedence chain: │ -│ 1. Hard environment variables │ -│ 2. App-specific .vars file │ -│ 3. Global default.vars file │ -│ 4. 
Built-in defaults in base_settings() │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ BASE_SETTINGS() │ -│ • Set core container parameters │ -│ • Configure OS selection │ -│ • Set resource defaults (CPU, RAM, Disk) │ -│ • Configure network defaults │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Storage Selection Logic │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ SELECT_STORAGE() │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────────┐ │ │ -│ │ │ Template │ │ Container │ │ Resolution │ │ │ -│ │ │ Storage │ │ Storage │ │ Logic │ │ │ -│ │ │ │ │ │ │ │ │ │ -│ │ │ • Check if │ │ • Check if │ │ 1. Only 1 storage │ │ │ -│ │ │ preselected │ │ preselected │ │ → Auto-select │ │ │ -│ │ │ • Validate │ │ • Validate │ │ 2. Preselected │ │ │ -│ │ │ availability │ │ availability │ │ → Validate & use │ │ │ -│ │ │ • Prompt if │ │ • Prompt if │ │ 3. 
Multiple options │ │ │ -│ │ │ needed │ │ needed │ │ → Prompt user │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ BUILD_CONTAINER() │ -│ • Validate all settings │ -│ • Check for conflicts │ -│ • Prepare container configuration │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ CREATE_LXC_CONTAINER() │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Container Creation Process │ │ -│ │ │ │ -│ │ 1. Create LXC container with basic configuration │ │ -│ │ 2. Configure network settings │ │ -│ │ 3. Set up storage and mount points │ │ -│ │ 4. Configure features (FUSE, TUN, etc.) │ │ -│ │ 5. Set resource limits │ │ -│ │ 6. 
Configure startup options │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ GPU Passthrough Decision Tree │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ DETECT_GPU_DEVICES() │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────────┐ │ │ -│ │ │ Intel GPU │ │ AMD GPU │ │ NVIDIA GPU │ │ │ -│ │ │ │ │ │ │ │ │ │ -│ │ │ • Check i915 │ │ • Check AMDGPU │ │ • Check NVIDIA │ │ │ -│ │ │ driver │ │ driver │ │ driver │ │ │ -│ │ │ • Detect │ │ • Detect │ │ • Detect devices │ │ │ -│ │ │ devices │ │ devices │ │ • Check CUDA support │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ GPU Selection Logic │ │ -│ │ │ │ -│ │ • Is app in GPU_APPS list? OR Is container privileged? │ │ -│ │ └─ YES → Proceed with GPU configuration │ │ -│ │ └─ NO → Skip GPU passthrough │ │ -│ │ │ │ -│ │ • Single GPU type detected? 
│ │ -│ │ └─ YES → Auto-select and configure │ │ -│ │ └─ NO → Prompt user for selection │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ CONFIGURE_GPU_PASSTHROUGH() │ -│ • Add GPU device entries to /etc/pve/lxc/.conf │ -│ • Configure proper device permissions │ -│ • Set up device mapping │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Container Finalization │ -│ • Start container │ -│ • Wait for network connectivity │ -│ • Fix GPU GIDs (if GPU passthrough enabled) │ -│ • Configure SSH keys (if enabled) │ -│ • Run post-installation scripts │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Settings Persistence │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ DEFAULT_VAR_SETTINGS() │ │ -│ │ │ │ -│ │ • Offer to save current settings as defaults │ │ -│ │ • Save to /usr/local/community-scripts/default.vars │ │ -│ │ • Save to /usr/local/community-scripts/defaults/.vars │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ COMPLETION │ -│ • Display container information │ -│ • Show access details │ -│ • Provide next steps │ -└─────────────────────────────────────────────────────────────────────────────────┘ -``` - -## Key Decision Points - -### 1. 
Installation Mode Selection -- **Default**: Uses built-in defaults, minimal user interaction -- **Advanced**: Full interactive configuration via whiptail menus -- **My Defaults**: Loads settings from global default.vars file -- **App Defaults**: Loads settings from app-specific .vars file - -### 2. Storage Selection Logic -``` -Storage Selection Flow: -├── Check if storage is preselected via environment variables -│ ├── YES → Validate availability and use -│ └── NO → Continue to resolution logic -├── Count available storage options for content type -│ ├── Only 1 option → Auto-select -│ └── Multiple options → Prompt user via whiptail -└── Validate selected storage and proceed -``` - -### 3. GPU Passthrough Decision Tree -``` -GPU Passthrough Flow: -├── Detect available GPU hardware -│ ├── Intel GPU detected -│ ├── AMD GPU detected -│ └── NVIDIA GPU detected -├── Check if GPU passthrough should be enabled -│ ├── App is in GPU_APPS list? → YES -│ ├── Container is privileged? → YES -│ └── Neither? → Skip GPU passthrough -├── Configure GPU passthrough -│ ├── Single GPU type → Auto-configure -│ └── Multiple GPU types → Prompt user -└── Fix GPU GIDs post-creation -``` - -### 4. Variable Precedence Chain -``` -Variable Resolution Order: -1. Hard environment variables (captured at start) -2. App-specific .vars file (/usr/local/community-scripts/defaults/.vars) -3. Global default.vars file (/usr/local/community-scripts/default.vars) -4. 
Built-in defaults in base_settings() function -``` - -## Error Handling Flow - -``` -Error Handling: -├── Validation errors → Display error message and exit -├── Storage errors → Retry storage selection -├── Network errors → Retry network configuration -├── GPU errors → Fall back to no GPU passthrough -└── Container creation errors → Cleanup and exit -``` - -## Integration Points - -- **Core Functions**: Depends on core.func for basic utilities -- **Error Handling**: Uses error_handler.func for error management -- **API Functions**: Uses api.func for Proxmox API interactions -- **Tools**: Uses tools.func for additional utilities -- **Install Scripts**: Integrates with -install.sh scripts diff --git a/docs/misc/build.func/BUILD_FUNC_FUNCTIONS_REFERENCE.md b/docs/misc/build.func/BUILD_FUNC_FUNCTIONS_REFERENCE.md deleted file mode 100644 index 2e605e085..000000000 --- a/docs/misc/build.func/BUILD_FUNC_FUNCTIONS_REFERENCE.md +++ /dev/null @@ -1,489 +0,0 @@ -# build.func Functions Reference - -## Overview - -This document provides a comprehensive reference of all functions in `build.func`, organized alphabetically with detailed descriptions, parameters, and usage information. 
- -## Function Categories - -### Initialization Functions - -#### `start()` - -**Purpose**: Main entry point when build.func is sourced or executed -**Parameters**: None -**Returns**: None -**Side Effects**: - -- Detects execution context (Proxmox host vs container) -- Captures hard environment variables -- Sets CT_TYPE based on context -- Routes to appropriate workflow (install_script or update_script) - **Dependencies**: None - **Environment Variables Used**: `CT_TYPE`, `APP`, `CTID` - -#### `variables()` - -**Purpose**: Load and resolve all configuration variables using precedence chain -**Parameters**: None -**Returns**: None -**Side Effects**: - -- Loads app-specific .vars file -- Loads global default.vars file -- Applies variable precedence chain -- Sets all configuration variables - **Dependencies**: `base_settings()` - **Environment Variables Used**: All configuration variables - -#### `base_settings()` - -**Purpose**: Set built-in default values for all configuration variables -**Parameters**: None -**Returns**: None -**Side Effects**: Sets default values for all variables -**Dependencies**: None -**Environment Variables Used**: All configuration variables - -### UI and Menu Functions - -#### `install_script()` - -**Purpose**: Main installation workflow coordinator -**Parameters**: None -**Returns**: None -**Side Effects**: - -- Displays installation mode selection menu -- Coordinates the entire installation process -- Handles user interaction and validation - **Dependencies**: `variables()`, `build_container()`, `default_var_settings()` - **Environment Variables Used**: `APP`, `CTID`, `var_hostname` - -#### `advanced_settings()` - -**Purpose**: Provide advanced configuration options via whiptail menus -**Parameters**: None -**Returns**: None -**Side Effects**: - -- Displays whiptail menus for configuration -- Updates configuration variables based on user input -- Validates user selections - **Dependencies**: `select_storage()`, `detect_gpu_devices()` - 
**Environment Variables Used**: All configuration variables - -#### `settings_menu()` - -**Purpose**: Display and handle settings configuration menu -**Parameters**: None -**Returns**: None -**Side Effects**: Updates configuration variables -**Dependencies**: `advanced_settings()` -**Environment Variables Used**: All configuration variables - -### Storage Functions - -#### `select_storage()` - -**Purpose**: Handle storage selection for templates and containers -**Parameters**: None -**Returns**: None -**Side Effects**: - -- Resolves storage preselection -- Prompts user for storage selection if needed -- Validates storage availability -- Sets var_template_storage and var_container_storage - **Dependencies**: `resolve_storage_preselect()`, `choose_and_set_storage_for_file()` - **Environment Variables Used**: `var_template_storage`, `var_container_storage`, `TEMPLATE_STORAGE`, `CONTAINER_STORAGE` - -#### `resolve_storage_preselect()` - -**Purpose**: Resolve preselected storage options -**Parameters**: - -- `storage_type`: Type of storage (template or container) - **Returns**: Storage name if valid, empty if invalid - **Side Effects**: Validates storage availability - **Dependencies**: None - **Environment Variables Used**: `var_template_storage`, `var_container_storage` - -#### `choose_and_set_storage_for_file()` - -**Purpose**: Interactive storage selection via whiptail -**Parameters**: - -- `storage_type`: Type of storage (template or container) -- `content_type`: Content type (vztmpl or rootdir) - **Returns**: None - **Side Effects**: -- Displays whiptail menu -- Updates storage variables -- Validates selection - **Dependencies**: None - **Environment Variables Used**: `var_template_storage`, `var_container_storage` - -### Container Creation Functions - -#### `build_container()` - -**Purpose**: Validate settings and prepare container creation -**Parameters**: None -**Returns**: None -**Side Effects**: - -- Validates all configuration -- Checks for conflicts -- 
Prepares container configuration -- Calls create_lxc_container() - **Dependencies**: `create_lxc_container()` - **Environment Variables Used**: All configuration variables - -#### `create_lxc_container()` - -**Purpose**: Create the actual LXC container -**Parameters**: None -**Returns**: None -**Side Effects**: - -- Creates LXC container with basic configuration -- Configures network settings -- Sets up storage and mount points -- Configures features (FUSE, TUN, etc.) -- Sets resource limits -- Configures startup options -- Starts container - **Dependencies**: `configure_gpu_passthrough()`, `fix_gpu_gids()` - **Environment Variables Used**: All configuration variables - -### GPU and Hardware Functions - -#### `detect_gpu_devices()` - -**Purpose**: Detect available GPU hardware on the system -**Parameters**: None -**Returns**: None -**Side Effects**: - -- Scans for Intel, AMD, and NVIDIA GPUs -- Updates var_gpu_type and var_gpu_devices -- Determines GPU capabilities - **Dependencies**: None - **Environment Variables Used**: `var_gpu_type`, `var_gpu_devices`, `GPU_APPS` - -#### `configure_gpu_passthrough()` - -**Purpose**: Configure GPU passthrough for the container -**Parameters**: None -**Returns**: None -**Side Effects**: - -- Adds GPU device entries to container config -- Configures proper device permissions -- Sets up device mapping -- Updates /etc/pve/lxc/.conf - **Dependencies**: `detect_gpu_devices()` - **Environment Variables Used**: `var_gpu`, `var_gpu_type`, `var_gpu_devices`, `CTID` - -#### `fix_gpu_gids()` - -**Purpose**: Fix GPU group IDs after container creation -**Parameters**: None -**Returns**: None -**Side Effects**: - -- Updates GPU group IDs in container -- Ensures proper GPU access permissions -- Configures video and render groups - **Dependencies**: `configure_gpu_passthrough()` - **Environment Variables Used**: `CTID`, `var_gpu_type` - -### SSH Configuration Functions - -#### `configure_ssh_settings()` - -**Purpose**: Interactive SSH key and 
access configuration wizard -**Parameters**: - -- `step_info` (optional): Step indicator string (e.g., "Step 17/19") for consistent dialog headers - **Returns**: None - **Side Effects**: -- Creates temporary file for SSH keys -- Discovers and presents available SSH keys from host -- Allows manual key entry or folder/glob scanning -- Sets `SSH` variable to "yes" or "no" based on user selection -- Sets `SSH_AUTHORIZED_KEY` if manual key provided -- Populates `SSH_KEYS_FILE` with selected keys - **Dependencies**: `ssh_discover_default_files()`, `ssh_build_choices_from_files()` - **Environment Variables Used**: `SSH`, `SSH_AUTHORIZED_KEY`, `SSH_KEYS_FILE` - -**SSH Key Source Options**: - -1. `found` - Select from auto-detected host keys -2. `manual` - Paste a single public key -3. `folder` - Scan custom folder or glob pattern -4. `none` - No SSH keys - -**Note**: The "Enable root SSH access?" dialog is always shown, regardless of whether SSH keys or password are configured. This ensures users can always enable SSH access even with automatic login. 
- -#### `ssh_discover_default_files()` - -**Purpose**: Discover SSH public key files on the host system -**Parameters**: None -**Returns**: Array of discovered key file paths -**Side Effects**: Scans common SSH key locations -**Dependencies**: None -**Environment Variables Used**: `var_ssh_import_glob` - -#### `ssh_build_choices_from_files()` - -**Purpose**: Build whiptail checklist choices from SSH key files -**Parameters**: - -- Array of file paths to process - **Returns**: None - **Side Effects**: -- Sets `CHOICES` array for whiptail checklist -- Sets `COUNT` variable with number of keys found -- Creates `MAPFILE` for key tag to content mapping - **Dependencies**: None - **Environment Variables Used**: `CHOICES`, `COUNT`, `MAPFILE` - -### Settings Persistence Functions - -#### `default_var_settings()` - -**Purpose**: Offer to save current settings as defaults -**Parameters**: None -**Returns**: None -**Side Effects**: - -- Prompts user to save settings -- Saves to default.vars file -- Saves to app-specific .vars file - **Dependencies**: `maybe_offer_save_app_defaults()` - **Environment Variables Used**: All configuration variables - -#### `maybe_offer_save_app_defaults()` - -**Purpose**: Offer to save app-specific defaults -**Parameters**: None -**Returns**: None -**Side Effects**: - -- Prompts user to save app-specific settings -- Saves to app.vars file -- Updates app-specific configuration - **Dependencies**: None - **Environment Variables Used**: `APP`, `SAVE_APP_DEFAULTS` - -### Utility Functions - -#### `validate_settings()` - -**Purpose**: Validate all configuration settings -**Parameters**: None -**Returns**: 0 if valid, 1 if invalid -**Side Effects**: - -- Checks for configuration conflicts -- Validates resource limits -- Validates network configuration -- Validates storage configuration - **Dependencies**: None - **Environment Variables Used**: All configuration variables - -#### `check_conflicts()` - -**Purpose**: Check for configuration conflicts 
-**Parameters**: None -**Returns**: 0 if no conflicts, 1 if conflicts found -**Side Effects**: - -- Checks for conflicting settings -- Validates resource allocation -- Checks network configuration - **Dependencies**: None - **Environment Variables Used**: All configuration variables - -#### `cleanup_on_error()` - -**Purpose**: Clean up resources on error -**Parameters**: None -**Returns**: None -**Side Effects**: - -- Removes partially created containers -- Cleans up temporary files -- Resets configuration - **Dependencies**: None - **Environment Variables Used**: `CTID` - -## Function Call Flow - -### Main Installation Flow - -``` -start() -├── variables() -│ ├── base_settings() -│ ├── Load app.vars -│ └── Load default.vars -├── install_script() -│ ├── advanced_settings() -│ │ ├── select_storage() -│ │ │ ├── resolve_storage_preselect() -│ │ │ └── choose_and_set_storage_for_file() -│ │ └── detect_gpu_devices() -│ ├── build_container() -│ │ ├── validate_settings() -│ │ ├── check_conflicts() -│ │ └── create_lxc_container() -│ │ ├── configure_gpu_passthrough() -│ │ └── fix_gpu_gids() -│ └── default_var_settings() -│ └── maybe_offer_save_app_defaults() -``` - -### Error Handling Flow - -``` -Error Detection -├── validate_settings() -│ └── check_conflicts() -├── Error Handling -│ └── cleanup_on_error() -└── Exit with error code -``` - -## Function Dependencies - -### Core Dependencies - -- `start()` → `install_script()` → `build_container()` → `create_lxc_container()` -- `variables()` → `base_settings()` -- `advanced_settings()` → `select_storage()` → `detect_gpu_devices()` - -### Storage Dependencies - -- `select_storage()` → `resolve_storage_preselect()` -- `select_storage()` → `choose_and_set_storage_for_file()` - -### GPU Dependencies - -- `configure_gpu_passthrough()` → `detect_gpu_devices()` -- `fix_gpu_gids()` → `configure_gpu_passthrough()` - -### Settings Dependencies - -- `default_var_settings()` → `maybe_offer_save_app_defaults()` - -## Function Usage 
Examples - -### Basic Container Creation - -```bash -# Set required variables -export APP="plex" -export CTID="100" -export var_hostname="plex-server" - -# Call main functions -start() # Entry point -# → variables() # Load configuration -# → install_script() # Main workflow -# → build_container() # Create container -# → create_lxc_container() # Actual creation -``` - -### Advanced Configuration - -```bash -# Set advanced variables -export var_os="debian" -export var_version="12" -export var_cpu="4" -export var_ram="4096" -export var_disk="20" - -# Call advanced functions -advanced_settings() # Interactive configuration -# → select_storage() # Storage selection -# → detect_gpu_devices() # GPU detection -``` - -### GPU Passthrough - -```bash -# Enable GPU passthrough -export GPU_APPS="plex" -export var_gpu="nvidia" - -# Call GPU functions -detect_gpu_devices() # Detect hardware -configure_gpu_passthrough() # Configure passthrough -fix_gpu_gids() # Fix permissions -``` - -### Settings Persistence - -```bash -# Save settings as defaults -export SAVE_DEFAULTS="true" -export SAVE_APP_DEFAULTS="true" - -# Call persistence functions -default_var_settings() # Save global defaults -maybe_offer_save_app_defaults() # Save app defaults -``` - -### Container Resource & ID Management - -#### `validate_container_id()` -**Purpose**: Validates if a container ID is available for use. -**Parameters**: `ctid` (Integer) -**Returns**: `0` if available, `1` if already in use or invalid. -**Description**: Checks for existing config files in `/etc/pve/lxc/` or `/etc/pve/qemu-server/`, and verifies LVM logical volumes. - -#### `get_valid_container_id()` -**Purpose**: Returns the next available, unused container ID. -**Parameters**: `suggested_id` (Optional) -**Returns**: A valid container ID string. -**Description**: If the suggested ID is taken, it increments until it finds an available one. 
- -#### `maxkeys_check()` -**Purpose**: Ensures host kernel parameters support high numbers of keys (required for some apps). -**Parameters**: None -**Description**: Checks and optionally updates `kernel.keys.maxkeys` and `kernel.keys.maxbytes`. - -#### `get_current_ip()` -**Purpose**: Retrieves the current IP address of the container. -**Parameters**: `ctid` (Integer) -**Returns**: IP address string. - -#### `update_motd_ip()` -**Purpose**: Updates the Message of the Day (MOTD) file with the container's IP. -**Parameters**: None - -## Function Error Handling - -### Validation Functions - -- `validate_settings()`: Returns 0 for valid, 1 for invalid -- `check_conflicts()`: Returns 0 for no conflicts, 1 for conflicts - -### Error Recovery - -- `cleanup_on_error()`: Cleans up on any error -- Error codes are propagated up the call stack -- Critical errors cause script termination - -### Error Types - -1. **Configuration Errors**: Invalid settings or conflicts -2. **Resource Errors**: Insufficient resources or conflicts -3. **Network Errors**: Invalid network configuration -4. **Storage Errors**: Storage not available or invalid -5. **GPU Errors**: GPU configuration failures -6. **Container Creation Errors**: LXC creation failures diff --git a/docs/misc/build.func/BUILD_FUNC_USAGE_EXAMPLES.md b/docs/misc/build.func/BUILD_FUNC_USAGE_EXAMPLES.md deleted file mode 100644 index 299b2be71..000000000 --- a/docs/misc/build.func/BUILD_FUNC_USAGE_EXAMPLES.md +++ /dev/null @@ -1,600 +0,0 @@ -# build.func Usage Examples - -## Overview - -This document provides practical usage examples for `build.func`, covering common scenarios, CLI examples, and environment variable combinations. - -## Basic Usage Examples - -### 1. 
Simple Container Creation - -**Scenario**: Create a basic Plex media server container - -```bash -# Set basic environment variables -export APP="plex" -export CTID="100" -export var_hostname="plex-server" -export var_os="debian" -export var_version="12" -export var_cpu="4" -export var_ram="4096" -export var_disk="20" -export var_net="vmbr0" -export var_gateway="192.168.1.1" -export var_ip="192.168.1.100" -export var_template_storage="local" -export var_container_storage="local" - -# Execute build.func -source build.func -``` - -**Expected Output**: -``` -Creating Plex container... -Container ID: 100 -Hostname: plex-server -OS: Debian 12 -Resources: 4 CPU, 4GB RAM, 20GB Disk -Network: 192.168.1.100/24 -Container created successfully! -``` - -### 2. Advanced Configuration - -**Scenario**: Create a Nextcloud container with custom settings - -```bash -# Set advanced environment variables -export APP="nextcloud" -export CTID="101" -export var_hostname="nextcloud-server" -export var_os="ubuntu" -export var_version="22.04" -export var_cpu="6" -export var_ram="8192" -export var_disk="50" -export var_net="vmbr0" -export var_gateway="192.168.1.1" -export var_ip="192.168.1.101" -export var_vlan="100" -export var_mtu="9000" -export var_template_storage="ssd-storage" -export var_container_storage="ssd-storage" -export var_fuse="yes" -export var_tun="yes" -export SSH="true" - -# Execute build.func -source build.func -``` - -### 3. 
GPU Passthrough Configuration - -**Scenario**: Create a Jellyfin container with NVIDIA GPU passthrough - -```bash -# Set GPU passthrough variables -export APP="jellyfin" -export CTID="102" -export var_hostname="jellyfin-server" -export var_os="debian" -export var_version="12" -export var_cpu="8" -export var_ram="16384" -export var_disk="30" -export var_net="vmbr0" -export var_gateway="192.168.1.1" -export var_ip="192.168.1.102" -export var_template_storage="local" -export var_container_storage="local" -export GPU_APPS="jellyfin" -export var_gpu="nvidia" -export ENABLE_PRIVILEGED="true" -export ENABLE_FUSE="true" -export ENABLE_TUN="true" - -# Execute build.func -source build.func -``` - -## Silent/Non-Interactive Examples - -### 1. Automated Deployment - -**Scenario**: Deploy multiple containers without user interaction - -```bash -#!/bin/bash -# Automated deployment script - -# Function to create container -create_container() { - local app=$1 - local ctid=$2 - local ip=$3 - - export APP="$app" - export CTID="$ctid" - export var_hostname="${app}-server" - export var_os="debian" - export var_version="12" - export var_cpu="2" - export var_ram="2048" - export var_disk="10" - export var_net="vmbr0" - export var_gateway="192.168.1.1" - export var_ip="$ip" - export var_template_storage="local" - export var_container_storage="local" - export ENABLE_FUSE="true" - export ENABLE_TUN="true" - export SSH="true" - - source build.func -} - -# Create multiple containers -create_container "plex" "100" "192.168.1.100" -create_container "nextcloud" "101" "192.168.1.101" -create_container "nginx" "102" "192.168.1.102" -``` - -### 2. 
Development Environment Setup - -**Scenario**: Create development containers with specific configurations - -```bash -#!/bin/bash -# Development environment setup - -# Development container configuration -export APP="dev-container" -export CTID="200" -export var_hostname="dev-server" -export var_os="ubuntu" -export var_version="22.04" -export var_cpu="4" -export var_ram="4096" -export var_disk="20" -export var_net="vmbr0" -export var_gateway="192.168.1.1" -export var_ip="192.168.1.200" -export var_template_storage="local" -export var_container_storage="local" -export ENABLE_NESTING="true" -export ENABLE_PRIVILEGED="true" -export ENABLE_FUSE="true" -export ENABLE_TUN="true" -export SSH="true" - -# Execute build.func -source build.func -``` - -## Network Configuration Examples - -### 1. VLAN Configuration - -**Scenario**: Create container with VLAN support - -```bash -# VLAN configuration -export APP="web-server" -export CTID="300" -export var_hostname="web-server" -export var_os="debian" -export var_version="12" -export var_cpu="2" -export var_ram="2048" -export var_disk="10" -export var_net="vmbr0" -export var_gateway="192.168.100.1" -export var_ip="192.168.100.100" -export var_vlan="100" -export var_mtu="1500" -export var_template_storage="local" -export var_container_storage="local" - -source build.func -``` - -### 2. IPv6 Configuration - -**Scenario**: Create container with IPv6 support - -```bash -# IPv6 configuration -export APP="ipv6-server" -export CTID="301" -export var_hostname="ipv6-server" -export var_os="debian" -export var_version="12" -export var_cpu="2" -export var_ram="2048" -export var_disk="10" -export var_net="vmbr0" -export var_gateway="192.168.1.1" -export var_ip="192.168.1.101" -export var_ipv6="2001:db8::101" -export IPV6_METHOD="static" -export var_template_storage="local" -export var_container_storage="local" - -source build.func -``` - -## Storage Configuration Examples - -### 1. 
Custom Storage Locations - -**Scenario**: Use different storage for templates and containers - -```bash -# Custom storage configuration -export APP="storage-test" -export CTID="400" -export var_hostname="storage-test" -export var_os="debian" -export var_version="12" -export var_cpu="2" -export var_ram="2048" -export var_disk="10" -export var_net="vmbr0" -export var_gateway="192.168.1.1" -export var_ip="192.168.1.140" -export var_template_storage="nfs-storage" -export var_container_storage="ssd-storage" - -source build.func -``` - -### 2. High-Performance Storage - -**Scenario**: Use high-performance storage for resource-intensive applications - -```bash -# High-performance storage configuration -export APP="database-server" -export CTID="401" -export var_hostname="database-server" -export var_os="debian" -export var_version="12" -export var_cpu="8" -export var_ram="16384" -export var_disk="100" -export var_net="vmbr0" -export var_gateway="192.168.1.1" -export var_ip="192.168.1.141" -export var_template_storage="nvme-storage" -export var_container_storage="nvme-storage" - -source build.func -``` - -## Feature Configuration Examples - -### 1. Privileged Container - -**Scenario**: Create privileged container for system-level access - -```bash -# Privileged container configuration -export APP="system-container" -export CTID="500" -export var_hostname="system-container" -export var_os="debian" -export var_version="12" -export var_cpu="4" -export var_ram="4096" -export var_disk="20" -export var_net="vmbr0" -export var_gateway="192.168.1.1" -export var_ip="192.168.1.150" -export var_template_storage="local" -export var_container_storage="local" -export ENABLE_PRIVILEGED="true" -export ENABLE_FUSE="true" -export ENABLE_TUN="true" -export ENABLE_KEYCTL="true" -export ENABLE_MOUNT="true" - -source build.func -``` - -### 2. 
Unprivileged Container - -**Scenario**: Create secure unprivileged container - -```bash -# Unprivileged container configuration -export APP="secure-container" -export CTID="501" -export var_hostname="secure-container" -export var_os="debian" -export var_version="12" -export var_cpu="2" -export var_ram="2048" -export var_disk="10" -export var_net="vmbr0" -export var_gateway="192.168.1.1" -export var_ip="192.168.1.151" -export var_template_storage="local" -export var_container_storage="local" -export ENABLE_UNPRIVILEGED="true" -export ENABLE_FUSE="true" -export ENABLE_TUN="true" - -source build.func -``` - -## Settings Persistence Examples - -### 1. Save Global Defaults - -**Scenario**: Save current settings as global defaults - -```bash -# Save global defaults -export APP="default-test" -export CTID="600" -export var_hostname="default-test" -export var_os="debian" -export var_version="12" -export var_cpu="2" -export var_ram="2048" -export var_disk="10" -export var_net="vmbr0" -export var_gateway="192.168.1.1" -export var_ip="192.168.1.160" -export var_template_storage="local" -export var_container_storage="local" -export SAVE_DEFAULTS="true" - -source build.func -``` - -### 2. Save App-Specific Defaults - -**Scenario**: Save settings as app-specific defaults - -```bash -# Save app-specific defaults -export APP="plex" -export CTID="601" -export var_hostname="plex-server" -export var_os="debian" -export var_version="12" -export var_cpu="4" -export var_ram="4096" -export var_disk="20" -export var_net="vmbr0" -export var_gateway="192.168.1.1" -export var_ip="192.168.1.161" -export var_template_storage="local" -export var_container_storage="local" -export SAVE_APP_DEFAULTS="true" - -source build.func -``` - -## Error Handling Examples - -### 1. 
Validation Error Handling - -**Scenario**: Handle configuration validation errors - -```bash -#!/bin/bash -# Error handling example - -# Set invalid configuration -export APP="error-test" -export CTID="700" -export var_hostname="error-test" -export var_os="invalid-os" -export var_version="invalid-version" -export var_cpu="invalid-cpu" -export var_ram="invalid-ram" -export var_disk="invalid-disk" -export var_net="invalid-network" -export var_gateway="invalid-gateway" -export var_ip="invalid-ip" - -# Execute with error handling -if source build.func; then - echo "Container created successfully!" -else - echo "Error: Container creation failed!" - echo "Please check your configuration and try again." -fi -``` - -### 2. Storage Error Handling - -**Scenario**: Handle storage selection errors - -```bash -#!/bin/bash -# Storage error handling - -# Set invalid storage -export APP="storage-error-test" -export CTID="701" -export var_hostname="storage-error-test" -export var_os="debian" -export var_version="12" -export var_cpu="2" -export var_ram="2048" -export var_disk="10" -export var_net="vmbr0" -export var_gateway="192.168.1.1" -export var_ip="192.168.1.170" -export var_template_storage="nonexistent-storage" -export var_container_storage="nonexistent-storage" - -# Execute with error handling -if source build.func; then - echo "Container created successfully!" -else - echo "Error: Storage not available!" - echo "Please check available storage and try again." -fi -``` - -## Integration Examples - -### 1. 
With Install Scripts - -**Scenario**: Integrate with application install scripts - -```bash -#!/bin/bash -# Integration with install scripts - -# Create container -export APP="plex" -export CTID="800" -export var_hostname="plex-server" -export var_os="debian" -export var_version="12" -export var_cpu="4" -export var_ram="4096" -export var_disk="20" -export var_net="vmbr0" -export var_gateway="192.168.1.1" -export var_ip="192.168.1.180" -export var_template_storage="local" -export var_container_storage="local" - -# Create container -source build.func - -# Run install script -if [ -f "plex-install.sh" ]; then - source plex-install.sh -else - echo "Install script not found!" -fi -``` - -### 2. With Monitoring - -**Scenario**: Integrate with monitoring systems - -```bash -#!/bin/bash -# Monitoring integration - -# Create container with monitoring -export APP="monitored-app" -export CTID="801" -export var_hostname="monitored-app" -export var_os="debian" -export var_version="12" -export var_cpu="2" -export var_ram="2048" -export var_disk="10" -export var_net="vmbr0" -export var_gateway="192.168.1.1" -export var_ip="192.168.1.181" -export var_template_storage="local" -export var_container_storage="local" -export DIAGNOSTICS="true" - -# Create container -source build.func - -# Set up monitoring -if [ -f "monitoring-setup.sh" ]; then - source monitoring-setup.sh -fi -``` - -## Best Practices - -### 1. 
Environment Variable Management - -```bash -#!/bin/bash -# Best practice: Environment variable management - -# Set configuration file -CONFIG_FILE="/etc/build.func.conf" - -# Load configuration if exists -if [ -f "$CONFIG_FILE" ]; then - source "$CONFIG_FILE" -fi - -# Set required variables -export APP="${APP:-plex}" -export CTID="${CTID:-100}" -export var_hostname="${var_hostname:-plex-server}" -export var_os="${var_os:-debian}" -export var_version="${var_version:-12}" -export var_cpu="${var_cpu:-2}" -export var_ram="${var_ram:-2048}" -export var_disk="${var_disk:-10}" -export var_net="${var_net:-vmbr0}" -export var_gateway="${var_gateway:-192.168.1.1}" -export var_ip="${var_ip:-192.168.1.100}" -export var_template_storage="${var_template_storage:-local}" -export var_container_storage="${var_container_storage:-local}" - -# Execute build.func -source build.func -``` - -### 2. Error Handling and Logging - -```bash -#!/bin/bash -# Best practice: Error handling and logging - -# Set log file -LOG_FILE="/var/log/build.func.log" - -# Function to log messages -log_message() { - echo "$(date): $1" >> "$LOG_FILE" -} - -# Function to create container with error handling -create_container() { - local app=$1 - local ctid=$2 - - log_message "Starting container creation for $app (ID: $ctid)" - - # Set variables - export APP="$app" - export CTID="$ctid" - export var_hostname="${app}-server" - export var_os="debian" - export var_version="12" - export var_cpu="2" - export var_ram="2048" - export var_disk="10" - export var_net="vmbr0" - export var_gateway="192.168.1.1" - export var_ip="192.168.1.$ctid" - export var_template_storage="local" - export var_container_storage="local" - - # Create container - if source build.func; then - log_message "Container $app created successfully (ID: $ctid)" - return 0 - else - log_message "Error: Failed to create container $app (ID: $ctid)" - return 1 - fi -} - -# Create containers -create_container "plex" "100" -create_container "nextcloud" "101" 
-create_container "nginx" "102" -``` diff --git a/docs/misc/build.func/README.md b/docs/misc/build.func/README.md deleted file mode 100644 index 2b495d081..000000000 --- a/docs/misc/build.func/README.md +++ /dev/null @@ -1,270 +0,0 @@ -# build.func Documentation - -## Overview - -This directory contains comprehensive documentation for the `build.func` script, which is the core orchestration script for Proxmox LXC container creation in the Community Scripts project. - -## Documentation Files - -### 🎛️ [BUILD_FUNC_ADVANCED_SETTINGS.md](./BUILD_FUNC_ADVANCED_SETTINGS.md) -Complete reference for the 28-step Advanced Settings wizard, including all configurable options and their inheritance behavior. - -**Contents:** -- All 28 wizard steps explained -- Default value inheritance -- Feature matrix (when to enable each feature) -- Confirmation summary format -- Usage examples - -### 📊 [BUILD_FUNC_FLOWCHART.md](./BUILD_FUNC_FLOWCHART.md) -Visual ASCII flowchart showing the main execution flow, decision trees, and key decision points in the build.func script. - -**Contents:** -- Main execution flow diagram -- Installation mode selection flows -- Storage selection workflow -- GPU passthrough decision logic -- Variable precedence chain -- Error handling flow -- Integration points - -### 🔧 [BUILD_FUNC_ENVIRONMENT_VARIABLES.md](./BUILD_FUNC_ENVIRONMENT_VARIABLES.md) -Complete reference of all environment variables used in build.func, organized by category and usage context. 
- -**Contents:** -- Core container variables -- Operating system variables -- Resource configuration variables -- Network configuration variables -- Storage configuration variables -- Feature flags -- GPU passthrough variables -- API and diagnostics variables -- Settings persistence variables -- Variable precedence chain -- Critical variables for non-interactive use -- Common variable combinations - -### 📚 [BUILD_FUNC_FUNCTIONS_REFERENCE.md](./BUILD_FUNC_FUNCTIONS_REFERENCE.md) -Alphabetical function reference with detailed descriptions, parameters, dependencies, and usage information. - -**Contents:** -- Initialization functions -- UI and menu functions -- Storage functions -- Container creation functions -- GPU and hardware functions -- Settings persistence functions -- Utility functions -- Function call flow -- Function dependencies -- Function usage examples -- Function error handling - -### 🔄 [BUILD_FUNC_EXECUTION_FLOWS.md](./BUILD_FUNC_EXECUTION_FLOWS.md) -Detailed execution flows for different installation modes and scenarios, including variable precedence and decision trees. - -**Contents:** -- Default install flow -- Advanced install flow -- My defaults flow -- App defaults flow -- Variable precedence chain -- Storage selection logic -- GPU passthrough flow -- Network configuration flow -- Container creation flow -- Error handling flows -- Integration flows -- Performance considerations - -### 🏗️ [BUILD_FUNC_ARCHITECTURE.md](./BUILD_FUNC_ARCHITECTURE.md) -High-level architectural overview including module dependencies, data flow, integration points, and system architecture. 
- -**Contents:** -- High-level architecture diagram -- Module dependencies -- Data flow architecture -- Integration architecture -- System architecture components -- User interface components -- Security architecture -- Performance architecture -- Deployment architecture -- Maintenance architecture -- Future architecture considerations - -### 💡 [BUILD_FUNC_USAGE_EXAMPLES.md](./BUILD_FUNC_USAGE_EXAMPLES.md) -Practical usage examples covering common scenarios, CLI examples, and environment variable combinations. - -**Contents:** -- Basic usage examples -- Silent/non-interactive examples -- Network configuration examples -- Storage configuration examples -- Feature configuration examples -- Settings persistence examples -- Error handling examples -- Integration examples -- Best practices - -## Quick Start Guide - -### For New Users -1. Start with [BUILD_FUNC_FLOWCHART.md](./BUILD_FUNC_FLOWCHART.md) to understand the overall flow -2. Review [BUILD_FUNC_ENVIRONMENT_VARIABLES.md](./BUILD_FUNC_ENVIRONMENT_VARIABLES.md) for configuration options -3. Follow examples in [BUILD_FUNC_USAGE_EXAMPLES.md](./BUILD_FUNC_USAGE_EXAMPLES.md) - -### For Developers -1. Read [BUILD_FUNC_ARCHITECTURE.md](./BUILD_FUNC_ARCHITECTURE.md) for system overview -2. Study [BUILD_FUNC_FUNCTIONS_REFERENCE.md](./BUILD_FUNC_FUNCTIONS_REFERENCE.md) for function details -3. Review [BUILD_FUNC_EXECUTION_FLOWS.md](./BUILD_FUNC_EXECUTION_FLOWS.md) for implementation details - -### For System Administrators -1. Focus on [BUILD_FUNC_USAGE_EXAMPLES.md](./BUILD_FUNC_USAGE_EXAMPLES.md) for deployment scenarios -2. Review [BUILD_FUNC_ENVIRONMENT_VARIABLES.md](./BUILD_FUNC_ENVIRONMENT_VARIABLES.md) for configuration management -3. Check [BUILD_FUNC_ARCHITECTURE.md](./BUILD_FUNC_ARCHITECTURE.md) for security and performance considerations - -## Key Concepts - -### Variable Precedence -Variables are resolved in this order (highest to lowest priority): -1. Hard environment variables (set before script execution) -2. 
App-specific .vars file (`/usr/local/community-scripts/defaults/.vars`) -3. Global default.vars file (`/usr/local/community-scripts/default.vars`) -4. Built-in defaults (set in `base_settings()` function) - -### Installation Modes -- **Default Install**: Uses built-in defaults, minimal prompts -- **Advanced Install**: Full interactive configuration via whiptail -- **My Defaults**: Loads from global default.vars file -- **App Defaults**: Loads from app-specific .vars file - -### Storage Selection Logic -1. If only 1 storage exists for content type → auto-select -2. If preselected via environment variables → validate and use -3. Otherwise → prompt user via whiptail - -### GPU Passthrough Flow -1. Detect hardware (Intel/AMD/NVIDIA) -2. Check if app is in GPU_APPS list OR container is privileged -3. Auto-select if single GPU type, prompt if multiple -4. Configure `/etc/pve/lxc/.conf` with proper device entries -5. Fix GIDs post-creation to match container's video/render groups - -## Common Use Cases - -### Basic Container Creation -```bash -export APP="plex" -export CTID="100" -export var_hostname="plex-server" -export var_os="debian" -export var_version="12" -export var_cpu="4" -export var_ram="4096" -export var_disk="20" -export var_net="vmbr0" -export var_gateway="192.168.1.1" -export var_ip="192.168.1.100" -export var_template_storage="local" -export var_container_storage="local" - -source build.func -``` - -### GPU Passthrough -```bash -export APP="jellyfin" -export CTID="101" -export var_hostname="jellyfin-server" -export var_os="debian" -export var_version="12" -export var_cpu="8" -export var_ram="16384" -export var_disk="30" -export var_net="vmbr0" -export var_gateway="192.168.1.1" -export var_ip="192.168.1.101" -export var_template_storage="local" -export var_container_storage="local" -export GPU_APPS="jellyfin" -export var_gpu="nvidia" -export ENABLE_PRIVILEGED="true" - -source build.func -``` - -### Silent/Non-Interactive Deployment -```bash -#!/bin/bash -# 
Automated deployment -export APP="nginx" -export CTID="102" -export var_hostname="nginx-proxy" -export var_os="alpine" -export var_version="3.18" -export var_cpu="1" -export var_ram="512" -export var_disk="2" -export var_net="vmbr0" -export var_gateway="192.168.1.1" -export var_ip="192.168.1.102" -export var_template_storage="local" -export var_container_storage="local" -export ENABLE_UNPRIVILEGED="true" - -source build.func -``` - -## Troubleshooting - -### Common Issues -1. **Container creation fails**: Check resource availability and configuration validity -2. **Storage errors**: Verify storage exists and supports required content types -3. **Network errors**: Validate network configuration and IP address availability -4. **GPU passthrough issues**: Check hardware detection and container privileges -5. **Permission errors**: Verify user permissions and container privileges - -### Debug Mode -Enable verbose output for debugging: -```bash -export VERBOSE="true" -export DIAGNOSTICS="true" -source build.func -``` - -### Log Files -Check system logs for detailed error information: -- `/var/log/syslog` -- `/var/log/pve/lxc/.log` -- Container-specific logs - -## Contributing - -When contributing to build.func documentation: -1. Update relevant documentation files -2. Add examples for new features -3. Update architecture diagrams if needed -4. Test all examples before submitting -5. Follow the existing documentation style - -## Related Documentation - -- [Main README](../../README.md) - Project overview -- [Installation Guide](../../install/) - Installation scripts -- [Container Templates](../../ct/) - Container templates -- [Tools](../../tools/) - Additional tools and utilities - -## Support - -For issues and questions: -1. Check this documentation first -2. Review the [troubleshooting section](#troubleshooting) -3. Check existing issues in the project repository -4. 
Create a new issue with detailed information - ---- - -*Last updated: $(date)* -*Documentation version: 1.0* diff --git a/docs/misc/cloud-init.func/CLOUD_INIT_FUNC_FLOWCHART.md b/docs/misc/cloud-init.func/CLOUD_INIT_FUNC_FLOWCHART.md deleted file mode 100644 index ad03c9438..000000000 --- a/docs/misc/cloud-init.func/CLOUD_INIT_FUNC_FLOWCHART.md +++ /dev/null @@ -1,28 +0,0 @@ -# cloud-init.func Flowchart - -Cloud-init VM provisioning flow. - -## Cloud-Init Generation and Application - -``` -generate_cloud_init() - ↓ -generate_user_data() - ↓ -setup_ssh_keys() - ↓ -Apply to VM - ↓ -VM Boot - ↓ -cloud-init phases -├─ system -├─ config -└─ final - ↓ -VM Ready ✓ -``` - ---- - -**Last Updated**: December 2025 diff --git a/docs/misc/cloud-init.func/CLOUD_INIT_FUNC_FUNCTIONS_REFERENCE.md b/docs/misc/cloud-init.func/CLOUD_INIT_FUNC_FUNCTIONS_REFERENCE.md deleted file mode 100644 index 1ae2f44b9..000000000 --- a/docs/misc/cloud-init.func/CLOUD_INIT_FUNC_FUNCTIONS_REFERENCE.md +++ /dev/null @@ -1,30 +0,0 @@ -# cloud-init.func Functions Reference - -Cloud-init and VM provisioning functions. - -## Core Functions - -### generate_cloud_init() -Generate cloud-init configuration. - -### generate_user_data() -Generate user-data script for VM. - -### apply_cloud_init() -Apply cloud-init to VM. - -### setup_ssh_keys() -Deploy SSH public keys. - -### setup_static_ip() -Configure static IP on VM. - -### setup_dns() -Configure DNS for VM. - -### setup_ipv6() -Enable IPv6 on VM. - ---- - -**Last Updated**: December 2025 diff --git a/docs/misc/cloud-init.func/CLOUD_INIT_FUNC_INTEGRATION.md b/docs/misc/cloud-init.func/CLOUD_INIT_FUNC_INTEGRATION.md deleted file mode 100644 index d494a4bed..000000000 --- a/docs/misc/cloud-init.func/CLOUD_INIT_FUNC_INTEGRATION.md +++ /dev/null @@ -1,7 +0,0 @@ -# cloud-init.func Integration Guide - -Cloud-init integration with Proxmox VM provisioning. 
- ---- - -**Last Updated**: December 2025 diff --git a/docs/misc/cloud-init.func/CLOUD_INIT_FUNC_USAGE_EXAMPLES.md b/docs/misc/cloud-init.func/CLOUD_INIT_FUNC_USAGE_EXAMPLES.md deleted file mode 100644 index 13b6dacca..000000000 --- a/docs/misc/cloud-init.func/CLOUD_INIT_FUNC_USAGE_EXAMPLES.md +++ /dev/null @@ -1,17 +0,0 @@ -# cloud-init.func Usage Examples - -Examples for VM cloud-init configuration. - -### Example: Basic Cloud-Init - -```bash -#!/usr/bin/env bash - -generate_cloud_init > cloud-init.yaml -setup_ssh_keys "$VMID" "$SSH_KEY" -apply_cloud_init "$VMID" cloud-init.yaml -``` - ---- - -**Last Updated**: December 2025 diff --git a/docs/misc/cloud-init.func/README.md b/docs/misc/cloud-init.func/README.md deleted file mode 100644 index 53450b4c2..000000000 --- a/docs/misc/cloud-init.func/README.md +++ /dev/null @@ -1,339 +0,0 @@ -# cloud-init.func Documentation - -## Overview - -The `cloud-init.func` file provides cloud-init configuration and VM initialization functions for Proxmox VE virtual machines. It handles user data, cloud-config generation, and VM setup automation. 
- -## Purpose and Use Cases - -- **VM Cloud-Init Setup**: Generate and apply cloud-init configurations for VMs -- **User Data Generation**: Create user-data scripts for VM initialization -- **Cloud-Config**: Generate cloud-config YAML for VM provisioning -- **SSH Key Management**: Setup SSH keys for VM access -- **Network Configuration**: Configure networking for VMs -- **Automated VM Provisioning**: Complete VM setup without manual intervention - -## Quick Reference - -### Key Function Groups -- **Cloud-Init Core**: Generate and apply cloud-init configurations -- **User Data**: Create initialization scripts for VMs -- **SSH Setup**: Deploy SSH keys automatically -- **Network Configuration**: Setup networking during VM provisioning -- **VM Customization**: Apply custom settings to VMs - -### Dependencies -- **External**: `cloud-init`, `curl`, `qemu-img` -- **Internal**: Uses functions from `core.func`, `error_handler.func` - -### Integration Points -- Used by: VM creation scripts (vm/*.sh) -- Uses: Environment variables from build.func -- Provides: VM initialization and cloud-init services - -## Documentation Files - -### 📊 [CLOUD_INIT_FUNC_FLOWCHART.md](./CLOUD_INIT_FUNC_FLOWCHART.md) -Visual execution flows showing cloud-init generation and VM provisioning workflows. - -### 📚 [CLOUD_INIT_FUNC_FUNCTIONS_REFERENCE.md](./CLOUD_INIT_FUNC_FUNCTIONS_REFERENCE.md) -Complete alphabetical reference of all cloud-init functions. - -### 💡 [CLOUD_INIT_FUNC_USAGE_EXAMPLES.md](./CLOUD_INIT_FUNC_USAGE_EXAMPLES.md) -Practical examples for VM cloud-init setup and customization. - -### 🔗 [CLOUD_INIT_FUNC_INTEGRATION.md](./CLOUD_INIT_FUNC_INTEGRATION.md) -How cloud-init.func integrates with VM creation and Proxmox workflows. 
- -## Key Features - -### Cloud-Init Configuration -- **User Data Generation**: Create custom initialization scripts -- **Cloud-Config YAML**: Generate standardized cloud-config -- **SSH Keys**: Automatically deploy public keys -- **Package Installation**: Install packages during VM boot -- **Custom Commands**: Run arbitrary commands on first boot - -### VM Network Setup -- **DHCP Configuration**: Configure DHCP for automatic IP assignment -- **Static IP Setup**: Configure static IP addresses -- **IPv6 Support**: Enable IPv6 on VMs -- **DNS Configuration**: Set DNS servers for VM -- **Firewall Rules**: Basic firewall configuration - -### Security Features -- **SSH Key Injection**: Deploy SSH keys during VM creation -- **Disable Passwords**: Disable password authentication -- **Sudoers Configuration**: Setup sudo access -- **User Management**: Create and configure users - -## Function Categories - -### 🔹 Cloud-Init Core Functions -- `generate_cloud_init()` - Create cloud-init configuration -- `generate_user_data()` - Generate user-data script -- `apply_cloud_init()` - Apply cloud-init to VM -- `validate_cloud_init()` - Validate cloud-config syntax - -### 🔹 SSH & Security Functions -- `setup_ssh_keys()` - Deploy SSH public keys -- `setup_sudo()` - Configure sudoers -- `create_user()` - Create new user account -- `disable_password_auth()` - Disable password login - -### 🔹 Network Configuration Functions -- `setup_dhcp()` - Configure DHCP networking -- `setup_static_ip()` - Configure static IP -- `setup_dns()` - Configure DNS servers -- `setup_ipv6()` - Enable IPv6 support - -### 🔹 VM Customization Functions -- `install_packages()` - Install packages during boot -- `run_custom_commands()` - Execute custom scripts -- `configure_hostname()` - Set VM hostname -- `configure_timezone()` - Set VM timezone - -## Cloud-Init Workflow - -``` -VM Created - ↓ -cloud-init (system) boot phase - ↓ -User-Data Script Execution - ↓ -├─ Install packages -├─ Deploy SSH keys -├─ Configure 
network -└─ Create users - ↓ -cloud-init config phase - ↓ -Apply cloud-config settings - ↓ -cloud-init final phase - ↓ -VM Ready for Use -``` - -## Common Usage Patterns - -### Basic VM Setup with Cloud-Init -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -# Generate cloud-init configuration -cat > cloud-init.yaml < user-data.txt - -# Inject SSH key -setup_ssh_keys "$VMID" "$SSH_KEY" - -# Create VM with cloud-init -qm create $VMID ... --cicustom local:snippets/user-data -``` - -### Network Configuration -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -# Static IP setup -setup_static_ip "192.168.1.100" "255.255.255.0" "192.168.1.1" - -# DNS configuration -setup_dns "8.8.8.8 8.8.4.4" - -# IPv6 support -setup_ipv6 -``` - -## Best Practices - -### ✅ DO -- Validate cloud-config syntax before applying -- Use cloud-init for automated setup -- Deploy SSH keys for secure access -- Test cloud-init configuration in non-production first -- Use DHCP for easier VM deployment -- Document custom cloud-init configurations -- Version control cloud-init templates - -### ❌ DON'T -- Use weak SSH keys or passwords -- Leave SSH password authentication enabled -- Hardcode credentials in cloud-init -- Skip validation of cloud-config -- Use untrusted cloud-init sources -- Forget to set timezone on VMs -- Mix cloud-init versions - -## Cloud-Config Format - -### Example Cloud-Config -```yaml -#cloud-config -# This is a comment - -# System configuration -hostname: myvm -timezone: UTC -package_upgrade: true - -# Packages to install -packages: - - curl - - wget - - git - - build-essential - -# SSH keys for users -ssh_authorized_keys: - - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC... - -# Users to create -users: - - name: ubuntu - home: /home/ubuntu - shell: /bin/bash - sudo: ['ALL=(ALL) NOPASSWD:ALL'] - ssh_authorized_keys: - - ssh-rsa AAAAB3... 
- -# Commands to run on boot -runcmd: - - apt-get update - - apt-get upgrade -y - - systemctl restart ssh - -# Files to create -write_files: - - path: /etc/profile.d/custom.sh - content: | - export CUSTOM_VAR="value" -``` - -## VM Network Configuration - -### DHCP Configuration -```bash -network: - version: 2 - ethernets: - eth0: - dhcp4: true - dhcp6: true -``` - -### Static IP Configuration -```bash -network: - version: 2 - ethernets: - eth0: - addresses: - - 192.168.1.100/24 - gateway4: 192.168.1.1 - nameservers: - addresses: [8.8.8.8, 8.8.4.4] -``` - -## Troubleshooting - -### "Cloud-Init Configuration Not Applied" -```bash -# Check cloud-init status in VM -cloud-init status -cloud-init status --long - -# View cloud-init logs -tail /var/log/cloud-init.log -``` - -### "SSH Keys Not Deployed" -```bash -# Verify SSH key in cloud-config -grep ssh_authorized_keys user-data.txt - -# Check permissions -ls -la ~/.ssh/authorized_keys -``` - -### "Network Not Configured" -```bash -# Check network configuration -ip addr show -ip route show - -# View netplan (if used) -cat /etc/netplan/*.yaml -``` - -### "Packages Failed to Install" -```bash -# Check cloud-init package log -tail /var/log/cloud-init-output.log - -# Manual package installation -apt-get update && apt-get install -y package-name -``` - -## Related Documentation - -- **[install.func/](../install.func/)** - Container installation (similar workflow) -- **[core.func/](../core.func/)** - Utility functions -- **[error_handler.func/](../error_handler.func/)** - Error handling -- **[UPDATED_APP-install.md](../../UPDATED_APP-install.md)** - Application setup guide -- **Proxmox Docs**: https://pve.proxmox.com/wiki/Cloud-Init - -## Recent Updates - -### Version 2.0 (Dec 2025) -- ✅ Enhanced cloud-init validation -- ✅ Improved SSH key deployment -- ✅ Better network configuration support -- ✅ Added IPv6 support -- ✅ Streamlined user and package setup - ---- - -**Last Updated**: December 2025 -**Maintainers**: 
community-scripts team -**License**: MIT diff --git a/docs/misc/core.func/CORE_FLOWCHART.md b/docs/misc/core.func/CORE_FLOWCHART.md deleted file mode 100644 index 2b9dd98ce..000000000 --- a/docs/misc/core.func/CORE_FLOWCHART.md +++ /dev/null @@ -1,316 +0,0 @@ -# core.func Execution Flowchart - -## Main Execution Flow - -``` -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ core.func Loading │ -│ Entry point when core.func is sourced by other scripts │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Load Prevention Check │ -│ • Check if _CORE_FUNC_LOADED is set │ -│ • Return early if already loaded │ -│ • Set _CORE_FUNC_LOADED=1 to prevent reloading │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ LOAD_FUNCTIONS() │ -│ Main function loader - sets up all core utilities │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Core Function Loading Sequence │ -│ │ -│ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────────────┐ │ -│ │ color() │ │ formatting() │ │ icons() │ │ -│ │ │ │ │ │ │ │ -│ │ • Set ANSI │ │ • Set format │ │ • Set symbolic icons │ │ -│ │ color codes │ │ helpers │ │ • Define message │ │ -│ │ • Define │ │ • Tab, bold, │ │ symbols │ │ -│ │ colors │ │ line reset │ │ • Status indicators │ │ -│ └─────────────────┘ └─────────────────┘ └─────────────────────────────┘ │ -│ │ -│ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────────────┐ │ -│ │ default_vars() │ │ set_std_mode() │ │ Additional Functions │ │ -│ │ │ │ │ │ │ │ -│ │ • Set retry │ │ • Set verbose │ │ • Add more functions │ │ -│ │ variables │ │ mode │ │ 
as needed │ │ -│ │ • Initialize │ │ • Configure │ │ │ │ -│ │ counters │ │ STD variable │ │ │ │ -│ └─────────────────┘ └─────────────────┘ └─────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ -``` - -## System Check Functions Flow - -``` -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ System Validation Flow │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ PVE_CHECK() │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Get PVE │ │ Check PVE │ │ Check PVE │ │ │ -│ │ │ Version │ │ 8.x Support │ │ 9.x Support │ │ │ -│ │ │ │ │ │ │ │ │ │ -│ │ │ • pveversion │ │ • Allow 8.0-8.9│ │ • Allow ONLY 9.0 │ │ │ -│ │ │ • Parse version │ │ • Reject others │ │ • Reject 9.1+ │ │ │ -│ │ │ • Extract │ │ • Exit if │ │ • Exit if │ │ │ -│ │ │ major.minor │ │ unsupported │ │ unsupported │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ ARCH_CHECK() │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Check │ │ AMD64 Check │ │ PiMox Warning │ │ │ -│ │ │ Architecture │ │ │ │ │ │ │ -│ │ │ │ │ • dpkg --print- │ │ • Show PiMox │ │ │ -│ │ │ • Get system │ │ architecture │ │ message │ │ │ -│ │ │ architecture │ │ • Must be │ │ • Point to ARM64 │ │ │ -│ │ │ • Compare with │ │ "amd64" │ │ support │ │ │ -│ │ │ "amd64" │ │ • Exit if not │ │ • Exit script │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ SHELL_CHECK() │ │ -│ │ │ │ -│ │ ┌─────────────────┐ 
┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Check │ │ Bash Check │ │ Error Handling │ │ │ -│ │ │ Shell Type │ │ │ │ │ │ │ -│ │ │ │ │ • ps -p $$ -o │ │ • Clear screen │ │ │ -│ │ │ • Get current │ │ comm= │ │ • Show error │ │ │ -│ │ │ shell │ │ • Must be │ │ • Sleep and exit │ │ │ -│ │ │ • Compare with │ │ "bash" │ │ │ │ │ -│ │ │ "bash" │ │ • Exit if not │ │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ ROOT_CHECK() │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Check │ │ Root Check │ │ Sudo Check │ │ │ -│ │ │ User ID │ │ │ │ │ │ │ -│ │ │ │ │ • id -u │ │ • Check parent │ │ │ -│ │ │ • Get user ID │ │ • Must be 0 │ │ process │ │ │ -│ │ │ • Check if │ │ • Exit if not │ │ • Detect sudo │ │ │ -│ │ │ root (0) │ │ root │ │ usage │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ -``` - -## Message System Flow - -``` -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Message System Flow │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ MSG_INFO() │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Message │ │ Duplicate │ │ Display Mode │ │ │ -│ │ │ Validation │ │ Check │ │ Selection │ │ │ -│ │ │ │ │ │ │ │ │ │ -│ │ │ • Check if │ │ • Track shown │ │ • Verbose mode: │ │ │ -│ │ │ message │ │ messages │ │ Show directly │ │ │ -│ │ │ exists │ │ • Skip if │ │ • Normal mode: │ │ │ -│ │ │ • Return if │ │ already │ │ Start spinner │ │ │ -│ │ │ empty │ │ shown │ │ │ │ │ -│ │ └─────────────────┘ 
└─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ SPINNER() │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Spinner │ │ Animation │ │ Display │ │ │ -│ │ │ Initialization│ │ Loop │ │ Control │ │ │ -│ │ │ │ │ │ │ │ │ │ -│ │ │ • Define │ │ • Cycle through │ │ • Print spinner │ │ │ -│ │ │ characters │ │ characters │ │ character │ │ │ -│ │ │ • Set index │ │ • Sleep 0.1s │ │ • Print message │ │ │ -│ │ │ • Start loop │ │ • Increment │ │ • Clear line │ │ │ -│ │ │ │ │ index │ │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ STOP_SPINNER() │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Get Spinner │ │ Kill Process │ │ Cleanup │ │ │ -│ │ │ PID │ │ │ │ │ │ │ -│ │ │ │ │ • Send TERM │ │ • Remove PID file │ │ │ -│ │ │ • From │ │ • Wait for │ │ • Unset variables │ │ │ -│ │ │ SPINNER_PID │ │ termination │ │ • Reset terminal │ │ │ -│ │ │ • From PID │ │ • Force kill │ │ settings │ │ │ -│ │ │ file │ │ if needed │ │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ -``` - -## Silent Execution Flow - -``` -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ SILENT() Execution Flow │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Command Execution │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ 
Setup │ │ Execute │ │ Capture Output │ │ │ -│ │ │ Environment │ │ Command │ │ │ │ │ -│ │ │ │ │ │ │ • Redirect stdout │ │ │ -│ │ │ • Disable │ │ • Run command │ │ to log file │ │ │ -│ │ │ error │ │ • Capture │ │ • Redirect stderr │ │ │ -│ │ │ handling │ │ return code │ │ to log file │ │ │ -│ │ │ • Remove │ │ • Store exit │ │ • Log all output │ │ │ -│ │ │ traps │ │ code │ │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Error Handling │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Check Exit │ │ Load Error │ │ Display Error │ │ │ -│ │ │ Code │ │ Handler │ │ Information │ │ │ -│ │ │ │ │ │ │ │ │ │ -│ │ │ • If exit code │ │ • Source │ │ • Show error code │ │ │ -│ │ │ != 0 │ │ error_handler │ │ • Show explanation │ │ │ -│ │ │ • Proceed to │ │ if needed │ │ • Show command │ │ │ -│ │ │ error │ │ • Get error │ │ • Show log lines │ │ │ -│ │ │ handling │ │ explanation │ │ • Show full log │ │ │ -│ │ │ │ │ │ │ command │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Log Management │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Log File │ │ Log Display │ │ Log Access │ │ │ -│ │ │ Management │ │ │ │ │ │ │ -│ │ │ │ │ • Show last 10 │ │ • Provide command │ │ │ -│ │ │ • Create log │ │ lines │ │ to view full log │ │ │ -│ │ │ file path │ │ • Count total │ │ • Show line count │ │ │ -│ │ │ • Use process │ │ lines │ │ • Enable debugging │ │ │ -│ │ │ ID in name │ │ • Format │ │ │ │ │ -│ │ │ │ │ output │ │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ 
-│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ -``` - -## Header Management Flow - -``` -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Header Management Flow │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ GET_HEADER() │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Prepare │ │ Check Local │ │ Download Header │ │ │ -│ │ │ Parameters │ │ File │ │ │ │ │ -│ │ │ │ │ │ │ • Construct URL │ │ │ -│ │ │ • Get app name │ │ • Check if │ │ • Download file │ │ │ -│ │ │ from APP │ │ file exists │ │ • Save to local │ │ │ -│ │ │ • Get app type │ │ • Check if │ │ path │ │ │ -│ │ │ from APP_TYPE │ │ file has │ │ • Return success │ │ │ -│ │ │ • Construct │ │ content │ │ status │ │ │ -│ │ │ paths │ │ • Return if │ │ │ │ │ -│ │ │ │ │ available │ │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ HEADER_INFO() │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Get Header │ │ Clear Screen │ │ Display Header │ │ │ -│ │ │ Content │ │ │ │ │ │ │ -│ │ │ │ │ • Clear │ │ • Show header │ │ │ -│ │ │ • Call │ │ terminal │ │ content if │ │ │ -│ │ │ get_header() │ │ • Get terminal │ │ available │ │ │ -│ │ │ • Handle │ │ width │ │ • Format output │ │ │ -│ │ │ errors │ │ • Set default │ │ • Center content │ │ │ -│ │ │ • Return │ │ width if │ │ if possible │ │ │ -│ │ │ content │ │ needed │ │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ 
-└─────────────────────────────────────────────────────────────────────────────────┘ -``` - -## Swap Management Flow - -``` -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ CHECK_OR_CREATE_SWAP() Flow │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Swap Detection │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Check Active │ │ Swap Found │ │ No Swap Found │ │ │ -│ │ │ Swap │ │ │ │ │ │ │ -│ │ │ │ │ • Show success │ │ • Show error │ │ │ -│ │ │ • Use swapon │ │ message │ │ message │ │ │ -│ │ │ command │ │ • Return 0 │ │ • Ask user for │ │ │ -│ │ │ • Check for │ │ │ │ creation │ │ │ -│ │ │ swap devices │ │ │ │ • Proceed to │ │ │ -│ │ │ • Return │ │ │ │ creation flow │ │ │ -│ │ │ status │ │ │ │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Swap Creation │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ User Input │ │ Size │ │ File Creation │ │ │ -│ │ │ Collection │ │ Validation │ │ │ │ │ -│ │ │ │ │ │ │ • Create swap file │ │ │ -│ │ │ • Ask for │ │ • Validate │ │ with dd │ │ │ -│ │ │ confirmation │ │ numeric input │ │ • Set permissions │ │ │ -│ │ │ • Convert to │ │ • Check range │ │ • Format swap │ │ │ -│ │ │ lowercase │ │ • Abort if │ │ • Activate swap │ │ │ -│ │ │ • Check for │ │ invalid │ │ • Show success │ │ │ -│ │ │ y/yes │ │ │ │ message │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ -``` - -## Integration Points - -### With Other Scripts -- **build.func**: Provides system checks and UI 
functions -- **tools.func**: Uses core utilities for extended operations -- **api.func**: Uses system checks and error handling -- **error_handler.func**: Provides error explanations for silent execution - -### External Dependencies -- **curl**: For downloading header files -- **tput**: For terminal control (installed if missing) -- **swapon/mkswap**: For swap management -- **pveversion**: For Proxmox version checking - -### Data Flow -- **Input**: Environment variables, command parameters -- **Processing**: System validation, UI rendering, command execution -- **Output**: Messages, log files, exit codes, system state changes diff --git a/docs/misc/core.func/CORE_FUNCTIONS_REFERENCE.md b/docs/misc/core.func/CORE_FUNCTIONS_REFERENCE.md deleted file mode 100644 index 1dacb1609..000000000 --- a/docs/misc/core.func/CORE_FUNCTIONS_REFERENCE.md +++ /dev/null @@ -1,637 +0,0 @@ -# core.func Functions Reference - -## Overview - -This document provides a comprehensive alphabetical reference of all functions in `core.func`, including parameters, dependencies, usage examples, and error handling. 
- -## Function Categories - -### Initialization Functions - -#### `load_functions()` -**Purpose**: Main function loader that initializes all core utilities -**Parameters**: None -**Returns**: None -**Side Effects**: -- Sets `__FUNCTIONS_LOADED=1` to prevent reloading -- Calls all core function groups in sequence -- Initializes color, formatting, icons, defaults, and standard mode -**Dependencies**: None -**Environment Variables Used**: `__FUNCTIONS_LOADED` - -**Usage Example**: -```bash -# Automatically called when core.func is sourced -source core.func -# load_functions() is called automatically -``` - -### Color and Formatting Functions - -#### `color()` -**Purpose**: Set ANSI color codes for styled terminal output -**Parameters**: None -**Returns**: None -**Side Effects**: Sets global color variables -**Dependencies**: None -**Environment Variables Used**: None - -**Sets Variables**: -- `YW`: Yellow -- `YWB`: Bright yellow -- `BL`: Blue -- `RD`: Red -- `BGN`: Bright green -- `GN`: Green -- `DGN`: Dark green -- `CL`: Clear/reset - -**Usage Example**: -```bash -color -echo -e "${GN}Success message${CL}" -echo -e "${RD}Error message${CL}" -``` - -#### `color_spinner()` -**Purpose**: Set color codes specifically for spinner output -**Parameters**: None -**Returns**: None -**Side Effects**: Sets spinner-specific color variables -**Dependencies**: None -**Environment Variables Used**: None - -**Sets Variables**: -- `CS_YW`: Yellow for spinner -- `CS_YWB`: Bright yellow for spinner -- `CS_CL`: Clear for spinner - -#### `formatting()` -**Purpose**: Define formatting helpers for terminal output -**Parameters**: None -**Returns**: None -**Side Effects**: Sets global formatting variables -**Dependencies**: None -**Environment Variables Used**: None - -**Sets Variables**: -- `BFR`: Back and forward reset -- `BOLD`: Bold text -- `HOLD`: Space character -- `TAB`: Two spaces -- `TAB3`: Six spaces - -### Icon Functions - -#### `icons()` -**Purpose**: Set symbolic icons used 
throughout user feedback and prompts -**Parameters**: None -**Returns**: None -**Side Effects**: Sets global icon variables -**Dependencies**: `formatting()` (for TAB variable) -**Environment Variables Used**: `TAB`, `CL` - -**Sets Variables**: -- `CM`: Check mark -- `CROSS`: Cross mark -- `DNSOK`: DNS success -- `DNSFAIL`: DNS failure -- `INFO`: Information icon -- `OS`: Operating system icon -- `OSVERSION`: OS version icon -- `CONTAINERTYPE`: Container type icon -- `DISKSIZE`: Disk size icon -- `CPUCORE`: CPU core icon -- `RAMSIZE`: RAM size icon -- `SEARCH`: Search icon -- `VERBOSE_CROPPED`: Verbose mode icon -- `VERIFYPW`: Password verification icon -- `CONTAINERID`: Container ID icon -- `HOSTNAME`: Hostname icon -- `BRIDGE`: Bridge icon -- `NETWORK`: Network icon -- `GATEWAY`: Gateway icon -- `DISABLEIPV6`: IPv6 disable icon -- `DEFAULT`: Default settings icon -- `MACADDRESS`: MAC address icon -- `VLANTAG`: VLAN tag icon -- `ROOTSSH`: SSH key icon -- `CREATING`: Creating icon -- `ADVANCED`: Advanced settings icon -- `FUSE`: FUSE icon -- `HOURGLASS`: Hourglass icon - -### Default Variables Functions - -#### `default_vars()` -**Purpose**: Set default retry and wait variables for system actions -**Parameters**: None -**Returns**: None -**Side Effects**: Sets retry configuration variables -**Dependencies**: None -**Environment Variables Used**: None - -**Sets Variables**: -- `RETRY_NUM`: Number of retry attempts (default: 10) -- `RETRY_EVERY`: Seconds between retries (default: 3) -- `i`: Retry counter initialized to RETRY_NUM - -#### `set_std_mode()` -**Purpose**: Set default verbose mode for script execution -**Parameters**: None -**Returns**: None -**Side Effects**: Sets STD variable based on VERBOSE setting -**Dependencies**: None -**Environment Variables Used**: `VERBOSE` - -**Sets Variables**: -- `STD`: "silent" if VERBOSE != "yes", empty string if VERBOSE = "yes" - -### Silent Execution Functions - -#### `silent()` -**Purpose**: Execute commands silently 
with detailed error reporting -**Parameters**: `$*` - Command and arguments to execute -**Returns**: None (exits on error) -**Side Effects**: -- Executes command with output redirected to log file -- On error, displays detailed error information -- Exits with command's exit code -**Dependencies**: `error_handler.func` (for error explanations) -**Environment Variables Used**: `SILENT_LOGFILE` - -**Usage Example**: -```bash -silent apt-get update -silent apt-get install -y package-name -``` - -**Error Handling**: -- Captures command output to `/tmp/silent.$$.log` -- Shows error code explanation -- Displays last 10 lines of log -- Provides command to view full log - -### System Check Functions - -#### `shell_check()` -**Purpose**: Verify that the script is running in Bash shell -**Parameters**: None -**Returns**: None (exits if not Bash) -**Side Effects**: -- Checks current shell process -- Exits with error message if not Bash -**Dependencies**: None -**Environment Variables Used**: None - -**Usage Example**: -```bash -shell_check -# Script continues if Bash, exits if not -``` - -#### `root_check()` -**Purpose**: Ensure script is running as root user -**Parameters**: None -**Returns**: None (exits if not root) -**Side Effects**: -- Checks user ID and parent process -- Exits with error message if not root -**Dependencies**: None -**Environment Variables Used**: None - -**Usage Example**: -```bash -root_check -# Script continues if root, exits if not -``` - -#### `pve_check()` -**Purpose**: Verify Proxmox VE version compatibility -**Parameters**: None -**Returns**: None (exits if unsupported version) -**Side Effects**: -- Checks PVE version using pveversion command -- Exits with error message if unsupported -**Dependencies**: `pveversion` command -**Environment Variables Used**: None - -**Supported Versions**: -- Proxmox VE 8.0 - 8.9 -- Proxmox VE 9.0 (only) - -**Usage Example**: -```bash -pve_check -# Script continues if supported version, exits if not -``` - -#### 
`arch_check()` -**Purpose**: Verify system architecture is AMD64 -**Parameters**: None -**Returns**: None (exits if not AMD64) -**Side Effects**: -- Checks system architecture -- Exits with PiMox warning if not AMD64 -**Dependencies**: `dpkg` command -**Environment Variables Used**: None - -**Usage Example**: -```bash -arch_check -# Script continues if AMD64, exits if not -``` - -#### `ssh_check()` -**Purpose**: Detect and warn about external SSH usage -**Parameters**: None -**Returns**: None -**Side Effects**: -- Checks SSH_CLIENT environment variable -- Warns if connecting from external IP -- Allows local connections (127.0.0.1 or host IP) -**Dependencies**: None -**Environment Variables Used**: `SSH_CLIENT` - -**Usage Example**: -```bash -ssh_check -# Shows warning if external SSH, continues anyway -``` - -### Header Management Functions - -#### `get_header()` -**Purpose**: Download and cache application header files -**Parameters**: None (uses APP and APP_TYPE variables) -**Returns**: Header content on success, empty on failure -**Side Effects**: -- Downloads header from remote URL -- Caches header locally -- Creates directory structure if needed -**Dependencies**: `curl` command -**Environment Variables Used**: `APP`, `APP_TYPE` - -**Usage Example**: -```bash -export APP="plex" -export APP_TYPE="ct" -header_content=$(get_header) -``` - -#### `header_info()` -**Purpose**: Display application header information -**Parameters**: None (uses APP variable) -**Returns**: None -**Side Effects**: -- Clears screen -- Displays header content -- Gets terminal width for formatting -**Dependencies**: `get_header()`, `tput` command -**Environment Variables Used**: `APP` - -**Usage Example**: -```bash -export APP="plex" -header_info -# Displays Plex header information -``` - -### Utility Functions - -#### `ensure_tput()` -**Purpose**: Ensure tput command is available for terminal control -**Parameters**: None -**Returns**: None -**Side Effects**: -- Installs ncurses package 
if tput missing -- Works on Alpine and Debian-based systems -**Dependencies**: `apk` or `apt-get` package managers -**Environment Variables Used**: None - -**Usage Example**: -```bash -ensure_tput -# Installs ncurses if needed, continues if already available -``` - -#### `is_alpine()` -**Purpose**: Detect if running on Alpine Linux -**Parameters**: None -**Returns**: 0 if Alpine, 1 if not Alpine -**Side Effects**: None -**Dependencies**: None -**Environment Variables Used**: `var_os`, `PCT_OSTYPE` - -**Usage Example**: -```bash -if is_alpine; then - echo "Running on Alpine Linux" -else - echo "Not running on Alpine Linux" -fi -``` - -#### `is_verbose_mode()` -**Purpose**: Check if verbose mode is enabled -**Parameters**: None -**Returns**: 0 if verbose mode, 1 if not verbose -**Side Effects**: None -**Dependencies**: None -**Environment Variables Used**: `VERBOSE`, `var_verbose` - -**Usage Example**: -```bash -if is_verbose_mode; then - echo "Verbose mode enabled" -else - echo "Verbose mode disabled" -fi -``` - -#### `fatal()` -**Purpose**: Display fatal error and terminate script -**Parameters**: `$1` - Error message -**Returns**: None (terminates script) -**Side Effects**: -- Displays error message -- Sends INT signal to current process -**Dependencies**: `msg_error()` -**Environment Variables Used**: None - -**Usage Example**: -```bash -fatal "Critical error occurred" -# Script terminates after displaying error -``` - -### Spinner Functions - -#### `spinner()` -**Purpose**: Display animated spinner for progress indication -**Parameters**: None (uses SPINNER_MSG variable) -**Returns**: None (runs indefinitely) -**Side Effects**: -- Displays rotating spinner characters -- Uses terminal control sequences -**Dependencies**: `color_spinner()` -**Environment Variables Used**: `SPINNER_MSG` - -**Usage Example**: -```bash -SPINNER_MSG="Processing..." -spinner & -SPINNER_PID=$! 
-# Spinner runs in background -``` - -#### `clear_line()` -**Purpose**: Clear current terminal line -**Parameters**: None -**Returns**: None -**Side Effects**: Clears current line using terminal control -**Dependencies**: `tput` command -**Environment Variables Used**: None - -#### `stop_spinner()` -**Purpose**: Stop running spinner and cleanup -**Parameters**: None -**Returns**: None -**Side Effects**: -- Kills spinner process -- Removes PID file -- Resets terminal settings -- Unsets spinner variables -**Dependencies**: None -**Environment Variables Used**: `SPINNER_PID`, `SPINNER_MSG` - -**Usage Example**: -```bash -stop_spinner -# Stops spinner and cleans up -``` - -### Message Functions - -#### `msg_info()` -**Purpose**: Display informational message with spinner -**Parameters**: `$1` - Message text -**Returns**: None -**Side Effects**: -- Starts spinner if not in verbose mode -- Tracks shown messages to prevent duplicates -- Displays message with hourglass icon in verbose mode -**Dependencies**: `spinner()`, `is_verbose_mode()`, `is_alpine()` -**Environment Variables Used**: `MSG_INFO_SHOWN` - -**Usage Example**: -```bash -msg_info "Installing package..." 
-# Shows spinner with message -``` - -#### `msg_ok()` -**Purpose**: Display success message -**Parameters**: `$1` - Success message text -**Returns**: None -**Side Effects**: -- Stops spinner -- Displays green checkmark with message -- Removes message from shown tracking -**Dependencies**: `stop_spinner()` -**Environment Variables Used**: `MSG_INFO_SHOWN` - -**Usage Example**: -```bash -msg_ok "Package installed successfully" -# Shows green checkmark with message -``` - -#### `msg_error()` -**Purpose**: Display error message -**Parameters**: `$1` - Error message text -**Returns**: None -**Side Effects**: -- Stops spinner -- Displays red cross with message -**Dependencies**: `stop_spinner()` -**Environment Variables Used**: None - -**Usage Example**: -```bash -msg_error "Installation failed" -# Shows red cross with message -``` - -#### `msg_warn()` -**Purpose**: Display warning message -**Parameters**: `$1` - Warning message text -**Returns**: None -**Side Effects**: -- Stops spinner -- Displays yellow info icon with message -**Dependencies**: `stop_spinner()` -**Environment Variables Used**: None - -**Usage Example**: -```bash -msg_warn "This operation may take some time" -# Shows yellow info icon with message -``` - -#### `msg_custom()` -**Purpose**: Display custom message with specified symbol and color -**Parameters**: -- `$1` - Custom symbol (default: "[*]") -- `$2` - Color code (default: "\e[36m") -- `$3` - Message text -**Returns**: None -**Side Effects**: -- Stops spinner -- Displays custom formatted message -**Dependencies**: `stop_spinner()` -**Environment Variables Used**: None - -**Usage Example**: -```bash -msg_custom "⚡" "\e[33m" "Custom warning message" -# Shows custom symbol and color with message -``` - -#### `msg_debug()` -**Purpose**: Display debug message if debug mode enabled -**Parameters**: `$*` - Debug message text -**Returns**: None -**Side Effects**: -- Only displays if var_full_verbose is set -- Shows timestamp and debug prefix 
-**Dependencies**: None -**Environment Variables Used**: `var_full_verbose`, `var_verbose` - -**Usage Example**: -```bash -export var_full_verbose=1 -msg_debug "Debug information here" -# Shows debug message with timestamp -``` - -### System Management Functions - -#### `check_or_create_swap()` -**Purpose**: Check for active swap and optionally create swap file -**Parameters**: None -**Returns**: 0 if swap exists or created, 1 if skipped -**Side Effects**: -- Checks for active swap -- Prompts user to create swap if none found -- Creates swap file if user confirms -**Dependencies**: `swapon`, `dd`, `mkswap` commands -**Environment Variables Used**: None - -**Usage Example**: -```bash -if check_or_create_swap; then - echo "Swap is available" -else - echo "No swap available" -fi -``` - -## Function Call Hierarchy - -### Initialization Flow -``` -load_functions() -├── color() -├── formatting() -├── icons() -├── default_vars() -└── set_std_mode() -``` - -### Message System Flow -``` -msg_info() -├── is_verbose_mode() -├── is_alpine() -├── spinner() -└── color_spinner() - -msg_ok() -├── stop_spinner() -└── clear_line() - -msg_error() -└── stop_spinner() - -msg_warn() -└── stop_spinner() -``` - -### System Check Flow -``` -pve_check() -├── pveversion command -└── version parsing - -arch_check() -├── dpkg command -└── architecture check - -shell_check() -├── ps command -└── shell detection - -root_check() -├── id command -└── parent process check -``` - -### Silent Execution Flow -``` -silent() -├── Command execution -├── Output redirection -├── Error handling -├── error_handler.func loading -└── Log management -``` - -## Error Handling Patterns - -### System Check Errors -- All system check functions exit with appropriate error messages -- Clear indication of what's wrong and how to fix it -- Graceful exit with sleep delay for user to read message - -### Silent Execution Errors -- Commands executed via `silent()` capture output to log file -- On failure, displays error 
code explanation -- Shows last 10 lines of log output -- Provides command to view full log - -### Spinner Errors -- Spinner functions handle process cleanup on exit -- Trap handlers ensure spinners are stopped -- Terminal settings are restored on error - -## Environment Variable Dependencies - -### Required Variables -- `APP`: Application name for header display -- `APP_TYPE`: Application type (ct/vm) for header paths -- `VERBOSE`: Verbose mode setting - -### Optional Variables -- `var_os`: OS type for Alpine detection -- `PCT_OSTYPE`: Alternative OS type variable -- `var_verbose`: Alternative verbose setting -- `var_full_verbose`: Debug mode setting - -### Internal Variables -- `_CORE_FUNC_LOADED`: Prevents multiple loading -- `__FUNCTIONS_LOADED`: Prevents multiple function loading -- `SILENT_LOGFILE`: Silent execution log file path -- `SPINNER_PID`: Spinner process ID -- `SPINNER_MSG`: Spinner message text -- `MSG_INFO_SHOWN`: Tracks shown info messages diff --git a/docs/misc/core.func/CORE_INTEGRATION.md b/docs/misc/core.func/CORE_INTEGRATION.md deleted file mode 100644 index b203f1c73..000000000 --- a/docs/misc/core.func/CORE_INTEGRATION.md +++ /dev/null @@ -1,517 +0,0 @@ -# core.func Integration Guide - -## Overview - -This document describes how `core.func` integrates with other components in the Proxmox Community Scripts project, including dependencies, data flow, and API surface. 
- -## Dependencies - -### External Dependencies - -#### Required Commands -- **`pveversion`**: Proxmox VE version checking -- **`dpkg`**: Architecture detection -- **`ps`**: Process and shell detection -- **`id`**: User ID checking -- **`curl`**: Header file downloading -- **`swapon`**: Swap status checking -- **`dd`**: Swap file creation -- **`mkswap`**: Swap file formatting - -#### Optional Commands -- **`tput`**: Terminal control (installed if missing) -- **`apk`**: Alpine package manager -- **`apt-get`**: Debian package manager - -### Internal Dependencies - -#### error_handler.func -- **Purpose**: Provides error code explanations for silent execution -- **Usage**: Automatically loaded when `silent()` encounters errors -- **Integration**: Called via `explain_exit_code()` function -- **Data Flow**: Error code → explanation → user display - -## Integration Points - -### With build.func - -#### System Validation -```bash -# build.func uses core.func for system checks -source core.func -pve_check -arch_check -shell_check -root_check -``` - -#### User Interface -```bash -# build.func uses core.func for UI elements -msg_info "Creating container..." -msg_ok "Container created successfully" -msg_error "Container creation failed" -``` - -#### Silent Execution -```bash -# build.func uses core.func for command execution -silent pct create "$CTID" "$TEMPLATE" \ - --hostname "$HOSTNAME" \ - --memory "$MEMORY" \ - --cores "$CORES" -``` - -### With tools.func - -#### Utility Functions -```bash -# tools.func uses core.func utilities -source core.func - -# System checks -pve_check -root_check - -# UI elements -msg_info "Running maintenance tasks..." 
-msg_ok "Maintenance completed" -``` - -#### Error Handling -```bash -# tools.func uses core.func for error handling -if silent systemctl restart service; then - msg_ok "Service restarted" -else - msg_error "Service restart failed" -fi -``` - -### With api.func - -#### System Validation -```bash -# api.func uses core.func for system checks -source core.func -pve_check -root_check -``` - -#### API Operations -```bash -# api.func uses core.func for API calls -msg_info "Connecting to Proxmox API..." -if silent curl -k -H "Authorization: PVEAPIToken=$API_TOKEN" \ - "$API_URL/api2/json/nodes/$NODE/lxc"; then - msg_ok "API connection successful" -else - msg_error "API connection failed" -fi -``` - -### With error_handler.func - -#### Error Explanations -```bash -# error_handler.func provides explanations for core.func -explain_exit_code() { - local code="$1" - case "$code" in - 1) echo "General error" ;; - 2) echo "Misuse of shell builtins" ;; - 126) echo "Command invoked cannot execute" ;; - 127) echo "Command not found" ;; - 128) echo "Invalid argument to exit" ;; - *) echo "Unknown error code" ;; - esac -} -``` - -### With install.func - -#### Installation Process -```bash -# install.func uses core.func for installation -source core.func - -# System checks -pve_check -root_check - -# Installation steps -msg_info "Installing packages..." 
-silent apt-get update -silent apt-get install -y package - -msg_ok "Installation completed" -``` - -### With alpine-install.func - -#### Alpine-Specific Operations -```bash -# alpine-install.func uses core.func for Alpine operations -source core.func - -# Alpine detection -if is_alpine; then - msg_info "Detected Alpine Linux" - silent apk add --no-cache package -else - msg_info "Detected Debian-based system" - silent apt-get install -y package -fi -``` - -### With alpine-tools.func - -#### Alpine Utilities -```bash -# alpine-tools.func uses core.func for Alpine tools -source core.func - -# Alpine-specific operations -if is_alpine; then - msg_info "Running Alpine-specific operations..." - # Alpine tools logic - msg_ok "Alpine operations completed" -fi -``` - -### With passthrough.func - -#### Hardware Passthrough -```bash -# passthrough.func uses core.func for hardware operations -source core.func - -# System checks -pve_check -root_check - -# Hardware operations -msg_info "Configuring GPU passthrough..." -if silent lspci | grep -i nvidia; then - msg_ok "NVIDIA GPU detected" -else - msg_warn "No NVIDIA GPU found" -fi -``` - -### With vm-core.func - -#### VM Operations -```bash -# vm-core.func uses core.func for VM management -source core.func - -# System checks -pve_check -root_check - -# VM operations -msg_info "Creating virtual machine..." 
-silent qm create "$VMID" \ - --name "$VMNAME" \ - --memory "$MEMORY" \ - --cores "$CORES" - -msg_ok "Virtual machine created" -``` - -## Data Flow - -### Input Data - -#### Environment Variables -- **`APP`**: Application name for header display -- **`APP_TYPE`**: Application type (ct/vm) for header paths -- **`VERBOSE`**: Verbose mode setting -- **`var_os`**: OS type for Alpine detection -- **`PCT_OSTYPE`**: Alternative OS type variable -- **`var_verbose`**: Alternative verbose setting -- **`var_full_verbose`**: Debug mode setting - -#### Command Parameters -- **Function arguments**: Passed to individual functions -- **Command arguments**: Passed to `silent()` function -- **User input**: Collected via `read` commands - -### Processing Data - -#### System Information -- **Proxmox version**: Parsed from `pveversion` output -- **Architecture**: Retrieved from `dpkg --print-architecture` -- **Shell type**: Detected from process information -- **User ID**: Retrieved from `id -u` -- **SSH connection**: Detected from `SSH_CLIENT` environment - -#### UI State -- **Message tracking**: `MSG_INFO_SHOWN` associative array -- **Spinner state**: `SPINNER_PID` and `SPINNER_MSG` variables -- **Terminal state**: Cursor position and display mode - -#### Error Information -- **Exit codes**: Captured from command execution -- **Log output**: Redirected to temporary log files -- **Error explanations**: Retrieved from error_handler.func - -### Output Data - -#### User Interface -- **Colored messages**: ANSI color codes for terminal output -- **Icons**: Symbolic representations for different message types -- **Spinners**: Animated progress indicators -- **Formatted text**: Consistent message formatting - -#### System State -- **Exit codes**: Returned from functions -- **Log files**: Created for silent execution -- **Configuration**: Modified system settings -- **Process state**: Spinner processes and cleanup - -## API Surface - -### Public Functions - -#### System Validation -- 
**`pve_check()`**: Proxmox VE version validation -- **`arch_check()`**: Architecture validation -- **`shell_check()`**: Shell validation -- **`root_check()`**: Privilege validation -- **`ssh_check()`**: SSH connection warning - -#### User Interface -- **`msg_info()`**: Informational messages -- **`msg_ok()`**: Success messages -- **`msg_error()`**: Error messages -- **`msg_warn()`**: Warning messages -- **`msg_custom()`**: Custom messages -- **`msg_debug()`**: Debug messages - -#### Spinner Control -- **`spinner()`**: Start spinner animation -- **`stop_spinner()`**: Stop spinner and cleanup -- **`clear_line()`**: Clear current terminal line - -#### Silent Execution -- **`silent()`**: Execute commands with error handling - -#### Utility Functions -- **`is_alpine()`**: Alpine Linux detection -- **`is_verbose_mode()`**: Verbose mode detection -- **`fatal()`**: Fatal error handling -- **`ensure_tput()`**: Terminal control setup - -#### Header Management -- **`get_header()`**: Download application headers -- **`header_info()`**: Display header information - -#### System Management -- **`check_or_create_swap()`**: Swap file management - -### Internal Functions - -#### Initialization -- **`load_functions()`**: Function loader -- **`color()`**: Color setup -- **`formatting()`**: Formatting setup -- **`icons()`**: Icon setup -- **`default_vars()`**: Default variables -- **`set_std_mode()`**: Standard mode setup - -#### Color Management -- **`color_spinner()`**: Spinner colors - -### Global Variables - -#### Color Variables -- **`YW`**, **`YWB`**, **`BL`**, **`RD`**, **`BGN`**, **`GN`**, **`DGN`**, **`CL`**: Color codes -- **`CS_YW`**, **`CS_YWB`**, **`CS_CL`**: Spinner colors - -#### Formatting Variables -- **`BFR`**, **`BOLD`**, **`HOLD`**, **`TAB`**, **`TAB3`**: Formatting helpers - -#### Icon Variables -- **`CM`**, **`CROSS`**, **`INFO`**, **`OS`**, **`OSVERSION`**, etc.: Message icons - -#### Configuration Variables -- **`RETRY_NUM`**, **`RETRY_EVERY`**: Retry settings 
-- **`STD`**: Standard mode setting -- **`SILENT_LOGFILE`**: Log file path - -#### State Variables -- **`_CORE_FUNC_LOADED`**: Loading prevention -- **`__FUNCTIONS_LOADED`**: Function loading prevention -- **`SPINNER_PID`**, **`SPINNER_MSG`**: Spinner state -- **`MSG_INFO_SHOWN`**: Message tracking - -## Integration Patterns - -### Standard Integration Pattern - -```bash -#!/usr/bin/env bash -# Standard integration pattern - -# 1. Source core.func first -source core.func - -# 2. Run system checks -pve_check -arch_check -shell_check -root_check - -# 3. Set up error handling -trap 'stop_spinner' EXIT INT TERM - -# 4. Use UI functions -msg_info "Starting operation..." - -# 5. Use silent execution -silent command - -# 6. Show completion -msg_ok "Operation completed" -``` - -### Minimal Integration Pattern - -```bash -#!/usr/bin/env bash -# Minimal integration pattern - -source core.func -pve_check -root_check - -msg_info "Running operation..." -silent command -msg_ok "Operation completed" -``` - -### Advanced Integration Pattern - -```bash -#!/usr/bin/env bash -# Advanced integration pattern - -source core.func - -# System validation -pve_check -arch_check -shell_check -root_check -ssh_check - -# Error handling -trap 'stop_spinner' EXIT INT TERM - -# Verbose mode handling -if is_verbose_mode; then - msg_info "Verbose mode enabled" -fi - -# OS-specific operations -if is_alpine; then - msg_info "Alpine Linux detected" - # Alpine-specific logic -else - msg_info "Debian-based system detected" - # Debian-specific logic -fi - -# Operation execution -msg_info "Starting operation..." 
-if silent command; then - msg_ok "Operation succeeded" -else - msg_error "Operation failed" - exit 1 -fi -``` - -## Error Handling Integration - -### Silent Execution Error Flow - -``` -silent() command -├── Execute command -├── Capture output to log -├── Check exit code -├── If error: -│ ├── Load error_handler.func -│ ├── Get error explanation -│ ├── Display error details -│ ├── Show log excerpt -│ └── Exit with error code -└── If success: Continue -``` - -### System Check Error Flow - -``` -System Check Function -├── Check system state -├── If valid: Return 0 -└── If invalid: - ├── Display error message - ├── Show fix instructions - ├── Sleep for user to read - └── Exit with error code -``` - -## Performance Considerations - -### Loading Optimization -- **Single Loading**: `_CORE_FUNC_LOADED` prevents multiple loading -- **Function Loading**: `__FUNCTIONS_LOADED` prevents multiple function loading -- **Lazy Loading**: Functions loaded only when needed - -### Memory Usage -- **Minimal Footprint**: Core functions use minimal memory -- **Variable Reuse**: Global variables reused across functions -- **Cleanup**: Spinner processes cleaned up on exit - -### Execution Speed -- **Fast Checks**: System checks are optimized for speed -- **Efficient Spinners**: Spinner animation uses minimal CPU -- **Quick Messages**: Message functions optimized for performance - -## Security Considerations - -### Privilege Escalation -- **Root Check**: Ensures script runs with sufficient privileges -- **Shell Check**: Validates shell environment -- **Process Validation**: Checks parent process for sudo usage - -### Input Validation -- **Parameter Checking**: Functions validate input parameters -- **Error Handling**: Proper error handling prevents crashes -- **Safe Execution**: Silent execution with proper error handling - -### System Protection -- **Version Validation**: Ensures compatible Proxmox version -- **Architecture Check**: Prevents execution on unsupported systems -- **SSH 
Warning**: Warns about external SSH usage - -## Future Integration Considerations - -### Extensibility -- **Function Groups**: Easy to add new function groups -- **Message Types**: Easy to add new message types -- **System Checks**: Easy to add new system checks - -### Compatibility -- **Version Support**: Easy to add new Proxmox versions -- **OS Support**: Easy to add new operating systems -- **Architecture Support**: Easy to add new architectures - -### Performance -- **Optimization**: Functions can be optimized for better performance -- **Caching**: Results can be cached for repeated operations -- **Parallelization**: Operations can be parallelized where appropriate diff --git a/docs/misc/core.func/CORE_USAGE_EXAMPLES.md b/docs/misc/core.func/CORE_USAGE_EXAMPLES.md deleted file mode 100644 index c702bd2ed..000000000 --- a/docs/misc/core.func/CORE_USAGE_EXAMPLES.md +++ /dev/null @@ -1,728 +0,0 @@ -# core.func Usage Examples - -## Overview - -This document provides practical usage examples for `core.func` functions, covering common scenarios, integration patterns, and best practices. - -## Basic Script Setup - -### Standard Script Initialization - -```bash -#!/usr/bin/env bash -# Standard script setup using core.func - -# Source core functions -source core.func - -# Run system checks -pve_check -arch_check -shell_check -root_check - -# Optional: Check SSH connection -ssh_check - -# Set up error handling -trap 'stop_spinner' EXIT INT TERM - -# Your script logic here -msg_info "Starting script execution" -# ... script code ... -msg_ok "Script completed successfully" -``` - -### Minimal Script Setup - -```bash -#!/usr/bin/env bash -# Minimal setup for simple scripts - -source core.func - -# Basic checks only -pve_check -root_check - -# Simple execution -msg_info "Running operation" -# ... your code ... 
-msg_ok "Operation completed" -``` - -## Message Display Examples - -### Progress Indication - -```bash -#!/usr/bin/env bash -source core.func - -# Show progress with spinner -msg_info "Downloading package..." -sleep 2 -msg_ok "Download completed" - -msg_info "Installing package..." -sleep 3 -msg_ok "Installation completed" - -msg_info "Configuring service..." -sleep 1 -msg_ok "Configuration completed" -``` - -### Error Handling - -```bash -#!/usr/bin/env bash -source core.func - -# Function with error handling -install_package() { - local package="$1" - - msg_info "Installing $package..." - - if silent apt-get install -y "$package"; then - msg_ok "$package installed successfully" - return 0 - else - msg_error "Failed to install $package" - return 1 - fi -} - -# Usage -if install_package "nginx"; then - msg_ok "Nginx installation completed" -else - msg_error "Nginx installation failed" - exit 1 -fi -``` - -### Warning Messages - -```bash -#!/usr/bin/env bash -source core.func - -# Show warnings for potentially dangerous operations -msg_warn "This will modify system configuration" -read -p "Continue? [y/N]: " confirm - -if [[ "$confirm" =~ ^[yY]$ ]]; then - msg_info "Proceeding with modification..." - # ... dangerous operation ... 
- msg_ok "Modification completed" -else - msg_info "Operation cancelled" -fi -``` - -### Custom Messages - -```bash -#!/usr/bin/env bash -source core.func - -# Custom message with specific icon and color -msg_custom "🚀" "\e[32m" "Launching application" -msg_custom "⚡" "\e[33m" "High performance mode enabled" -msg_custom "🔒" "\e[31m" "Security mode activated" -``` - -### Debug Messages - -```bash -#!/usr/bin/env bash -source core.func - -# Enable debug mode -export var_full_verbose=1 - -# Debug messages -msg_debug "Variable value: $some_variable" -msg_debug "Function called: $FUNCNAME" -msg_debug "Current directory: $(pwd)" -``` - -## Silent Execution Examples - -### Package Management - -```bash -#!/usr/bin/env bash -source core.func - -# Update package lists -msg_info "Updating package lists..." -silent apt-get update - -# Install packages -msg_info "Installing required packages..." -silent apt-get install -y curl wget git - -# Upgrade packages -msg_info "Upgrading packages..." -silent apt-get upgrade -y - -msg_ok "Package management completed" -``` - -### File Operations - -```bash -#!/usr/bin/env bash -source core.func - -# Create directories -msg_info "Creating directory structure..." -silent mkdir -p /opt/myapp/{config,logs,data} - -# Set permissions -msg_info "Setting permissions..." -silent chmod 755 /opt/myapp -silent chmod 644 /opt/myapp/config/* - -# Copy files -msg_info "Copying configuration files..." -silent cp config/* /opt/myapp/config/ - -msg_ok "File operations completed" -``` - -### Service Management - -```bash -#!/usr/bin/env bash -source core.func - -# Start service -msg_info "Starting service..." -silent systemctl start myservice - -# Enable service -msg_info "Enabling service..." -silent systemctl enable myservice - -# Check service status -msg_info "Checking service status..." 
-if silent systemctl is-active --quiet myservice; then - msg_ok "Service is running" -else - msg_error "Service failed to start" -fi -``` - -### Network Operations - -```bash -#!/usr/bin/env bash -source core.func - -# Test network connectivity -msg_info "Testing network connectivity..." -if silent ping -c 1 8.8.8.8; then - msg_ok "Network connectivity confirmed" -else - msg_error "Network connectivity failed" -fi - -# Download files -msg_info "Downloading configuration..." -silent curl -fsSL https://example.com/config -o /tmp/config - -# Extract archives -msg_info "Extracting archive..." -silent tar -xzf /tmp/archive.tar.gz -C /opt/ -``` - -## System Check Examples - -### Comprehensive System Validation - -```bash -#!/usr/bin/env bash -source core.func - -# Complete system validation -validate_system() { - msg_info "Validating system requirements..." - - # Check Proxmox version - if pve_check; then - msg_ok "Proxmox VE version is supported" - fi - - # Check architecture - if arch_check; then - msg_ok "System architecture is supported" - fi - - # Check shell - if shell_check; then - msg_ok "Shell environment is correct" - fi - - # Check privileges - if root_check; then - msg_ok "Running with sufficient privileges" - fi - - # Check SSH connection - ssh_check - - msg_ok "System validation completed" -} - -# Run validation -validate_system -``` - -### Conditional System Checks - -```bash -#!/usr/bin/env bash -source core.func - -# Check if running in container -if [[ -f /.dockerenv ]] || [[ -f /run/.containerenv ]]; then - msg_warn "Running inside container" - # Skip some checks -else - # Full system checks - pve_check - arch_check -fi - -# Always check shell and privileges -shell_check -root_check -``` - -## Header Management Examples - -### Application Header Display - -```bash -#!/usr/bin/env bash -source core.func - -# Set application information -export APP="plex" -export APP_TYPE="ct" - -# Display header -header_info - -# Continue with application setup 
-msg_info "Setting up Plex Media Server..." -``` - -### Custom Header Handling - -```bash -#!/usr/bin/env bash -source core.func - -# Get header content -export APP="nextcloud" -export APP_TYPE="ct" - -header_content=$(get_header) -if [[ -n "$header_content" ]]; then - echo "Header found:" - echo "$header_content" -else - msg_warn "No header found for $APP" -fi -``` - -## Swap Management Examples - -### Interactive Swap Creation - -```bash -#!/usr/bin/env bash -source core.func - -# Check and create swap -if check_or_create_swap; then - msg_ok "Swap is available" -else - msg_warn "No swap available - continuing without swap" -fi -``` - -### Automated Swap Check - -```bash -#!/usr/bin/env bash -source core.func - -# Check swap without prompting -check_swap_quiet() { - if swapon --noheadings --show | grep -q 'swap'; then - msg_ok "Swap is active" - return 0 - else - msg_warn "No active swap detected" - return 1 - fi -} - -if check_swap_quiet; then - msg_info "System has sufficient swap" -else - msg_warn "Consider adding swap for better performance" -fi -``` - -## Spinner Usage Examples - -### Long-Running Operations - -```bash -#!/usr/bin/env bash -source core.func - -# Long-running operation with spinner -long_operation() { - msg_info "Processing large dataset..." - - # Simulate long operation - for i in {1..100}; do - sleep 0.1 - # Update spinner message periodically - if (( i % 20 == 0 )); then - SPINNER_MSG="Processing... $i%" - fi - done - - msg_ok "Dataset processing completed" -} - -long_operation -``` - -### Background Operations - -```bash -#!/usr/bin/env bash -source core.func - -# Background operation with spinner -background_operation() { - msg_info "Starting background process..." - - # Start spinner - SPINNER_MSG="Processing in background..." - spinner & - SPINNER_PID=$! 
- - # Do background work - sleep 5 - - # Stop spinner - stop_spinner - msg_ok "Background process completed" -} - -background_operation -``` - -## Integration Examples - -### With build.func - -```bash -#!/usr/bin/env bash -# Integration with build.func - -source core.func -source build.func - -# Use core functions for system validation -pve_check -arch_check -root_check - -# Use build.func for container creation -export APP="plex" -export CTID="100" -# ... container creation ... -``` - -### With tools.func - -```bash -#!/usr/bin/env bash -# Integration with tools.func - -source core.func -source tools.func - -# Use core functions for UI -msg_info "Starting maintenance tasks..." - -# Use tools.func for maintenance -update_system -cleanup_logs -optimize_storage - -msg_ok "Maintenance completed" -``` - -### With error_handler.func - -```bash -#!/usr/bin/env bash -# Integration with error_handler.func - -source core.func -source error_handler.func - -# Use core functions for execution -msg_info "Running operation..." 
- -# Silent execution will use error_handler for explanations -silent apt-get install -y package - -msg_ok "Operation completed" -``` - -## Best Practices Examples - -### Error Handling Pattern - -```bash -#!/usr/bin/env bash -source core.func - -# Robust error handling -run_with_error_handling() { - local operation="$1" - local description="$2" - - msg_info "$description" - - if silent "$operation"; then - msg_ok "$description completed successfully" - return 0 - else - msg_error "$description failed" - return 1 - fi -} - -# Usage -run_with_error_handling "apt-get update" "Package list update" -run_with_error_handling "apt-get install -y nginx" "Nginx installation" -``` - -### Verbose Mode Handling - -```bash -#!/usr/bin/env bash -source core.func - -# Handle verbose mode -if is_verbose_mode; then - msg_info "Verbose mode enabled - showing detailed output" - # Show more information -else - msg_info "Normal mode - showing minimal output" - # Show less information -fi -``` - -### Alpine Linux Detection - -```bash -#!/usr/bin/env bash -source core.func - -# Handle different OS types -if is_alpine; then - msg_info "Detected Alpine Linux" - # Use Alpine-specific commands - silent apk add --no-cache package -else - msg_info "Detected Debian-based system" - # Use Debian-specific commands - silent apt-get install -y package -fi -``` - -### Conditional Execution - -```bash -#!/usr/bin/env bash -source core.func - -# Conditional execution based on system state -if [[ -f /etc/nginx/nginx.conf ]]; then - msg_warn "Nginx configuration already exists" - read -p "Overwrite? [y/N]: " overwrite - if [[ "$overwrite" =~ ^[yY]$ ]]; then - msg_info "Overwriting configuration..." - # ... overwrite logic ... - else - msg_info "Skipping configuration" - fi -else - msg_info "Creating new Nginx configuration..." - # ... create logic ... 
-fi -``` - -## Advanced Usage Examples - -### Custom Spinner Messages - -```bash -#!/usr/bin/env bash -source core.func - -# Custom spinner with progress -download_with_progress() { - local url="$1" - local file="$2" - - msg_info "Starting download..." - - # Start spinner - SPINNER_MSG="Downloading..." - spinner & - SPINNER_PID=$! - - # Download with progress - curl -L "$url" -o "$file" --progress-bar - - # Stop spinner - stop_spinner - msg_ok "Download completed" -} - -download_with_progress "https://example.com/file.tar.gz" "/tmp/file.tar.gz" -``` - -### Message Deduplication - -```bash -#!/usr/bin/env bash -source core.func - -# Messages are automatically deduplicated -for i in {1..5}; do - msg_info "Processing item $i" - # This message will only show once -done - -# Different messages will show separately -msg_info "Starting phase 1" -msg_info "Starting phase 2" -msg_info "Starting phase 3" -``` - -### Terminal Control - -```bash -#!/usr/bin/env bash -source core.func - -# Ensure terminal control is available -ensure_tput - -# Use terminal control -clear_line -echo "This line will be cleared" -clear_line -echo "New content" -``` - -## Troubleshooting Examples - -### Debug Mode - -```bash -#!/usr/bin/env bash -source core.func - -# Enable debug mode -export var_full_verbose=1 -export VERBOSE="yes" - -# Debug information -msg_debug "Script started" -msg_debug "Current user: $(whoami)" -msg_debug "Current directory: $(pwd)" -msg_debug "Environment variables: $(env | grep -E '^(APP|CTID|VERBOSE)')" -``` - -### Silent Execution Debugging - -```bash -#!/usr/bin/env bash -source core.func - -# Debug silent execution -debug_silent() { - local cmd="$1" - local log_file="/tmp/debug.$$.log" - - echo "Command: $cmd" > "$log_file" - echo "Timestamp: $(date)" >> "$log_file" - echo "Working directory: $(pwd)" >> "$log_file" - echo "Environment:" >> "$log_file" - env >> "$log_file" - echo "--- Command Output ---" >> "$log_file" - - if silent "$cmd"; then - msg_ok "Command 
succeeded" - else - msg_error "Command failed - check $log_file for details" - fi -} - -debug_silent "apt-get update" -``` - -### Error Recovery - -```bash -#!/usr/bin/env bash -source core.func - -# Error recovery pattern -retry_operation() { - local max_attempts=3 - local attempt=1 - - while [[ $attempt -le $max_attempts ]]; do - msg_info "Attempt $attempt of $max_attempts" - - if silent "$@"; then - msg_ok "Operation succeeded on attempt $attempt" - return 0 - else - msg_warn "Attempt $attempt failed" - ((attempt++)) - - if [[ $attempt -le $max_attempts ]]; then - msg_info "Retrying in 5 seconds..." - sleep 5 - fi - fi - done - - msg_error "Operation failed after $max_attempts attempts" - return 1 -} - -# Usage -retry_operation "apt-get install -y package" -``` diff --git a/docs/misc/core.func/README.md b/docs/misc/core.func/README.md deleted file mode 100644 index 52c62af6e..000000000 --- a/docs/misc/core.func/README.md +++ /dev/null @@ -1,181 +0,0 @@ -# core.func Documentation - -## Overview - -The `core.func` file provides fundamental utility functions and system checks that form the foundation for all other scripts in the Proxmox Community Scripts project. It handles basic system operations, user interface elements, validation, and core infrastructure. 
- -## Purpose and Use Cases - -- **System Validation**: Checks for Proxmox VE compatibility, architecture, shell requirements -- **User Interface**: Provides colored output, icons, spinners, and formatted messages -- **Core Utilities**: Basic functions used across all scripts -- **Error Handling**: Silent execution with detailed error reporting -- **System Information**: OS detection, verbose mode handling, swap management - -## Quick Reference - -### Key Function Groups -- **System Checks**: `pve_check()`, `arch_check()`, `shell_check()`, `root_check()` -- **User Interface**: `msg_info()`, `msg_ok()`, `msg_error()`, `msg_warn()`, `spinner()` -- **Core Utilities**: `silent()`, `is_alpine()`, `is_verbose_mode()`, `get_header()` -- **System Management**: `check_or_create_swap()`, `ensure_tput()` - -### Dependencies -- **External**: `curl` for downloading headers, `tput` for terminal control -- **Internal**: `error_handler.func` for error explanations - -### Integration Points -- Used by: All other `.func` files and installation scripts -- Uses: `error_handler.func` for error explanations -- Provides: Core utilities for `build.func`, `tools.func`, `api.func` - -## Documentation Files - -### 📊 [CORE_FLOWCHART.md](./CORE_FLOWCHART.md) -Visual execution flows showing how core functions interact and the system validation process. - -### 📚 [CORE_FUNCTIONS_REFERENCE.md](./CORE_FUNCTIONS_REFERENCE.md) -Complete alphabetical reference of all functions with parameters, dependencies, and usage details. - -### 💡 [CORE_USAGE_EXAMPLES.md](./CORE_USAGE_EXAMPLES.md) -Practical examples showing how to use core functions in scripts and common patterns. - -### 🔗 [CORE_INTEGRATION.md](./CORE_INTEGRATION.md) -How core.func integrates with other components and provides foundational services. 
- -## Key Features - -### System Validation -- **Proxmox VE Version Check**: Supports PVE 8.0-8.9 and 9.0 -- **Architecture Check**: Ensures AMD64 architecture (excludes PiMox) -- **Shell Check**: Validates Bash shell usage -- **Root Check**: Ensures root privileges -- **SSH Check**: Warns about external SSH usage - -### User Interface -- **Colored Output**: ANSI color codes for styled terminal output -- **Icons**: Symbolic icons for different message types -- **Spinners**: Animated progress indicators -- **Formatted Messages**: Consistent message formatting across scripts - -### Core Utilities -- **Silent Execution**: Execute commands with detailed error reporting -- **OS Detection**: Alpine Linux detection -- **Verbose Mode**: Handle verbose output settings -- **Header Management**: Download and display application headers -- **Swap Management**: Check and create swap files - -## Common Usage Patterns - -### Basic Script Setup -```bash -# Source core functions -source core.func - -# Run system checks -pve_check -arch_check -shell_check -root_check -``` - -### Message Display -```bash -# Show progress -msg_info "Installing package..." 
- -# Show success -msg_ok "Package installed successfully" - -# Show error -msg_error "Installation failed" - -# Show warning -msg_warn "This operation may take some time" -``` - -### Silent Command Execution -```bash -# Execute command silently with error handling -silent apt-get update -silent apt-get install -y package-name -``` - -## Environment Variables - -### Core Variables -- `VERBOSE`: Enable verbose output mode -- `SILENT_LOGFILE`: Path to silent execution log file -- `APP`: Application name for header display -- `APP_TYPE`: Application type (ct/vm) for header paths - -### Internal Variables -- `_CORE_FUNC_LOADED`: Prevents multiple loading -- `__FUNCTIONS_LOADED`: Prevents multiple function loading -- `RETRY_NUM`: Number of retry attempts (default: 10) -- `RETRY_EVERY`: Seconds between retries (default: 3) - -## Error Handling - -### Silent Execution Errors -- Commands executed via `silent()` capture output to log file -- On failure, displays error code explanation -- Shows last 10 lines of log output -- Provides command to view full log - -### System Check Failures -- Each system check function exits with appropriate error message -- Clear indication of what's wrong and how to fix it -- Graceful exit with sleep delay for user to read message - -## Best Practices - -### Script Initialization -1. Source `core.func` first -2. Run system checks early -3. Set up error handling -4. Use appropriate message functions - -### Message Usage -1. Use `msg_info()` for progress updates -2. Use `msg_ok()` for successful completions -3. Use `msg_error()` for failures -4. Use `msg_warn()` for warnings - -### Silent Execution -1. Use `silent()` for commands that might fail -2. Check return codes after silent execution -3. Provide meaningful error messages - -## Troubleshooting - -### Common Issues -1. **Proxmox Version**: Ensure running supported PVE version -2. **Architecture**: Script only works on AMD64 systems -3. **Shell**: Must use Bash shell -4. 
**Permissions**: Must run as root -5. **Network**: SSH warnings for external connections - -### Debug Mode -Enable verbose output for debugging: -```bash -export VERBOSE="yes" -source core.func -``` - -### Log Files -Check silent execution logs: -```bash -cat /tmp/silent.$$.log -``` - -## Related Documentation - -- [build.func](../build.func/) - Main container creation script -- [error_handler.func](../error_handler.func/) - Error handling utilities -- [tools.func](../tools.func/) - Extended utility functions -- [api.func](../api.func/) - Proxmox API interactions - ---- - -*This documentation covers the core.func file which provides fundamental utilities for all Proxmox Community Scripts.* diff --git a/docs/misc/error_handler.func/ERROR_HANDLER_FLOWCHART.md b/docs/misc/error_handler.func/ERROR_HANDLER_FLOWCHART.md deleted file mode 100644 index 984596d7f..000000000 --- a/docs/misc/error_handler.func/ERROR_HANDLER_FLOWCHART.md +++ /dev/null @@ -1,347 +0,0 @@ -# error_handler.func Execution Flowchart - -## Main Error Handling Flow - -``` -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Error Handler Initialization │ -│ Entry point when error_handler.func is sourced by other scripts │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ CATCH_ERRORS() │ -│ Initialize error handling traps and strict mode │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Trap Setup Sequence │ -│ │ -│ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────────────┐ │ -│ │ Set Strict │ │ Set Error │ │ Set Signal │ │ -│ │ Mode │ │ Trap │ │ Traps │ │ -│ │ │ │ │ │ │ │ -│ │ • -Ee │ │ • ERR trap │ │ • EXIT trap │ │ -│ │ • -o pipefail │ │ • error_handler │ │ • INT trap │ │ -│ │ • -u (if │ │ 
function │ │ • TERM trap │ │ -│ │ STRICT_UNSET) │ │ │ │ │ │ -│ └─────────────────┘ └─────────────────┘ └─────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ -``` - -## Error Handler Flow - -``` -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ ERROR_HANDLER() Flow │ -│ Main error handler triggered by ERR trap or manual call │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Error Detection │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Error Information Collection │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Get Exit │ │ Get Command │ │ Get Line │ │ │ -│ │ │ Code │ │ Information │ │ Number │ │ │ -│ │ │ │ │ │ │ │ │ │ -│ │ │ • From $? or │ │ • From │ │ • From │ │ │ -│ │ │ parameter │ │ BASH_COMMAND │ │ BASH_LINENO[0] │ │ │ -│ │ │ • Store in │ │ • Clean $STD │ │ • Default to │ │ │ -│ │ │ exit_code │ │ references │ │ "unknown" │ │ │ -│ │ │ │ │ • Store in │ │ • Store in │ │ │ -│ │ │ │ │ command │ │ line_number │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Success Check │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Exit Code Validation │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Check Exit │ │ Success │ │ Error │ │ -│ │ │ Code │ │ Path │ │ Path │ │ -│ │ │ │ │ │ │ │ │ -│ │ │ • If exit_code │ │ • Return 0 │ │ • Continue to │ │ -│ │ │ == 0 │ │ • No error │ 
│ error handling │ │ -│ │ │ • Success │ │ processing │ │ • Process error │ │ -│ │ │ • No error │ │ │ │ information │ │ -│ │ │ handling │ │ │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Error Processing │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Error Explanation │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Get Error │ │ Display Error │ │ Log Error │ │ │ -│ │ │ Explanation │ │ Information │ │ Information │ │ │ -│ │ │ │ │ │ │ │ │ -│ │ │ • Call │ │ • Show error │ │ • Write to debug │ │ -│ │ │ explain_exit_ │ │ message │ │ log if enabled │ │ -│ │ │ code() │ │ • Show line │ │ • Include │ │ -│ │ │ • Get human- │ │ number │ │ timestamp │ │ -│ │ │ readable │ │ • Show command │ │ • Include exit │ │ -│ │ │ message │ │ • Show exit │ │ code │ │ -│ │ │ │ │ code │ │ • Include command │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Silent Log Integration │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Silent Log Display │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Check Silent │ │ Display Log │ │ Exit with │ │ -│ │ │ Log File │ │ Content │ │ Error Code │ │ -│ │ │ │ │ │ │ │ │ -│ │ │ • Check if │ │ • Show last 20 │ │ • Exit with │ │ -│ │ │ SILENT_ │ │ lines │ │ original exit │ │ -│ │ │ LOGFILE set │ │ 
• Show file │ │ code │ │ -│ │ │ • Check if │ │ path │ │ • Terminate script │ │ -│ │ │ file exists │ │ • Format │ │ execution │ │ -│ │ │ • Check if │ │ output │ │ │ │ -│ │ │ file has │ │ │ │ │ │ -│ │ │ content │ │ │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ -``` - -## Signal Handling Flow - -``` -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Signal Handler Flow │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Signal Detection │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ SIGINT │ │ SIGTERM │ │ EXIT │ │ │ -│ │ │ (Ctrl+C) │ │ (Termination) │ │ (Script End) │ │ │ -│ │ │ │ │ │ │ │ │ -│ │ │ • User │ │ • System │ │ • Normal script │ │ -│ │ │ interruption │ │ termination │ │ completion │ │ -│ │ │ • Graceful │ │ • Graceful │ │ • Error exit │ │ -│ │ │ handling │ │ handling │ │ • Signal exit │ │ -│ │ │ • Exit code │ │ • Exit code │ │ • Cleanup │ │ -│ │ │ 130 │ │ 143 │ │ operations │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ ON_INTERRUPT() Flow │ -│ Handles SIGINT (Ctrl+C) signals │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Interrupt Processing │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ User Interruption Handling │ │ -│ │ │ │ -│ │ ┌─────────────────┐ 
┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Display │ │ Cleanup │ │ Exit with │ │ │ -│ │ │ Message │ │ Operations │ │ Code 130 │ │ │ -│ │ │ │ │ │ │ │ │ -│ │ │ • Show │ │ • Stop │ │ • Exit with │ │ -│ │ │ interruption │ │ processes │ │ SIGINT code │ │ -│ │ │ message │ │ • Clean up │ │ • Terminate script │ │ -│ │ │ • Use red │ │ temporary │ │ execution │ │ -│ │ │ color │ │ files │ │ │ │ -│ │ │ • Clear │ │ • Remove lock │ │ │ │ -│ │ │ terminal │ │ files │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ -``` - -## Exit Handler Flow - -``` -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ ON_EXIT() Flow │ -│ Handles script exit cleanup │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Exit Cleanup │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Cleanup Operations │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Lock File │ │ Temporary │ │ Exit with │ │ │ -│ │ │ Cleanup │ │ File │ │ Original Code │ │ │ -│ │ │ │ │ Cleanup │ │ │ │ -│ │ │ • Check if │ │ • Remove │ │ • Exit with │ │ -│ │ │ lockfile │ │ temporary │ │ original exit │ │ -│ │ │ variable set │ │ files │ │ code │ │ -│ │ │ • Check if │ │ • Clean up │ │ • Preserve exit │ │ -│ │ │ lockfile │ │ process │ │ status │ │ -│ │ │ exists │ │ state │ │ • Terminate │ │ -│ │ │ • Remove │ │ │ │ execution │ │ -│ │ │ lockfile │ │ │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ 
-└─────────────────────────────────────────────────────────────────────────────────┘ -``` - -## Error Code Explanation Flow - -``` -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ EXPLAIN_EXIT_CODE() Flow │ -│ Converts numeric exit codes to human-readable explanations │ -└─────────────────────┬───────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Error Code Classification │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Error Code Categories │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Generic/ │ │ Package │ │ Node.js │ │ │ -│ │ │ Shell │ │ Manager │ │ Errors │ │ │ -│ │ │ Errors │ │ Errors │ │ │ │ -│ │ │ │ │ │ │ • 243: Out of │ │ -│ │ │ • 1: General │ │ • 100: APT │ │ memory │ │ -│ │ │ error │ │ package │ │ • 245: Invalid │ │ -│ │ │ • 2: Shell │ │ error │ │ option │ │ -│ │ │ builtin │ │ • 101: APT │ │ • 246: Parse │ │ -│ │ │ misuse │ │ config error │ │ error │ │ -│ │ │ • 126: Cannot │ │ • 255: DPKG │ │ • 247: Fatal │ │ -│ │ │ execute │ │ fatal error │ │ error │ │ -│ │ │ • 127: Command │ │ │ │ • 248: Addon │ │ -│ │ │ not found │ │ │ │ failure │ │ -│ │ │ • 128: Invalid │ │ │ │ • 249: Inspector │ │ -│ │ │ exit │ │ │ │ error │ │ -│ │ │ • 130: SIGINT │ │ │ │ • 254: Unknown │ │ -│ │ │ • 137: SIGKILL │ │ │ │ fatal error │ │ -│ │ │ • 139: Segfault │ │ │ │ │ │ -│ │ │ • 143: SIGTERM │ │ │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Python │ │ Database │ │ Proxmox │ │ │ -│ │ │ Errors │ │ Errors │ │ Custom │ │ │ -│ │ │ │ │ │ │ Errors │ │ -│ │ │ • 210: Virtual │ │ • PostgreSQL: │ │ • 200: Lock file │ │ -│ │ │ env missing │ │ 231-234 │ │ failed │ │ -│ │ │ • 211: Dep │ │ • MySQL: 241- │ │ • 203: Missing │ │ -│ │ │ resolution 
│ │ 244 │ │ CTID │ │ -│ │ │ • 212: Install │ │ • MongoDB: 251- │ │ • 204: Missing │ │ -│ │ │ aborted │ │ 254 │ │ PCT_OSTYPE │ │ -│ │ │ │ │ │ │ • 205: Invalid │ │ -│ │ │ │ │ │ │ CTID │ │ -│ │ │ │ │ │ │ • 209: Container │ │ -│ │ │ │ │ │ │ creation failed │ │ -│ │ │ │ │ │ │ • 210: Cluster │ │ -│ │ │ │ │ │ │ not quorate │ │ -│ │ │ │ │ │ │ • 214: No storage │ │ -│ │ │ │ │ │ │ space │ │ -│ │ │ │ │ │ │ • 215: CTID not │ │ -│ │ │ │ │ │ │ listed │ │ -│ │ │ │ │ │ │ • 216: RootFS │ │ -│ │ │ │ │ │ │ missing │ │ -│ │ │ │ │ │ │ • 217: Storage │ │ -│ │ │ │ │ │ │ not supported │ │ -│ │ │ │ │ │ │ • 220: Template │ │ -│ │ │ │ │ │ │ path error │ │ -│ │ │ │ │ │ │ • 222: Template │ │ -│ │ │ │ │ │ │ download failed │ │ -│ │ │ │ │ │ │ • 223: Template │ │ -│ │ │ │ │ │ │ not available │ │ -│ │ │ │ │ │ │ • 231: LXC stack │ │ -│ │ │ │ │ │ │ upgrade failed │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Default Case │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Unknown Error Handling │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Check for │ │ Return │ │ Log Unknown │ │ │ -│ │ │ Unknown │ │ Generic │ │ Error │ │ │ -│ │ │ Code │ │ Message │ │ │ │ -│ │ │ │ │ │ │ • Log to debug │ │ -│ │ │ • If no match │ │ • "Unknown │ │ file if enabled │ │ -│ │ │ found │ │ error" │ │ • Include error │ │ -│ │ │ • Use default │ │ • Return to │ │ code │ │ -│ │ │ case │ │ caller │ │ • Include │ │ -│ │ │ │ │ │ │ timestamp │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ 
-└─────────────────────────────────────────────────────────────────────────────────┘ -``` - -## Debug Logging Flow - -``` -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ Debug Log Integration │ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────────┐ │ -│ │ Debug Log Writing │ │ -│ │ │ │ -│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────────┐ │ │ -│ │ │ Check Debug │ │ Write Error │ │ Format Log │ │ │ -│ │ │ Log File │ │ Information │ │ Entry │ │ │ -│ │ │ │ │ │ │ │ │ -│ │ │ • Check if │ │ • Timestamp │ │ • Error separator │ │ -│ │ │ DEBUG_LOGFILE │ │ • Exit code │ │ • Structured │ │ -│ │ │ set │ │ • Explanation │ │ format │ │ -│ │ │ • Check if │ │ • Line number │ │ • Easy to parse │ │ -│ │ │ file exists │ │ • Command │ │ • Easy to read │ │ -│ │ │ • Check if │ │ • Append to │ │ │ │ -│ │ │ file writable │ │ file │ │ │ │ -│ │ └─────────────────┘ └─────────────────┘ └─────────────────────┘ │ │ -│ └─────────────────────────────────────────────────────────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ -``` - -## Integration Points - -### With core.func -- **Silent Execution**: Provides error explanations for silent() function -- **Color Variables**: Uses color variables for error display -- **Log Integration**: Integrates with SILENT_LOGFILE - -### With Other Scripts -- **Error Traps**: Sets up ERR trap for automatic error handling -- **Signal Traps**: Handles SIGINT, SIGTERM, and EXIT signals -- **Cleanup**: Provides cleanup on script exit - -### External Dependencies -- **None**: Pure Bash implementation -- **Color Support**: Requires color variables from core.func -- **Log Files**: Uses standard file operations diff --git a/docs/misc/error_handler.func/ERROR_HANDLER_FUNCTIONS_REFERENCE.md b/docs/misc/error_handler.func/ERROR_HANDLER_FUNCTIONS_REFERENCE.md deleted file mode 100644 index 2270ffb76..000000000 --- 
a/docs/misc/error_handler.func/ERROR_HANDLER_FUNCTIONS_REFERENCE.md +++ /dev/null @@ -1,424 +0,0 @@ -# error_handler.func Functions Reference - -## Overview - -This document provides a comprehensive alphabetical reference of all functions in `error_handler.func`, including parameters, dependencies, usage examples, and error handling. - -## Function Categories - -### Error Explanation Functions - -#### `explain_exit_code()` -**Purpose**: Convert numeric exit codes to human-readable explanations -**Parameters**: -- `$1` - Exit code to explain -**Returns**: Human-readable error explanation string -**Side Effects**: None -**Dependencies**: None -**Environment Variables Used**: None - -**Supported Exit Codes**: -- **Generic/Shell**: 1, 2, 126, 127, 128, 130, 137, 139, 143 -- **Package Manager**: 100, 101, 255 -- **Node.js**: 243, 245, 246, 247, 248, 249, 254 -- **Python**: 210, 211, 212 -- **PostgreSQL**: 231, 232, 233, 234 -- **MySQL/MariaDB**: 241, 242, 243, 244 -- **MongoDB**: 251, 252, 253, 254 -- **Proxmox Custom**: 200, 203, 204, 205, 209, 210, 214, 215, 216, 217, 220, 222, 223, 231 - -**Usage Example**: -```bash -explanation=$(explain_exit_code 127) -echo "Error 127: $explanation" -# Output: Error 127: Command not found -``` - -**Error Code Examples**: -```bash -explain_exit_code 1 # "General error / Operation not permitted" -explain_exit_code 126 # "Command invoked cannot execute (permission problem?)" -explain_exit_code 127 # "Command not found" -explain_exit_code 130 # "Terminated by Ctrl+C (SIGINT)" -explain_exit_code 200 # "Custom: Failed to create lock file" -explain_exit_code 999 # "Unknown error" -``` - -### Error Handling Functions - -#### `error_handler()` -**Purpose**: Main error handler triggered by ERR trap or manual call -**Parameters**: -- `$1` - Exit code (optional, defaults to $?) 
-- `$2` - Command that failed (optional, defaults to BASH_COMMAND) -**Returns**: None (exits with error code) -**Side Effects**: -- Displays detailed error information -- Logs error to debug file if enabled -- Shows silent log content if available -- Exits with original error code -**Dependencies**: `explain_exit_code()` -**Environment Variables Used**: `DEBUG_LOGFILE`, `SILENT_LOGFILE` - -**Usage Example**: -```bash -# Automatic error handling via ERR trap -set -e -trap 'error_handler' ERR - -# Manual error handling -error_handler 127 "command_not_found" -``` - -**Error Information Displayed**: -- Error message with color coding -- Line number where error occurred -- Exit code with explanation -- Command that failed -- Silent log content (last 20 lines) -- Debug log entry (if enabled) - -### Signal Handling Functions - -#### `on_interrupt()` -**Purpose**: Handle SIGINT (Ctrl+C) signals gracefully -**Parameters**: None -**Returns**: None (exits with code 130) -**Side Effects**: -- Displays interruption message -- Exits with SIGINT code (130) -**Dependencies**: None -**Environment Variables Used**: None - -**Usage Example**: -```bash -# Set up interrupt handler -trap on_interrupt INT - -# User presses Ctrl+C -# Handler displays: "Interrupted by user (SIGINT)" -# Script exits with code 130 -``` - -#### `on_terminate()` -**Purpose**: Handle SIGTERM signals gracefully -**Parameters**: None -**Returns**: None (exits with code 143) -**Side Effects**: -- Displays termination message -- Exits with SIGTERM code (143) -**Dependencies**: None -**Environment Variables Used**: None - -**Usage Example**: -```bash -# Set up termination handler -trap on_terminate TERM - -# System sends SIGTERM -# Handler displays: "Terminated by signal (SIGTERM)" -# Script exits with code 143 -``` - -### Cleanup Functions - -#### `on_exit()` -**Purpose**: Handle script exit cleanup -**Parameters**: None -**Returns**: None (exits with original exit code) -**Side Effects**: -- Removes lock file if 
set -- Exits with original exit code -**Dependencies**: None -**Environment Variables Used**: `lockfile` - -**Usage Example**: -```bash -# Set up exit handler -trap on_exit EXIT - -# Set lock file -lockfile="/tmp/my_script.lock" - -# Script exits normally or with error -# Handler removes lock file and exits -``` - -### Initialization Functions - -#### `catch_errors()` -**Purpose**: Initialize error handling traps and strict mode -**Parameters**: None -**Returns**: None -**Side Effects**: -- Sets strict error handling mode -- Sets up error traps -- Sets up signal traps -- Sets up exit trap -**Dependencies**: None -**Environment Variables Used**: `STRICT_UNSET` - -**Strict Mode Settings**: -- `-E`: Exit on command failure -- `-e`: Exit on any error -- `-o pipefail`: Exit on pipe failure -- `-u`: Exit on unset variables (if STRICT_UNSET=1) - -**Trap Setup**: -- `ERR`: Calls `error_handler` on command failure -- `EXIT`: Calls `on_exit` on script exit -- `INT`: Calls `on_interrupt` on SIGINT -- `TERM`: Calls `on_terminate` on SIGTERM - -**Usage Example**: -```bash -# Initialize error handling -catch_errors - -# Script now has full error handling -# All errors will be caught and handled -``` - -## Function Call Hierarchy - -### Error Handling Flow -``` -Command Failure -├── ERR trap triggered -├── error_handler() called -│ ├── Get exit code -│ ├── Get command info -│ ├── Get line number -│ ├── explain_exit_code() -│ ├── Display error info -│ ├── Log to debug file -│ ├── Show silent log -│ └── Exit with error code -``` - -### Signal Handling Flow -``` -Signal Received -├── Signal trap triggered -├── Appropriate handler called -│ ├── on_interrupt() for SIGINT -│ ├── on_terminate() for SIGTERM -│ └── on_exit() for EXIT -└── Exit with signal code -``` - -### Initialization Flow -``` -catch_errors() -├── Set strict mode -│ ├── -E (exit on failure) -│ ├── -e (exit on error) -│ ├── -o pipefail (pipe failure) -│ └── -u (unset variables, if enabled) -└── Set up traps - ├── ERR → 
error_handler - ├── EXIT → on_exit - ├── INT → on_interrupt - └── TERM → on_terminate -``` - -## Error Code Reference - -### Generic/Shell Errors -| Code | Description | -|------|-------------| -| 1 | General error / Operation not permitted | -| 2 | Misuse of shell builtins (e.g. syntax error) | -| 126 | Command invoked cannot execute (permission problem?) | -| 127 | Command not found | -| 128 | Invalid argument to exit | -| 130 | Terminated by Ctrl+C (SIGINT) | -| 137 | Killed (SIGKILL / Out of memory?) | -| 139 | Segmentation fault (core dumped) | -| 143 | Terminated (SIGTERM) | - -### Package Manager Errors -| Code | Description | -|------|-------------| -| 100 | APT: Package manager error (broken packages / dependency problems) | -| 101 | APT: Configuration error (bad sources.list, malformed config) | -| 255 | DPKG: Fatal internal error | - -### Node.js Errors -| Code | Description | -|------|-------------| -| 243 | Node.js: Out of memory (JavaScript heap out of memory) | -| 245 | Node.js: Invalid command-line option | -| 246 | Node.js: Internal JavaScript Parse Error | -| 247 | Node.js: Fatal internal error | -| 248 | Node.js: Invalid C++ addon / N-API failure | -| 249 | Node.js: Inspector error | -| 254 | npm/pnpm/yarn: Unknown fatal error | - -### Python Errors -| Code | Description | -|------|-------------| -| 210 | Python: Virtualenv / uv environment missing or broken | -| 211 | Python: Dependency resolution failed | -| 212 | Python: Installation aborted (permissions or EXTERNALLY-MANAGED) | - -### Database Errors -| Code | Description | -|------|-------------| -| 231 | PostgreSQL: Connection failed (server not running / wrong socket) | -| 232 | PostgreSQL: Authentication failed (bad user/password) | -| 233 | PostgreSQL: Database does not exist | -| 234 | PostgreSQL: Fatal error in query / syntax | -| 241 | MySQL/MariaDB: Connection failed (server not running / wrong socket) | -| 242 | MySQL/MariaDB: Authentication failed (bad user/password) | -| 243 | 
MySQL/MariaDB: Database does not exist | -| 244 | MySQL/MariaDB: Fatal error in query / syntax | -| 251 | MongoDB: Connection failed (server not running) | -| 252 | MongoDB: Authentication failed (bad user/password) | -| 253 | MongoDB: Database not found | -| 254 | MongoDB: Fatal query error | - -### Proxmox Custom Errors -| Code | Description | -|------|-------------| -| 200 | Custom: Failed to create lock file | -| 203 | Custom: Missing CTID variable | -| 204 | Custom: Missing PCT_OSTYPE variable | -| 205 | Custom: Invalid CTID (<100) | -| 209 | Custom: Container creation failed | -| 210 | Custom: Cluster not quorate | -| 214 | Custom: Not enough storage space | -| 215 | Custom: Container ID not listed | -| 216 | Custom: RootFS entry missing in config | -| 217 | Custom: Storage does not support rootdir | -| 220 | Custom: Unable to resolve template path | -| 222 | Custom: Template download failed after 3 attempts | -| 223 | Custom: Template not available after download | -| 231 | Custom: LXC stack upgrade/retry failed | - -## Environment Variable Dependencies - -### Required Variables -- **`lockfile`**: Lock file path for cleanup (set by calling script) - -### Optional Variables -- **`DEBUG_LOGFILE`**: Path to debug log file for error logging -- **`SILENT_LOGFILE`**: Path to silent execution log file -- **`STRICT_UNSET`**: Enable strict unset variable checking (0/1) - -### Internal Variables -- **`exit_code`**: Current exit code -- **`command`**: Failed command -- **`line_number`**: Line number where error occurred -- **`explanation`**: Error explanation text - -## Error Handling Patterns - -### Automatic Error Handling -```bash -#!/usr/bin/env bash -source error_handler.func - -# Initialize error handling -catch_errors - -# All commands are now monitored -# Errors will be automatically caught and handled -``` - -### Manual Error Handling -```bash -#!/usr/bin/env bash -source error_handler.func - -# Manual error handling -if ! 
command -v required_tool >/dev/null 2>&1; then - error_handler 127 "required_tool not found" -fi -``` - -### Custom Error Codes -```bash -#!/usr/bin/env bash -source error_handler.func - -# Use custom error codes -if [[ ! -f /required/file ]]; then - echo "Error: Required file missing" - exit 200 # Custom error code -fi -``` - -### Signal Handling -```bash -#!/usr/bin/env bash -source error_handler.func - -# Set up signal handling -trap on_interrupt INT -trap on_terminate TERM -trap on_exit EXIT - -# Script handles signals gracefully -``` - -## Integration Examples - -### With core.func -```bash -#!/usr/bin/env bash -source core.func -source error_handler.func - -# Silent execution uses error_handler for explanations -silent apt-get install -y package -# If command fails, error_handler provides explanation -``` - -### With build.func -```bash -#!/usr/bin/env bash -source core.func -source error_handler.func -source build.func - -# Container creation with error handling -# Errors are caught and explained -``` - -### With tools.func -```bash -#!/usr/bin/env bash -source core.func -source error_handler.func -source tools.func - -# Tool operations with error handling -# All errors are properly handled and explained -``` - -## Best Practices - -### Error Handling Setup -1. Source error_handler.func early in script -2. Call catch_errors() to initialize traps -3. Use appropriate exit codes for different error types -4. Provide meaningful error messages - -### Signal Handling -1. Always set up signal traps -2. Provide graceful cleanup on interruption -3. Use appropriate exit codes for signals -4. Clean up temporary files and processes - -### Error Reporting -1. Use explain_exit_code() for user-friendly messages -2. Log errors to debug files when needed -3. Provide context information (line numbers, commands) -4. Integrate with silent execution logging - -### Custom Error Codes -1. Use Proxmox custom error codes (200-231) for container/VM errors -2. 
Use standard error codes for common operations -3. Document custom error codes in script comments -4. Provide clear error messages for custom codes diff --git a/docs/misc/error_handler.func/ERROR_HANDLER_INTEGRATION.md b/docs/misc/error_handler.func/ERROR_HANDLER_INTEGRATION.md deleted file mode 100644 index c3cf3b5c9..000000000 --- a/docs/misc/error_handler.func/ERROR_HANDLER_INTEGRATION.md +++ /dev/null @@ -1,512 +0,0 @@ -# error_handler.func Integration Guide - -## Overview - -This document describes how `error_handler.func` integrates with other components in the Proxmox Community Scripts project, including dependencies, data flow, and API surface. - -## Dependencies - -### External Dependencies - -#### Required Commands -- **None**: Pure Bash implementation - -#### Optional Commands -- **None**: No external command dependencies - -### Internal Dependencies - -#### core.func -- **Purpose**: Provides color variables for error display -- **Usage**: Uses `RD`, `CL`, `YWB` color variables -- **Integration**: Called automatically when core.func is sourced -- **Data Flow**: Color variables → error display formatting - -## Integration Points - -### With core.func - -#### Silent Execution Integration -```bash -# core.func silent() function uses error_handler.func -silent() { - local cmd="$*" - local caller_line="${BASH_LINENO[0]:-unknown}" - - # Execute command - "$@" >>"$SILENT_LOGFILE" 2>&1 - local rc=$? - - if [[ $rc -ne 0 ]]; then - # Load error_handler.func if needed - if ! 
declare -f explain_exit_code >/dev/null 2>&1; then - source error_handler.func - fi - - # Get error explanation - local explanation - explanation="$(explain_exit_code "$rc")" - - # Display error with explanation - printf "\e[?25h" - echo -e "\n${RD}[ERROR]${CL} in line ${RD}${caller_line}${CL}: exit code ${RD}${rc}${CL} (${explanation})" - echo -e "${RD}Command:${CL} ${YWB}${cmd}${CL}\n" - - exit "$rc" - fi -} -``` - -#### Color Variable Usage -```bash -# error_handler.func uses color variables from core.func -error_handler() { - # ... error handling logic ... - - # Use color variables for error display - echo -e "\n${RD}[ERROR]${CL} in line ${RD}${line_number}${CL}: exit code ${RD}${exit_code}${CL} (${explanation}): while executing command ${YWB}${command}${CL}\n" -} - -on_interrupt() { - echo -e "\n${RD}Interrupted by user (SIGINT)${CL}" - exit 130 -} - -on_terminate() { - echo -e "\n${RD}Terminated by signal (SIGTERM)${CL}" - exit 143 -} -``` - -### With build.func - -#### Container Creation Error Handling -```bash -# build.func uses error_handler.func for container operations -source core.func -source error_handler.func - -# Container creation with error handling -create_container() { - # Set up error handling - catch_errors - - # Container creation operations - silent pct create "$CTID" "$TEMPLATE" \ - --hostname "$HOSTNAME" \ - --memory "$MEMORY" \ - --cores "$CORES" - - # If creation fails, error_handler provides explanation -} -``` - -#### Template Download Error Handling -```bash -# build.func uses error_handler.func for template operations -download_template() { - # Template download with error handling - if ! 
silent curl -fsSL "$TEMPLATE_URL" -o "$TEMPLATE_FILE"; then - # error_handler provides detailed explanation - exit 222 # Template download failed - fi -} -``` - -### With tools.func - -#### Maintenance Operations Error Handling -```bash -# tools.func uses error_handler.func for maintenance operations -source core.func -source error_handler.func - -# Maintenance operations with error handling -update_system() { - catch_errors - - # System update operations - silent apt-get update - silent apt-get upgrade -y - - # Error handling provides explanations for failures -} - -cleanup_logs() { - catch_errors - - # Log cleanup operations - silent find /var/log -name "*.log" -mtime +30 -delete - - # Error handling provides explanations for permission issues -} -``` - -### With api.func - -#### API Operations Error Handling -```bash -# api.func uses error_handler.func for API operations -source core.func -source error_handler.func - -# API operations with error handling -api_call() { - catch_errors - - # API call with error handling - if ! 
silent curl -k -H "Authorization: PVEAPIToken=$API_TOKEN" \ - "$API_URL/api2/json/nodes/$NODE/lxc"; then - # error_handler provides explanation for API failures - exit 1 - fi -} -``` - -### With install.func - -#### Installation Process Error Handling -```bash -# install.func uses error_handler.func for installation operations -source core.func -source error_handler.func - -# Installation with error handling -install_package() { - local package="$1" - - catch_errors - - # Package installation - silent apt-get install -y "$package" - - # Error handling provides explanations for installation failures -} -``` - -### With alpine-install.func - -#### Alpine Installation Error Handling -```bash -# alpine-install.func uses error_handler.func for Alpine operations -source core.func -source error_handler.func - -# Alpine installation with error handling -install_alpine_package() { - local package="$1" - - catch_errors - - # Alpine package installation - silent apk add --no-cache "$package" - - # Error handling provides explanations for Alpine-specific failures -} -``` - -### With alpine-tools.func - -#### Alpine Tools Error Handling -```bash -# alpine-tools.func uses error_handler.func for Alpine tools -source core.func -source error_handler.func - -# Alpine tools with error handling -alpine_tool_operation() { - catch_errors - - # Alpine-specific tool operations - silent alpine_command - - # Error handling provides explanations for Alpine tool failures -} -``` - -### With passthrough.func - -#### Hardware Passthrough Error Handling -```bash -# passthrough.func uses error_handler.func for hardware operations -source core.func -source error_handler.func - -# Hardware passthrough with error handling -configure_gpu_passthrough() { - catch_errors - - # GPU passthrough operations - silent lspci | grep -i nvidia - - # Error handling provides explanations for hardware failures -} -``` - -### With vm-core.func - -#### VM Operations Error Handling -```bash -# vm-core.func uses 
error_handler.func for VM operations -source core.func -source error_handler.func - -# VM operations with error handling -create_vm() { - catch_errors - - # VM creation operations - silent qm create "$VMID" \ - --name "$VMNAME" \ - --memory "$MEMORY" \ - --cores "$CORES" - - # Error handling provides explanations for VM creation failures -} -``` - -## Data Flow - -### Input Data - -#### Environment Variables -- **`DEBUG_LOGFILE`**: Path to debug log file for error logging -- **`SILENT_LOGFILE`**: Path to silent execution log file -- **`STRICT_UNSET`**: Enable strict unset variable checking (0/1) -- **`lockfile`**: Lock file path for cleanup (set by calling script) - -#### Function Parameters -- **Exit codes**: Passed to `explain_exit_code()` and `error_handler()` -- **Command information**: Passed to `error_handler()` for context -- **Signal information**: Passed to signal handlers - -#### System Information -- **Exit codes**: Retrieved from `$?` variable -- **Command information**: Retrieved from `BASH_COMMAND` variable -- **Line numbers**: Retrieved from `BASH_LINENO[0]` variable -- **Process information**: Retrieved from system calls - -### Processing Data - -#### Error Code Processing -- **Code classification**: Categorize exit codes by type -- **Explanation lookup**: Map codes to human-readable messages -- **Context collection**: Gather command and line information -- **Log preparation**: Format error information for logging - -#### Signal Processing -- **Signal detection**: Identify received signals -- **Handler selection**: Choose appropriate signal handler -- **Cleanup operations**: Perform necessary cleanup -- **Exit code setting**: Set appropriate exit codes - -#### Log Processing -- **Debug logging**: Write error information to debug log -- **Silent log integration**: Display silent log content -- **Log formatting**: Format log entries for readability -- **Log analysis**: Provide log analysis capabilities - -### Output Data - -#### Error Information -- 
**Error messages**: Human-readable error explanations -- **Context information**: Line numbers, commands, timestamps -- **Color formatting**: ANSI color codes for terminal display -- **Log content**: Silent log excerpts and debug information - -#### System State -- **Exit codes**: Returned from functions -- **Log files**: Created and updated for error tracking -- **Cleanup status**: Lock file removal and process cleanup -- **Signal handling**: Graceful signal processing - -## API Surface - -### Public Functions - -#### Error Explanation -- **`explain_exit_code()`**: Convert exit codes to explanations -- **Parameters**: Exit code to explain -- **Returns**: Human-readable explanation string -- **Usage**: Called by error_handler() and other functions - -#### Error Handling -- **`error_handler()`**: Main error handler function -- **Parameters**: Exit code (optional), command (optional) -- **Returns**: None (exits with error code) -- **Usage**: Called by ERR trap or manually - -#### Signal Handling -- **`on_interrupt()`**: Handle SIGINT signals -- **`on_terminate()`**: Handle SIGTERM signals -- **`on_exit()`**: Handle script exit cleanup -- **Parameters**: None -- **Returns**: None (exits with signal code) -- **Usage**: Called by signal traps - -#### Initialization -- **`catch_errors()`**: Initialize error handling -- **Parameters**: None -- **Returns**: None -- **Usage**: Called to set up error handling traps - -### Internal Functions - -#### None -- All functions in error_handler.func are public -- No internal helper functions -- Direct implementation of all functionality - -### Global Variables - -#### Configuration Variables -- **`DEBUG_LOGFILE`**: Debug log file path -- **`SILENT_LOGFILE`**: Silent log file path -- **`STRICT_UNSET`**: Strict mode setting -- **`lockfile`**: Lock file path - -#### State Variables -- **`exit_code`**: Current exit code -- **`command`**: Failed command -- **`line_number`**: Line number where error occurred -- **`explanation`**: Error 
explanation text - -## Integration Patterns - -### Standard Integration Pattern - -```bash -#!/usr/bin/env bash -# Standard integration pattern - -# 1. Source core.func first -source core.func - -# 2. Source error_handler.func -source error_handler.func - -# 3. Initialize error handling -catch_errors - -# 4. Use silent execution -silent command - -# 5. Errors are automatically handled -``` - -### Minimal Integration Pattern - -```bash -#!/usr/bin/env bash -# Minimal integration pattern - -source error_handler.func -catch_errors - -# Basic error handling -command -``` - -### Advanced Integration Pattern - -```bash -#!/usr/bin/env bash -# Advanced integration pattern - -source core.func -source error_handler.func - -# Set up comprehensive error handling -export DEBUG_LOGFILE="/tmp/debug.log" -export SILENT_LOGFILE="/tmp/silent.log" -lockfile="/tmp/script.lock" -touch "$lockfile" - -catch_errors -trap on_interrupt INT -trap on_terminate TERM -trap on_exit EXIT - -# Advanced error handling -silent command -``` - -## Error Handling Integration - -### Automatic Error Handling -- **ERR Trap**: Automatically catches command failures -- **Error Explanation**: Provides human-readable error messages -- **Context Information**: Shows line numbers and commands -- **Log Integration**: Displays silent log content - -### Manual Error Handling -- **Custom Error Codes**: Use Proxmox custom error codes -- **Error Recovery**: Implement retry logic with error handling -- **Conditional Handling**: Different handling for different error types -- **Error Analysis**: Analyze error patterns and trends - -### Signal Handling Integration -- **Graceful Interruption**: Handle Ctrl+C gracefully -- **Clean Termination**: Handle SIGTERM signals -- **Exit Cleanup**: Clean up resources on script exit -- **Lock File Management**: Remove lock files on exit - -## Performance Considerations - -### Error Handling Overhead -- **Minimal Impact**: Error handling adds minimal overhead -- **Trap Setup**: Trap 
setup is done once during initialization -- **Error Processing**: Error processing is only done on failures -- **Log Writing**: Log writing is only done when enabled - -### Memory Usage -- **Minimal Footprint**: Error handler uses minimal memory -- **Variable Reuse**: Global variables reused across functions -- **No Memory Leaks**: Proper cleanup prevents memory leaks -- **Efficient Processing**: Efficient error code processing - -### Execution Speed -- **Fast Error Detection**: Quick error detection and handling -- **Efficient Explanation**: Fast error code explanation lookup -- **Minimal Delay**: Minimal delay in error handling -- **Quick Exit**: Fast exit on error conditions - -## Security Considerations - -### Error Information Disclosure -- **Controlled Disclosure**: Only necessary error information is shown -- **Log Security**: Log files have appropriate permissions -- **Sensitive Data**: Sensitive data is not logged -- **Error Sanitization**: Error messages are sanitized - -### Signal Handling Security -- **Signal Validation**: Only expected signals are handled -- **Cleanup Security**: Secure cleanup of temporary files -- **Lock File Security**: Secure lock file management -- **Process Security**: Secure process termination - -### Log File Security -- **File Permissions**: Log files have appropriate permissions -- **Log Rotation**: Log files are rotated to prevent disk filling -- **Log Cleanup**: Old log files are cleaned up -- **Log Access**: Log access is controlled - -## Future Integration Considerations - -### Extensibility -- **New Error Codes**: Easy to add new error code explanations -- **Custom Handlers**: Easy to add custom error handlers -- **Signal Extensions**: Easy to add new signal handlers -- **Log Formats**: Easy to add new log formats - -### Compatibility -- **Bash Version**: Compatible with different Bash versions -- **System Compatibility**: Compatible with different systems -- **Script Compatibility**: Compatible with different script 
types -- **Error Code Compatibility**: Compatible with different error codes - -### Performance -- **Optimization**: Error handling can be optimized for better performance -- **Caching**: Error explanations can be cached for faster lookup -- **Parallel Processing**: Error handling can be parallelized -- **Resource Management**: Better resource management for error handling diff --git a/docs/misc/error_handler.func/ERROR_HANDLER_USAGE_EXAMPLES.md b/docs/misc/error_handler.func/ERROR_HANDLER_USAGE_EXAMPLES.md deleted file mode 100644 index cfb668711..000000000 --- a/docs/misc/error_handler.func/ERROR_HANDLER_USAGE_EXAMPLES.md +++ /dev/null @@ -1,625 +0,0 @@ -# error_handler.func Usage Examples - -## Overview - -This document provides practical usage examples for `error_handler.func` functions, covering common scenarios, integration patterns, and best practices. - -## Basic Error Handling Setup - -### Standard Script Initialization - -```bash -#!/usr/bin/env bash -# Standard error handling setup - -# Source error handler -source error_handler.func - -# Initialize error handling -catch_errors - -# Your script code here -# All errors will be automatically caught and handled -echo "Script running..." -apt-get update -apt-get install -y package -echo "Script completed successfully" -``` - -### Minimal Error Handling - -```bash -#!/usr/bin/env bash -# Minimal error handling setup - -source error_handler.func -catch_errors - -# Simple script with error handling -echo "Starting operation..." 
-command_that_might_fail -echo "Operation completed" -``` - -## Error Code Explanation Examples - -### Basic Error Explanation - -```bash -#!/usr/bin/env bash -source error_handler.func - -# Explain common error codes -echo "Error 1: $(explain_exit_code 1)" -echo "Error 127: $(explain_exit_code 127)" -echo "Error 130: $(explain_exit_code 130)" -echo "Error 200: $(explain_exit_code 200)" -``` - -### Error Code Testing - -```bash -#!/usr/bin/env bash -source error_handler.func - -# Test all error codes -test_error_codes() { - local codes=(1 2 126 127 128 130 137 139 143 100 101 255 200 203 204 205) - - for code in "${codes[@]}"; do - echo "Code $code: $(explain_exit_code $code)" - done -} - -test_error_codes -``` - -### Custom Error Code Usage - -```bash -#!/usr/bin/env bash -source error_handler.func - -# Use custom error codes -check_requirements() { - if [[ ! -f /required/file ]]; then - echo "Error: Required file missing" - exit 200 # Custom error code - fi - - if [[ -z "$CTID" ]]; then - echo "Error: CTID not set" - exit 203 # Custom error code - fi - - if [[ $CTID -lt 100 ]]; then - echo "Error: Invalid CTID" - exit 205 # Custom error code - fi -} - -check_requirements -``` - -## Signal Handling Examples - -### Interrupt Handling - -```bash -#!/usr/bin/env bash -source error_handler.func - -# Set up interrupt handler -trap on_interrupt INT - -echo "Script running... Press Ctrl+C to interrupt" -sleep 10 -echo "Script completed normally" -``` - -### Termination Handling - -```bash -#!/usr/bin/env bash -source error_handler.func - -# Set up termination handler -trap on_terminate TERM - -echo "Script running... 
Send SIGTERM to terminate" -sleep 10 -echo "Script completed normally" -``` - -### Complete Signal Handling - -```bash -#!/usr/bin/env bash -source error_handler.func - -# Set up all signal handlers -trap on_interrupt INT -trap on_terminate TERM -trap on_exit EXIT - -echo "Script running with full signal handling" -sleep 10 -echo "Script completed normally" -``` - -## Cleanup Examples - -### Lock File Cleanup - -```bash -#!/usr/bin/env bash -source error_handler.func - -# Set up lock file -lockfile="/tmp/my_script.lock" -touch "$lockfile" - -# Set up exit handler -trap on_exit EXIT - -echo "Script running with lock file..." -sleep 5 -echo "Script completed - lock file will be removed" -``` - -### Temporary File Cleanup - -```bash -#!/usr/bin/env bash -source error_handler.func - -# Create temporary files -temp_file1="/tmp/temp1.$$" -temp_file2="/tmp/temp2.$$" -touch "$temp_file1" "$temp_file2" - -# Set up cleanup -cleanup() { - rm -f "$temp_file1" "$temp_file2" - echo "Temporary files cleaned up" -} - -trap cleanup EXIT - -echo "Script running with temporary files..." -sleep 5 -echo "Script completed - temporary files will be cleaned up" -``` - -## Debug Logging Examples - -### Basic Debug Logging - -```bash -#!/usr/bin/env bash -source error_handler.func - -# Enable debug logging -export DEBUG_LOGFILE="/tmp/debug.log" -catch_errors - -echo "Script with debug logging" -apt-get update -apt-get install -y package -``` - -### Debug Log Analysis - -```bash -#!/usr/bin/env bash -source error_handler.func - -# Enable debug logging -export DEBUG_LOGFILE="/tmp/debug.log" -catch_errors - -# Function to analyze debug log -analyze_debug_log() { - if [[ -f "$DEBUG_LOGFILE" ]]; then - echo "Debug log analysis:" - echo "Total errors: $(grep -c "ERROR" "$DEBUG_LOGFILE")" - echo "Recent errors:" - tail -n 5 "$DEBUG_LOGFILE" - else - echo "No debug log found" - fi -} - -# Run script -echo "Running script..." 
-apt-get update - -# Analyze results -analyze_debug_log -``` - -## Silent Execution Integration - -### With core.func Silent Execution - -```bash -#!/usr/bin/env bash -source core.func -source error_handler.func - -# Silent execution with error handling -echo "Installing packages..." -silent apt-get update -silent apt-get install -y nginx - -echo "Configuring service..." -silent systemctl enable nginx -silent systemctl start nginx - -echo "Installation completed" -``` - -### Silent Execution Error Handling - -```bash -#!/usr/bin/env bash -source core.func -source error_handler.func - -# Function with silent execution and error handling -install_package() { - local package="$1" - - echo "Installing $package..." - if silent apt-get install -y "$package"; then - echo "$package installed successfully" - return 0 - else - echo "Failed to install $package" - return 1 - fi -} - -# Install multiple packages -packages=("nginx" "apache2" "mysql-server") -for package in "${packages[@]}"; do - if ! install_package "$package"; then - echo "Stopping installation due to error" - exit 1 - fi -done -``` - -## Advanced Error Handling Examples - -### Conditional Error Handling - -```bash -#!/usr/bin/env bash -source error_handler.func - -# Conditional error handling based on environment -setup_error_handling() { - if [[ "${STRICT_MODE:-0}" == "1" ]]; then - echo "Enabling strict mode" - export STRICT_UNSET=1 - fi - - catch_errors - echo "Error handling configured" -} - -setup_error_handling -``` - -### Error Recovery - -```bash -#!/usr/bin/env bash -source error_handler.func - -# Error recovery pattern -retry_operation() { - local max_attempts=3 - local attempt=1 - - while [[ $attempt -le $max_attempts ]]; do - echo "Attempt $attempt of $max_attempts" - - if silent "$@"; then - echo "Operation succeeded on attempt $attempt" - return 0 - else - echo "Attempt $attempt failed" - ((attempt++)) - - if [[ $attempt -le $max_attempts ]]; then - echo "Retrying in 5 seconds..." 
- sleep 5 - fi - fi - done - - echo "Operation failed after $max_attempts attempts" - return 1 -} - -# Use retry pattern -retry_operation apt-get update -retry_operation apt-get install -y package -``` - -### Custom Error Handler - -```bash -#!/usr/bin/env bash -source error_handler.func - -# Custom error handler for specific operations -custom_error_handler() { - local exit_code=${1:-$?} - local command=${2:-${BASH_COMMAND:-unknown}} - - case "$exit_code" in - 127) - echo "Custom handling: Command not found - $command" - echo "Suggestions:" - echo "1. Check if the command is installed" - echo "2. Check if the command is in PATH" - echo "3. Check spelling" - ;; - 126) - echo "Custom handling: Permission denied - $command" - echo "Suggestions:" - echo "1. Check file permissions" - echo "2. Run with appropriate privileges" - echo "3. Check if file is executable" - ;; - *) - # Use default error handler - error_handler "$exit_code" "$command" - ;; - esac -} - -# Set up custom error handler -trap 'custom_error_handler' ERR - -# Test custom error handling -nonexistent_command -``` - -## Integration Examples - -### With build.func - -```bash -#!/usr/bin/env bash -# Integration with build.func - -source core.func -source error_handler.func -source build.func - -# Container creation with error handling -export APP="plex" -export CTID="100" - -# Errors will be caught and explained -# Silent execution will use error_handler for explanations -``` - -### With tools.func - -```bash -#!/usr/bin/env bash -# Integration with tools.func - -source core.func -source error_handler.func -source tools.func - -# Tool operations with error handling -# All errors are properly handled and explained -``` - -### With api.func - -```bash -#!/usr/bin/env bash -# Integration with api.func - -source core.func -source error_handler.func -source api.func - -# API operations with error handling -# Network errors and API errors are properly handled -``` - -## Best Practices Examples - -### 
Comprehensive Error Handling - -```bash -#!/usr/bin/env bash -# Comprehensive error handling example - -source error_handler.func - -# Set up comprehensive error handling -setup_comprehensive_error_handling() { - # Enable debug logging - export DEBUG_LOGFILE="/tmp/script_debug.log" - - # Set up lock file - lockfile="/tmp/script.lock" - touch "$lockfile" - - # Initialize error handling - catch_errors - - # Set up signal handlers - trap on_interrupt INT - trap on_terminate TERM - trap on_exit EXIT - - echo "Comprehensive error handling configured" -} - -setup_comprehensive_error_handling - -# Script operations -echo "Starting script operations..." -# ... script code ... -echo "Script operations completed" -``` - -### Error Handling for Different Scenarios - -```bash -#!/usr/bin/env bash -source error_handler.func - -# Different error handling for different scenarios -handle_package_errors() { - local exit_code=$1 - case "$exit_code" in - 100) - echo "Package manager error - trying to fix..." - apt-get --fix-broken install - ;; - 101) - echo "Configuration error - checking sources..." - apt-get update - ;; - *) - error_handler "$exit_code" - ;; - esac -} - -handle_network_errors() { - local exit_code=$1 - case "$exit_code" in - 127) - echo "Network command not found - checking connectivity..." - ping -c 1 8.8.8.8 - ;; - *) - error_handler "$exit_code" - ;; - esac -} - -# Use appropriate error handler -if [[ "$1" == "package" ]]; then - trap 'handle_package_errors $?' ERR -elif [[ "$1" == "network" ]]; then - trap 'handle_network_errors $?' 
ERR -else - catch_errors -fi -``` - -### Error Handling with Logging - -```bash -#!/usr/bin/env bash -source error_handler.func - -# Error handling with detailed logging -setup_logging_error_handling() { - # Create log directory - mkdir -p /var/log/script_errors - - # Set up debug logging - export DEBUG_LOGFILE="/var/log/script_errors/debug.log" - - # Set up silent logging - export SILENT_LOGFILE="/var/log/script_errors/silent.log" - - # Initialize error handling - catch_errors - - echo "Logging error handling configured" -} - -setup_logging_error_handling - -# Script operations with logging -echo "Starting logged operations..." -# ... script code ... -echo "Logged operations completed" -``` - -## Troubleshooting Examples - -### Debug Mode - -```bash -#!/usr/bin/env bash -source error_handler.func - -# Enable debug mode -export DEBUG_LOGFILE="/tmp/debug.log" -export STRICT_UNSET=1 - -catch_errors - -echo "Debug mode enabled" -# Script operations -``` - -### Error Analysis - -```bash -#!/usr/bin/env bash -source error_handler.func - -# Function to analyze errors -analyze_errors() { - local log_file="${1:-$DEBUG_LOGFILE}" - - if [[ -f "$log_file" ]]; then - echo "Error Analysis:" - echo "Total errors: $(grep -c "ERROR" "$log_file")" - echo "Error types:" - grep "ERROR" "$log_file" | awk '{print $NF}' | sort | uniq -c - echo "Recent errors:" - tail -n 10 "$log_file" - else - echo "No error log found" - fi -} - -# Run script with error analysis -analyze_errors -``` - -### Error Recovery Testing - -```bash -#!/usr/bin/env bash -source error_handler.func - -# Test error recovery -test_error_recovery() { - local test_cases=( - "nonexistent_command" - "apt-get install nonexistent_package" - "systemctl start nonexistent_service" - ) - - for test_case in "${test_cases[@]}"; do - echo "Testing: $test_case" - if silent $test_case; then - echo "Unexpected success" - else - echo "Expected failure handled" - fi - done -} - -test_error_recovery -``` diff --git 
a/docs/misc/error_handler.func/README.md b/docs/misc/error_handler.func/README.md deleted file mode 100644 index 3c4448184..000000000 --- a/docs/misc/error_handler.func/README.md +++ /dev/null @@ -1,228 +0,0 @@ -# error_handler.func Documentation - -## Overview - -The `error_handler.func` file provides comprehensive error handling and signal management for Proxmox Community Scripts. It offers detailed error code explanations, graceful error recovery, and proper cleanup mechanisms. - -## Purpose and Use Cases - -- **Error Code Explanation**: Provides human-readable explanations for exit codes -- **Signal Handling**: Manages SIGINT, SIGTERM, and other signals gracefully -- **Error Recovery**: Implements proper cleanup and error reporting -- **Debug Logging**: Records error information for troubleshooting -- **Silent Execution Support**: Integrates with core.func silent execution - -## Quick Reference - -### Key Function Groups -- **Error Explanation**: `explain_exit_code()` - Convert exit codes to human-readable messages -- **Error Handling**: `error_handler()` - Main error handler with detailed reporting -- **Signal Handlers**: `on_interrupt()`, `on_terminate()` - Graceful signal handling -- **Cleanup**: `on_exit()` - Cleanup on script exit -- **Trap Setup**: `catch_errors()` - Initialize error handling traps - -### Dependencies -- **External**: None (pure Bash implementation) -- **Internal**: Uses color variables from core.func - -### Integration Points -- Used by: All scripts via core.func silent execution -- Uses: Color variables from core.func -- Provides: Error explanations for core.func silent function - -## Documentation Files - -### 📊 [ERROR_HANDLER_FLOWCHART.md](./ERROR_HANDLER_FLOWCHART.md) -Visual execution flows showing error handling processes and signal management. - -### 📚 [ERROR_HANDLER_FUNCTIONS_REFERENCE.md](./ERROR_HANDLER_FUNCTIONS_REFERENCE.md) -Complete alphabetical reference of all functions with parameters, dependencies, and usage details. 
- -### 💡 [ERROR_HANDLER_USAGE_EXAMPLES.md](./ERROR_HANDLER_USAGE_EXAMPLES.md) -Practical examples showing how to use error handling functions and common patterns. - -### 🔗 [ERROR_HANDLER_INTEGRATION.md](./ERROR_HANDLER_INTEGRATION.md) -How error_handler.func integrates with other components and provides error handling services. - -## Key Features - -### Error Code Categories -- **Generic/Shell Errors**: Exit codes 1, 2, 126, 127, 128, 130, 137, 139, 143 -- **Package Manager Errors**: APT/DPKG errors (100, 101, 255) -- **Node.js Errors**: JavaScript runtime errors (243-249, 254) -- **Python Errors**: Python environment and dependency errors (210-212) -- **Database Errors**: PostgreSQL, MySQL, MongoDB errors (231-254) -- **Proxmox Custom Errors**: Container and VM specific errors (200-231) - -### Signal Handling -- **SIGINT (Ctrl+C)**: Graceful interruption handling -- **SIGTERM**: Graceful termination handling -- **EXIT**: Cleanup on script exit -- **ERR**: Error trap for command failures - -### Error Reporting -- **Detailed Messages**: Human-readable error explanations -- **Context Information**: Line numbers, commands, timestamps -- **Log Integration**: Silent log file integration -- **Debug Logging**: Optional debug log file support - -## Common Usage Patterns - -### Basic Error Handling Setup -```bash -#!/usr/bin/env bash -# Basic error handling setup - -source error_handler.func - -# Initialize error handling -catch_errors - -# Your script code here -# Errors will be automatically handled -``` - -### Manual Error Explanation -```bash -#!/usr/bin/env bash -source error_handler.func - -# Get error explanation -explanation=$(explain_exit_code 127) -echo "Error 127: $explanation" -# Output: Error 127: Command not found -``` - -### Custom Error Handling -```bash -#!/usr/bin/env bash -source error_handler.func - -# Custom error handling -if ! 
command -v required_tool >/dev/null 2>&1; then - echo "Error: required_tool not found" - exit 127 -fi -``` - -## Environment Variables - -### Debug Variables -- `DEBUG_LOGFILE`: Path to debug log file for error logging -- `SILENT_LOGFILE`: Path to silent execution log file -- `STRICT_UNSET`: Enable strict unset variable checking (0/1) - -### Internal Variables -- `lockfile`: Lock file path for cleanup (set by calling script) -- `exit_code`: Current exit code -- `command`: Failed command -- `line_number`: Line number where error occurred - -## Error Categories - -### Generic/Shell Errors -- **1**: General error / Operation not permitted -- **2**: Misuse of shell builtins (syntax error) -- **126**: Command invoked cannot execute (permission problem) -- **127**: Command not found -- **128**: Invalid argument to exit -- **130**: Terminated by Ctrl+C (SIGINT) -- **137**: Killed (SIGKILL / Out of memory) -- **139**: Segmentation fault (core dumped) -- **143**: Terminated (SIGTERM) - -### Package Manager Errors -- **100**: APT package manager error (broken packages) -- **101**: APT configuration error (bad sources.list) -- **255**: DPKG fatal internal error - -### Node.js Errors -- **243**: JavaScript heap out of memory -- **245**: Invalid command-line option -- **246**: Internal JavaScript parse error -- **247**: Fatal internal error -- **248**: Invalid C++ addon / N-API failure -- **249**: Inspector error -- **254**: npm/pnpm/yarn unknown fatal error - -### Python Errors -- **210**: Virtualenv/uv environment missing or broken -- **211**: Dependency resolution failed -- **212**: Installation aborted (permissions or EXTERNALLY-MANAGED) - -### Database Errors -- **PostgreSQL (231-234)**: Connection, authentication, database, query errors -- **MySQL/MariaDB (241-244)**: Connection, authentication, database, query errors -- **MongoDB (251-254)**: Connection, authentication, database, query errors - -### Proxmox Custom Errors -- **200**: Failed to create lock file -- **203**: 
Missing CTID variable -- **204**: Missing PCT_OSTYPE variable -- **205**: Invalid CTID (<100) -- **209**: Container creation failed -- **210**: Cluster not quorate -- **214**: Not enough storage space -- **215**: Container ID not listed -- **216**: RootFS entry missing in config -- **217**: Storage does not support rootdir -- **220**: Unable to resolve template path -- **222**: Template download failed after 3 attempts -- **223**: Template not available after download -- **231**: LXC stack upgrade/retry failed - -## Best Practices - -### Error Handling Setup -1. Source error_handler.func early in script -2. Call catch_errors() to initialize traps -3. Use proper exit codes for different error types -4. Provide meaningful error messages - -### Signal Handling -1. Always set up signal traps -2. Provide graceful cleanup on interruption -3. Use appropriate exit codes for signals -4. Clean up temporary files and processes - -### Error Reporting -1. Use explain_exit_code() for user-friendly messages -2. Log errors to debug files when needed -3. Provide context information (line numbers, commands) -4. Integrate with silent execution logging - -## Troubleshooting - -### Common Issues -1. **Missing Error Handler**: Ensure error_handler.func is sourced -2. **Trap Not Set**: Call catch_errors() to initialize traps -3. **Color Variables**: Ensure core.func is sourced for colors -4. 
**Lock Files**: Clean up lock files in on_exit() - -### Debug Mode -Enable debug logging for detailed error information: -```bash -export DEBUG_LOGFILE="/tmp/debug.log" -source error_handler.func -catch_errors -``` - -### Error Code Testing -Test error explanations: -```bash -source error_handler.func -for code in 1 2 126 127 128 130 137 139 143; do - echo "Code $code: $(explain_exit_code $code)" -done -``` - -## Related Documentation - -- [core.func](../core.func/) - Core utilities and silent execution -- [build.func](../build.func/) - Container creation with error handling -- [tools.func](../tools.func/) - Extended utilities with error handling -- [api.func](../api.func/) - API operations with error handling - ---- - -*This documentation covers the error_handler.func file which provides comprehensive error handling for all Proxmox Community Scripts.* diff --git a/docs/misc/install.func/INSTALL_FUNC_FLOWCHART.md b/docs/misc/install.func/INSTALL_FUNC_FLOWCHART.md deleted file mode 100644 index 73fcd69bd..000000000 --- a/docs/misc/install.func/INSTALL_FUNC_FLOWCHART.md +++ /dev/null @@ -1,117 +0,0 @@ -# install.func Flowchart - -## Installation Workflow - -``` -┌──────────────────────────────────┐ -│ Container Started │ -│ (Inside LXC by build.func) │ -└──────────────┬───────────────────┘ - │ - ▼ - ┌──────────────────────┐ - │ Source Functions │ - │ $FUNCTIONS_FILE_PATH │ - └──────────┬───────────┘ - │ - ▼ - ┌──────────────────────┐ - │ setting_up_container│ - │ Display setup msg │ - └──────────┬───────────┘ - │ - ▼ - ┌──────────────────────┐ - │ network_check() │ - │ (Verify internet) │ - └────┬──────────────┬──┘ - │ │ - OK FAIL - │ │ - │ ▼ - │ ┌──────────────┐ - │ │ Retry Check │ - │ │ 3 attempts │ - │ └────┬─────┬───┘ - │ │ │ - │ OK FAIL - │ │ │ - └──────────────┘ │ - │ │ - ▼ ▼ - ┌──────────────────────┐ ┌──────────────┐ - │ update_os() │ │ Exit Error │ - │ (apt update/upgrade) │ │ No internet │ - └──────────┬───────────┘ └──────────────┘ - │ - ▼ - 
┌──────────────────────┐ - │ verb_ip6() [optional]│ - │ (Enable IPv6) │ - └──────────┬───────────┘ - │ - ▼ - ┌──────────────────────┐ - │ Application │ - │ Installation │ - │ (Main work) │ - └──────────┬───────────┘ - │ - ┌───────┴────────┐ - │ │ - SUCCESS FAILED - │ │ - │ └─ error_handler catches - │ (if catch_errors active) - │ - ▼ - ┌──────────────────────┐ - │ motd_ssh() │ - │ (Setup SSH/MOTD) │ - └──────────┬───────────┘ - │ - ▼ - ┌──────────────────────┐ - │ customize() │ - │ (Apply settings) │ - └──────────┬───────────┘ - │ - ▼ - ┌──────────────────────┐ - │ cleanup_lxc() │ - │ (Final cleanup) │ - └──────────┬───────────┘ - │ - ▼ - ┌──────────────────────┐ - │ Installation │ - │ Complete ✓ │ - └──────────────────────┘ -``` - -## Network Check Retry Logic - -``` -network_check() - │ - ├─ Ping 8.8.8.8 (Google DNS) - │ └─ Response? - │ ├─ YES: Continue - │ └─ NO: Retry - │ - ├─ Retry 1 - │ └─ Wait 5s, ping again - │ - ├─ Retry 2 - │ └─ Wait 5s, ping again - │ - └─ Retry 3 - ├─ If OK: Continue - └─ If FAIL: Exit Error - (Network unavailable) -``` - ---- - -**Visual Reference for**: install.func container setup workflows -**Last Updated**: December 2025 diff --git a/docs/misc/install.func/INSTALL_FUNC_FUNCTIONS_REFERENCE.md b/docs/misc/install.func/INSTALL_FUNC_FUNCTIONS_REFERENCE.md deleted file mode 100644 index 40ff0ae6f..000000000 --- a/docs/misc/install.func/INSTALL_FUNC_FUNCTIONS_REFERENCE.md +++ /dev/null @@ -1,237 +0,0 @@ -# install.func Functions Reference - -Complete reference of all functions in install.func with detailed usage information. 
- -## Function Index - -- `setting_up_container()` - Initialize container setup -- `network_check()` - Verify network connectivity -- `update_os()` - Update OS packages -- `verb_ip6()` - Enable IPv6 -- `motd_ssh()` - Configure SSH and MOTD -- `customize()` - Apply container customizations -- `cleanup_lxc()` - Final container cleanup - ---- - -## Core Functions - -### setting_up_container() - -Display setup message and initialize container environment. - -**Signature**: -```bash -setting_up_container -``` - -**Purpose**: Announce container initialization and set initial environment - -**Usage**: -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -setting_up_container -# Output: ⏳ Setting up container... -``` - ---- - -### network_check() - -Verify network connectivity with automatic retry logic. - -**Signature**: -```bash -network_check -``` - -**Purpose**: Ensure internet connectivity before critical operations - -**Behavior**: -- Pings 8.8.8.8 (Google DNS) -- 3 attempts with 5-second delays -- Exits with error if all attempts fail - -**Usage**: -```bash -network_check -# If no internet: Exits with error message -# If internet OK: Continues to next step -``` - -**Error Handling**: -```bash -if ! network_check; then - msg_error "No internet connection" - exit 1 -fi -``` - ---- - -### update_os() - -Update OS packages with error handling. - -**Signature**: -```bash -update_os -``` - -**Purpose**: Prepare container with latest packages - -**On Debian/Ubuntu**: -- Runs: `apt-get update && apt-get upgrade -y` - -**On Alpine**: -- Runs: `apk update && apk upgrade` - -**Usage**: -```bash -update_os -``` - ---- - -### verb_ip6() - -Enable IPv6 support in container (optional). - -**Signature**: -```bash -verb_ip6 -``` - -**Purpose**: Enable IPv6 if needed for application - -**Usage**: -```bash -verb_ip6 # Enable IPv6 -network_check # Verify connectivity with IPv6 -``` - ---- - -### motd_ssh() - -Configure SSH daemon and MOTD for container access. 
- -**Signature**: -```bash -motd_ssh -``` - -**Purpose**: Setup SSH and create login message - -**Configures**: -- SSH daemon startup and keys -- Custom MOTD displaying application access info -- SSH port and security settings - -**Usage**: -```bash -motd_ssh -# SSH is now configured and application info is in MOTD -``` - ---- - -### customize() - -Apply container customizations and final setup. - -**Signature**: -```bash -customize -``` - -**Purpose**: Apply any remaining customizations - -**Usage**: -```bash -customize -``` - ---- - -### cleanup_lxc() - -Final cleanup and completion of installation. - -**Signature**: -```bash -cleanup_lxc -``` - -**Purpose**: Remove temporary files and finalize installation - -**Cleans**: -- Temporary installation files -- Package manager cache -- Log files from installation process - -**Usage**: -```bash -cleanup_lxc -# Installation is now complete and ready -``` - ---- - -## Common Patterns - -### Basic Installation Pattern - -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -setting_up_container -network_check -update_os - -# ... application installation ... - -motd_ssh -customize -cleanup_lxc -``` - -### With IPv6 Support - -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -setting_up_container -verb_ip6 # Enable IPv6 -network_check -update_os - -# ... application installation ... -``` - -### With Error Handling - -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -catch_errors # Setup error trapping -setting_up_container - -if ! 
network_check; then - msg_error "Network connectivity failed" - exit 1 -fi - -update_os -``` - ---- - -**Last Updated**: December 2025 -**Total Functions**: 7 -**Maintained by**: community-scripts team diff --git a/docs/misc/install.func/INSTALL_FUNC_INTEGRATION.md b/docs/misc/install.func/INSTALL_FUNC_INTEGRATION.md deleted file mode 100644 index 2715cad50..000000000 --- a/docs/misc/install.func/INSTALL_FUNC_INTEGRATION.md +++ /dev/null @@ -1,104 +0,0 @@ -# install.func Integration Guide - -How install.func integrates with the ProxmoxVE ecosystem and connects to other function libraries. - -## Component Integration - -### install.func in the Installation Pipeline - -``` -install/app-install.sh (container-side) - │ - ├─ Sources: core.func (messaging) - ├─ Sources: error_handler.func (error handling) - │ - ├─ ★ Uses: install.func ★ - │ ├─ setting_up_container() - │ ├─ network_check() - │ ├─ update_os() - │ └─ motd_ssh() - │ - ├─ Uses: tools.func (package installation) - │ - └─ Back to install.func: - ├─ customize() - └─ cleanup_lxc() -``` - -### Integration with tools.func - -install.func and tools.func work together: - -``` -setting_up_container() [install.func] - │ -update_os() [install.func] - │ -pkg_update() [tools.func] -setup_nodejs() [tools.func] -setup_mariadb() [tools.func] - │ -motd_ssh() [install.func] -customize() [install.func] -cleanup_lxc() [install.func] -``` - ---- - -## Dependencies - -### External Dependencies - -- `curl`, `wget` - For downloads -- `apt-get` or `apk` - Package management -- `ping` - Network verification -- `systemctl` or `rc-service` - Service management - -### Internal Dependencies - -``` -install.func uses: -├─ core.func (for messaging and colors) -├─ error_handler.func (for error handling) -└─ tools.func (for package operations) -``` - ---- - -## Best Practices - -### Always Follow This Pattern - -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -# 1. Setup error handling -catch_errors - -# 2. 
Initialize container -setting_up_container - -# 3. Verify network -network_check - -# 4. Update OS -update_os - -# 5. Installation (your code) -# ... install application ... - -# 6. Configure access -motd_ssh - -# 7. Customize -customize - -# 8. Cleanup -cleanup_lxc -``` - ---- - -**Last Updated**: December 2025 -**Maintainers**: community-scripts team diff --git a/docs/misc/install.func/INSTALL_FUNC_USAGE_EXAMPLES.md b/docs/misc/install.func/INSTALL_FUNC_USAGE_EXAMPLES.md deleted file mode 100644 index 634dd8ee4..000000000 --- a/docs/misc/install.func/INSTALL_FUNC_USAGE_EXAMPLES.md +++ /dev/null @@ -1,93 +0,0 @@ -# install.func Usage Examples - -Practical examples for using install.func functions in application installation scripts. - -## Basic Examples - -### Example 1: Minimal Setup - -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -setting_up_container -network_check -update_os - -# ... application installation ... - -motd_ssh -customize -cleanup_lxc -``` - -### Example 2: With Error Handling - -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -catch_errors -setting_up_container - -if ! network_check; then - msg_error "Network failed" - exit 1 -fi - -if ! update_os; then - msg_error "OS update failed" - exit 1 -fi - -# ... continue ... -``` - ---- - -## Production Examples - -### Example 3: Full Application Installation - -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -catch_errors -setting_up_container -network_check -update_os - -msg_info "Installing application" -# ... install steps ... -msg_ok "Application installed" - -motd_ssh -customize -cleanup_lxc -``` - -### Example 4: With IPv6 Support - -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -catch_errors -setting_up_container -verb_ip6 -network_check -update_os - -# ... application installation ... 
- -motd_ssh -customize -cleanup_lxc -``` - ---- - -**Last Updated**: December 2025 -**Examples**: Basic and production patterns -**All examples production-ready** diff --git a/docs/misc/install.func/README.md b/docs/misc/install.func/README.md deleted file mode 100644 index a9d7cffc7..000000000 --- a/docs/misc/install.func/README.md +++ /dev/null @@ -1,248 +0,0 @@ -# install.func Documentation - -## Overview - -The `install.func` file provides container installation workflow orchestration and fundamental operations for applications deployed inside LXC containers. It handles network setup, OS configuration, connectivity verification, and installation mechanics. - -## Purpose and Use Cases - -- **Container Setup**: Initialize new container with proper configuration -- **Network Verification**: Verify IPv4 and IPv6 connectivity -- **OS Configuration**: Update OS, apply system settings -- **Installation Workflow**: Orchestrate application installation steps -- **Error Handling**: Comprehensive signal trapping and error recovery - -## Quick Reference - -### Key Function Groups -- **Initialization**: `setting_up_container()` - Setup message and environment -- **Network**: `network_check()`, `verb_ip6()` - Connectivity verification -- **OS Configuration**: `update_os()` - OS updates and package management -- **Installation**: `motd_ssh()`, `customize()` - Container customization -- **Cleanup**: `cleanup_lxc()` - Final container cleanup - -### Dependencies -- **External**: `curl`, `apt-get`, `ping`, `dns` utilities -- **Internal**: Uses functions from `core.func`, `error_handler.func`, `tools.func` - -### Integration Points -- Used by: All install/*.sh scripts at startup -- Uses: Environment variables from build.func and core.func -- Provides: Container initialization and management services - -## Documentation Files - -### 📊 [INSTALL_FUNC_FLOWCHART.md](./INSTALL_FUNC_FLOWCHART.md) -Visual execution flows showing initialization, network checks, and installation workflows. 
- -### 📚 [INSTALL_FUNC_FUNCTIONS_REFERENCE.md](./INSTALL_FUNC_FUNCTIONS_REFERENCE.md) -Complete alphabetical reference of all functions with parameters, dependencies, and usage details. - -### 💡 [INSTALL_FUNC_USAGE_EXAMPLES.md](./INSTALL_FUNC_USAGE_EXAMPLES.md) -Practical examples showing how to use installation functions and common patterns. - -### 🔗 [INSTALL_FUNC_INTEGRATION.md](./INSTALL_FUNC_INTEGRATION.md) -How install.func integrates with other components and provides installation services. - -## Key Features - -### Container Initialization -- **Environment Setup**: Prepare container variables and functions -- **Message System**: Display installation progress with colored output -- **Error Handlers**: Setup signal trapping for proper cleanup - -### Network & Connectivity -- **IPv4 Verification**: Ping external hosts to verify internet access -- **IPv6 Support**: Optional IPv6 enablement and verification -- **DNS Checking**: Verify DNS resolution is working -- **Retry Logic**: Automatic retries for transient failures - -### OS Configuration -- **Package Updates**: Safely update OS package lists -- **System Optimization**: Disable unnecessary services (wait-online) -- **Timezone**: Validate and set container timezone -- **SSH Setup**: Configure SSH daemon and keys - -### Container Customization -- **MOTD**: Create custom login message -- **Auto-Login**: Optional passwordless root login -- **Update Script**: Register application update function -- **Customization Hooks**: Application-specific setup - -## Function Categories - -### 🔹 Core Functions -- `setting_up_container()` - Display setup message and set environment -- `network_check()` - Verify network connectivity -- `update_os()` - Update OS packages with retry logic -- `verb_ip6()` - Enable IPv6 (optional) - -### 🔹 Configuration Functions -- `motd_ssh()` - Setup MOTD and SSH configuration -- `customize()` - Apply container customizations -- `cleanup_lxc()` - Final cleanup before completion - -### 🔹 Utility 
Functions -- `create_update_script()` - Register application update function -- `set_timezone()` - Configure container timezone -- `disable_wait_online()` - Disable systemd-networkd-wait-online - -## Execution Flow - -``` -Container Started - ↓ -source $FUNCTIONS_FILE_PATH - ↓ -setting_up_container() ← Display "Setting up container..." - ↓ -network_check() ← Verify internet connectivity - ↓ -update_os() ← Update package lists - ↓ -[Application-Specific Installation] - ↓ -motd_ssh() ← Configure SSH/MOTD -customize() ← Apply customizations - ↓ -cleanup_lxc() ← Final cleanup - ↓ -Installation Complete -``` - -## Common Usage Patterns - -### Basic Container Setup -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -setting_up_container -network_check -update_os - -# ... application installation ... - -motd_ssh -customize -cleanup_lxc -``` - -### With Optional IPv6 -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -setting_up_container -verb_ip6 # Enable IPv6 -network_check -update_os - -# ... installation ... - -motd_ssh -customize -cleanup_lxc -``` - -### With Custom Update Script -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -setting_up_container -network_check -update_os - -# ... installation ... 
- -# Register update function -function update_script() { - # Update logic here -} -export -f update_script - -motd_ssh -customize -cleanup_lxc -``` - -## Best Practices - -### ✅ DO -- Call `setting_up_container()` at the start -- Check `network_check()` output before main installation -- Use `$STD` variable for silent operations -- Call `cleanup_lxc()` at the very end -- Test network connectivity before critical operations - -### ❌ DON'T -- Skip network verification -- Assume internet is available -- Hardcode container paths -- Use `echo` instead of `msg_*` functions -- Forget to call cleanup at the end - -## Environment Variables - -### Available Variables -- `$FUNCTIONS_FILE_PATH` - Path to core functions (set by build.func) -- `$CTID` - Container ID number -- `$NSAPP` - Normalized application name (lowercase) -- `$APP` - Application display name -- `$STD` - Output suppression (`silent` or empty) -- `$VERBOSE` - Verbose output mode (`yes` or `no`) - -### Setting Container Variables -```bash -CONTAINER_TIMEZONE="UTC" -CONTAINER_HOSTNAME="myapp-container" -CONTAINER_FQDN="myapp.example.com" -``` - -## Troubleshooting - -### "Network check failed" -```bash -# Container may not have internet access -# Check: -ping 8.8.8.8 # External connectivity -nslookup example.com # DNS resolution -ip route show # Routing table -``` - -### "Package update failed" -```bash -# APT may be locked by another process -ps aux | grep apt # Check for running apt -# Or wait for existing apt to finish -sleep 30 -update_os -``` - -### "Cannot source functions" -```bash -# $FUNCTIONS_FILE_PATH may not be set -# This variable is set by build.func before running install script -# If missing, the install script was not called properly -``` - -## Related Documentation - -- **[tools.func/](../tools.func/)** - Package and tool installation -- **[core.func/](../core.func/)** - Utility functions and messaging -- **[error_handler.func/](../error_handler.func/)** - Error handling -- 
**[alpine-install.func/](../alpine-install.func/)** - Alpine-specific setup -- **[UPDATED_APP-install.md](../../UPDATED_APP-install.md)** - Application script guide - -## Recent Updates - -### Version 2.0 (Dec 2025) -- ✅ Improved network connectivity checks -- ✅ Enhanced OS update error handling -- ✅ Added IPv6 support with verb_ip6() -- ✅ Better timezone validation -- ✅ Streamlined cleanup procedures - ---- - -**Last Updated**: December 2025 -**Maintainers**: community-scripts team -**License**: MIT diff --git a/docs/misc/tools.func/README.md b/docs/misc/tools.func/README.md deleted file mode 100644 index 6a3da7144..000000000 --- a/docs/misc/tools.func/README.md +++ /dev/null @@ -1,235 +0,0 @@ -# tools.func Documentation - -## Overview - -The `tools.func` file provides a comprehensive collection of helper functions for robust package management, repository management, and tool installation in Debian/Ubuntu-based systems. It is the central hub for installing services, databases, programming languages, and development tools in containers. - -## Purpose and Use Cases - -- **Package Management**: Robust APT/DPKG operations with retry logic -- **Repository Setup**: Prepare and configure package repositories safely -- **Tool Installation**: Install 30+ tools (Node.js, PHP, databases, etc.) -- **Dependency Handling**: Manage complex installation workflows -- **Error Recovery**: Automatic recovery from network failures - -## Quick Reference - -### Key Function Groups -- **Package Helpers**: `pkg_install()`, `pkg_update()`, `pkg_remove()` - APT operations with retry -- **Repository Setup**: `setup_deb822_repo()` - Modern repository configuration -- **Tool Installation**: `setup_nodejs()`, `setup_php()`, `setup_mariadb()`, etc. 
- 30+ tool functions -- **System Utilities**: `disable_wait_online()`, `customize()` - System optimization -- **Container Setup**: `setting_up_container()`, `motd_ssh()` - Container initialization - -### Dependencies -- **External**: `curl`, `wget`, `apt-get`, `gpg` -- **Internal**: Uses functions from `core.func`, `install.func`, `error_handler.func` - -### Integration Points -- Used by: All install scripts for dependency installation -- Uses: Environment variables from build.func and core.func -- Provides: Tool installation, package management, and repository services - -## Documentation Files - -### 📊 [TOOLS_FUNC_FLOWCHART.md](./TOOLS_FUNC_FLOWCHART.md) -Visual execution flows showing package management, tool installation, and repository setup workflows. - -### 📚 [TOOLS_FUNC_FUNCTIONS_REFERENCE.md](./TOOLS_FUNC_FUNCTIONS_REFERENCE.md) -Complete alphabetical reference of all 30+ functions with parameters, dependencies, and usage details. - -### 💡 [TOOLS_FUNC_USAGE_EXAMPLES.md](./TOOLS_FUNC_USAGE_EXAMPLES.md) -Practical examples showing how to use tool installation functions and common patterns. - -### 🔗 [TOOLS_FUNC_INTEGRATION.md](./TOOLS_FUNC_INTEGRATION.md) -How tools.func integrates with other components and provides package/tool services. - -### 🔧 [TOOLS_FUNC_ENVIRONMENT_VARIABLES.md](./TOOLS_FUNC_ENVIRONMENT_VARIABLES.md) -Complete reference of environment variables and configuration options. 
- -## Key Features - -### Robust Package Management -- **Automatic Retry Logic**: 3 attempts with backoff for transient failures -- **Silent Mode**: Suppress output with `$STD` variable -- **Error Recovery**: Automatic cleanup of broken packages -- **Atomic Operations**: Ensure consistent state even on failure - -### Tool Installation Coverage -- **Node.js Ecosystem**: Node.js, npm, yarn, pnpm -- **PHP Stack**: PHP-FPM, PHP-CLI, Composer -- **Databases**: MariaDB, PostgreSQL, MongoDB -- **Development Tools**: Git, build-essential, Docker -- **Monitoring**: Grafana, Prometheus, Telegraf -- **And 20+ more...** - -### Repository Management -- **Deb822 Format**: Modern standardized repository format -- **Keyring Handling**: Automatic GPG key management -- **Cleanup**: Removes legacy repositories and keyrings -- **Validation**: Verifies repository accessibility before use - -## Common Usage Patterns - -### Installing a Tool -```bash -setup_nodejs "20" # Install Node.js v20 -setup_php "8.2" # Install PHP 8.2 -setup_mariadb # Install MariaDB (distribution packages) -# MARIADB_VERSION="11.4" setup_mariadb # Specific version from official repo -``` - -### Safe Package Operations -```bash -pkg_update # Update package lists with retry -pkg_install curl wget # Install packages safely -pkg_remove old-tool # Remove package cleanly -``` - -### Setting Up Repositories -```bash -setup_deb822_repo "ppa:example/ppa" "example-app" "jammy" "http://example.com" "release" -``` - -## Function Categories - -### 🔹 Core Package Functions -- `pkg_install()` - Install packages with retry logic -- `pkg_update()` - Update package lists safely -- `pkg_remove()` - Remove packages completely - -### 🔹 Repository Functions -- `setup_deb822_repo()` - Add repository in deb822 format -- `cleanup_repo_metadata()` - Clean GPG keys and old repos -- `check_repository()` - Verify repository is accessible - -### 🔹 Tool Installation Functions (30+) -**Programming Languages**: -- `setup_nodejs()` - Node.js with 
npm -- `setup_php()` - PHP-FPM and CLI -- `setup_python()` - Python 3 with pip -- `setup_ruby()` - Ruby with gem -- `setup_golang()` - Go programming language - -**Databases**: -- `setup_mariadb()` - MariaDB server -- `setup_postgresql()` - PostgreSQL database -- `setup_mongodb()` - MongoDB NoSQL -- `setup_redis()` - Redis cache - -**Web Servers & Proxies**: -- `setup_nginx()` - Nginx web server -- `setup_apache()` - Apache HTTP server -- `setup_caddy()` - Caddy web server -- `setup_traefik()` - Traefik reverse proxy - -**Containers & Virtualization**: -- `setup_docker()` - Docker container runtime -- `setup_podman()` - Podman container runtime - -**Development & System Tools**: -- `setup_git()` - Git version control -- `setup_docker_compose()` - Docker Compose -- `setup_composer()` - PHP dependency manager -- `setup_build_tools()` - C/C++ compilation tools - -**Monitoring & Logging**: -- `setup_grafana()` - Grafana dashboards -- `setup_prometheus()` - Prometheus monitoring -- `setup_telegraf()` - Telegraf metrics collector - -### 🔹 System Configuration Functions -- `setting_up_container()` - Container initialization message -- `network_check()` - Verify network connectivity -- `update_os()` - Update OS packages safely -- `customize()` - Apply container customizations -- `motd_ssh()` - Configure SSH and MOTD -- `cleanup_lxc()` - Final container cleanup - -## Best Practices - -### ✅ DO -- Use `$STD` to suppress output in production scripts -- Chain multiple tool installations together -- Check for tool availability before using -- Use version parameters when available -- Test new repositories before production use - -### ❌ DON'T -- Mix package managers (apt and apk in same script) -- Hardcode tool versions directly -- Skip error checking on package operations -- Use `apt-get install -y` without `$STD` -- Leave temporary files after installation - -## Recent Updates - -### Version 2.0 (Dec 2025) -- ✅ Added `setup_deb822_repo()` for modern repository format -- ✅ 
Improved error handling with automatic cleanup -- ✅ Added 5 new tool installation functions -- ✅ Enhanced package retry logic with backoff -- ✅ Standardized tool version handling - -## Integration with Other Functions - -``` -tools.func - ├── Uses: core.func (messaging, colors) - ├── Uses: error_handler.func (exit codes, trapping) - ├── Uses: install.func (network_check, update_os) - │ - └── Used by: All install/*.sh scripts - ├── For: Package installation - ├── For: Tool setup - └── For: Repository management -``` - -## Troubleshooting - -### "Package manager is locked" -```bash -# Wait for apt lock to release -sleep 10 -pkg_update -``` - -### "GPG key not found" -```bash -# Repository setup will handle this automatically -# If manual fix needed: -cleanup_repo_metadata -setup_deb822_repo ... -``` - -### "Tool installation failed" -```bash -# Enable verbose output -export var_verbose="yes" -setup_nodejs "20" -``` - -## Contributing - -When adding new tool installation functions: - -1. Follow the `setup_TOOLNAME()` naming convention -2. Accept version as first parameter -3. Check if tool already installed -4. Use `$STD` for output suppression -5. Set version file: `/opt/TOOLNAME_version.txt` -6. 
Document in TOOLS_FUNC_FUNCTIONS_REFERENCE.md - -## Related Documentation - -- **[build.func/](../build.func/)** - Container creation orchestrator -- **[core.func/](../core.func/)** - Utility functions and messaging -- **[install.func/](../install.func/)** - Installation workflow management -- **[error_handler.func/](../error_handler.func/)** - Error handling and recovery -- **[UPDATED_APP-install.md](../../UPDATED_APP-install.md)** - Application script guide - ---- - -**Last Updated**: December 2025 -**Maintainers**: community-scripts team -**License**: MIT diff --git a/docs/misc/tools.func/TOOLS_FUNC_FLOWCHART.md b/docs/misc/tools.func/TOOLS_FUNC_FLOWCHART.md deleted file mode 100644 index b55da22c8..000000000 --- a/docs/misc/tools.func/TOOLS_FUNC_FLOWCHART.md +++ /dev/null @@ -1,199 +0,0 @@ -# tools.func Flowchart - -## Main Package Installation Flow - -``` -┌──────────────────────────────────┐ -│ Install Script Starts │ -│ source tools.func │ -└──────────────┬───────────────────┘ - │ - ▼ - ┌─────────────┐ - │ pkg_update()│ - │ (apt/apk) │ - └──────┬──────┘ - │ - ▼ - ┌────────────────┐ - │ Retry Logic │ ◄─────┐ - │ (Up to 3 tries)│ │ - └────┬───────────┘ │ - │ │ - ├─ Success: Continue │ - ├─ Retry 1 ──────────┘ - └─ Fail: Exit - │ - ▼ - ┌──────────────────┐ - │ setup_deb822_repo│ - │ (Add repository) │ - └────────┬─────────┘ - │ - ▼ - ┌─────────────────┐ - │ GPG Key Setup │ - │ Verify Repo OK │ - └────────┬────────┘ - │ - ▼ - ┌──────────────────┐ - │ Tool Installation│ - │ (setup_nodejs, │ - │ setup_php, etc.)│ - └────────┬─────────┘ - │ - ┌──────────┴──────────┐ - │ │ - ▼ ▼ - ┌─────────────┐ ┌──────────────┐ - │ Node.js │ │ MariaDB │ - │ setup_ │ │ setup_ │ - │ nodejs() │ │ mariadb() │ - └──────┬──────┘ └────────┬─────┘ - │ │ - └────────┬───────────┘ - │ - ▼ - ┌───────────────────┐ - │ Installation OK? 
│ - └────┬──────────┬───┘ - │ │ - YES NO - │ │ - │ ▼ - │ ┌─────────────┐ - │ │ Rollback │ - │ │ Error Exit │ - │ └─────────────┘ - │ - ▼ - ┌─────────────────┐ - │ Set Version File│ - │ /opt/TOOL_v.txt │ - └─────────────────┘ -``` - -## Repository Setup Flow (setup_deb822_repo) - -``` -setup_deb822_repo(URL, name, dist, repo_url, release) - │ - ├─ Parse Parameters - │ ├─ URL: Repository URL - │ ├─ name: Repository name - │ ├─ dist: Distro (jammy, bookworm) - │ ├─ repo_url: Main URL - │ └─ release: Release type - │ - ├─ Add GPG Key - │ ├─ Download key from URL - │ ├─ Add to keyring - │ └─ Trust key for deb822 - │ - ├─ Create deb822 file - │ ├─ /etc/apt/sources.list.d/name.sources - │ ├─ Format: DEB822 - │ └─ Include GPG key reference - │ - ├─ Validate Repository - │ ├─ apt-get update - │ ├─ Check for errors - │ └─ Retry if needed - │ - └─ Success / Error -``` - -## Tool Installation Chain - -``` -Tools to Install: -├─ Programming Languages -│ ├─ setup_nodejs(VERSION) -│ ├─ setup_php(VERSION) -│ ├─ setup_python(VERSION) -│ ├─ setup_ruby(VERSION) -│ └─ setup_golang(VERSION) -│ -├─ Databases -│ ├─ setup_mariadb(VERSION) -│ ├─ setup_postgresql(VERSION) -│ ├─ setup_mongodb(VERSION) -│ └─ setup_redis(VERSION) -│ -├─ Web Servers -│ ├─ setup_nginx() -│ ├─ setup_apache() -│ ├─ setup_caddy() -│ └─ setup_traefik() -│ -├─ Containers -│ ├─ setup_docker() -│ └─ setup_podman() -│ -└─ Utilities - ├─ setup_git() - ├─ setup_composer() - ├─ setup_build_tools() - └─ setup_[TOOL]() -``` - -## Package Operation Retry Logic - -``` -┌─────────────────────┐ -│ pkg_install PKG1 │ -│ pkg_install PKG2 │ -│ pkg_install PKG3 │ -└──────────┬──────────┘ - │ - ▼ - ┌─────────────────┐ - │ APT Lock Check │ - └────┬────────┬───┘ - │ │ - FREE LOCKED - │ │ - │ ▼ - │ ┌─────────────┐ - │ │ Wait 5 sec │ - │ └────────┬────┘ - │ │ - │ ▼ - │ ┌─────────────┐ - │ │ Retry Check │ - │ └────┬────┬───┘ - │ │ │ - │ OK LOCK - │ │ │ - │ └────┘ (loop) - │ - ▼ - ┌──────────────────┐ - │ apt-get install │ - │ (with $STD) 
│ - └────┬─────────┬───┘ - │ │ - SUCCESS FAILED - │ │ - │ ▼ - │ ┌──────────────┐ - │ │ Retry Count? │ - │ └────┬─────┬───┘ - │ │ │ - │ <3 ≥3 │ - │ Retry FAIL - │ │ - │ └─────────┐ - │ │ - ▼ ▼ - ┌─────────┐ ┌─────────┐ - │ SUCCESS │ │ FAILED │ - └─────────┘ │ EXIT 1 │ - └─────────┘ -``` - ---- - -**Visual Reference for**: tools.func package management and tool installation -**Last Updated**: December 2025 diff --git a/docs/misc/tools.func/TOOLS_FUNC_FUNCTIONS_REFERENCE.md b/docs/misc/tools.func/TOOLS_FUNC_FUNCTIONS_REFERENCE.md deleted file mode 100644 index 029095568..000000000 --- a/docs/misc/tools.func/TOOLS_FUNC_FUNCTIONS_REFERENCE.md +++ /dev/null @@ -1,784 +0,0 @@ -# tools.func Functions Reference - -Complete alphabetical reference of all functions in tools.func with parameters, usage, and examples. - -## Function Index - -### Package Management -- `pkg_install()` - Install packages safely with retry -- `pkg_update()` - Update package lists with retry -- `pkg_remove()` - Remove packages cleanly - -### Repository Management -- `setup_deb822_repo()` - Add repository in modern deb822 format -- `cleanup_repo_metadata()` - Clean GPG keys and old repositories -- `check_repository()` - Verify repository accessibility - -### Tool Installation Functions (30+) - -**Programming Languages**: -- `setup_nodejs(VERSION)` - Install Node.js and npm -- `setup_php(VERSION)` - Install PHP-FPM and CLI -- `setup_python(VERSION)` - Install Python 3 with pip -- `setup_uv()` - Install Python uv (modern & fast) -- `setup_ruby(VERSION)` - Install Ruby with gem -- `setup_golang(VERSION)` - Install Go programming language -- `setup_java(VERSION)` - Install OpenJDK (Adoptium) - -**Databases**: -- `setup_mariadb()` - Install MariaDB server -- `setup_mariadb_db()` - Create user/db in MariaDB -- `setup_postgresql(VERSION)` - Install PostgreSQL -- `setup_postgresql_db()` - Create user/db in PostgreSQL -- `setup_mongodb(VERSION)` - Install MongoDB -- `setup_redis(VERSION)` - Install Redis cache 
-- `setup_meilisearch()` - Install Meilisearch engine - -**Web Servers**: -- `setup_nginx()` - Install Nginx -- `setup_apache()` - Install Apache HTTP Server -- `setup_caddy()` - Install Caddy -- `setup_traefik()` - Install Traefik proxy - -**Containers**: -- `setup_docker()` - Install Docker -- `setup_podman()` - Install Podman - -**Development**: -- `setup_git()` - Install Git -- `setup_docker_compose()` - Install Docker Compose -- `setup_composer()` - Install PHP Composer -- `setup_build_tools()` - Install build-essential -- `setup_yq()` - Install mikefarah/yq processor - -**Monitoring**: -- `setup_grafana()` - Install Grafana -- `setup_prometheus()` - Install Prometheus -- `setup_telegraf()` - Install Telegraf - -**System**: -- `setup_wireguard()` - Install WireGuard VPN -- `setup_netdata()` - Install Netdata monitoring -- `setup_tailscale()` - Install Tailscale -- (+ more...) - ---- - -## Core Functions - -### install_packages_with_retry() - -Install one or more packages safely with automatic retry logic (3 attempts), APT refresh, and lock handling. - -**Signature**: -```bash -install_packages_with_retry PACKAGE1 [PACKAGE2 ...] -``` - -**Parameters**: -- `PACKAGE1, PACKAGE2, ...` - Package names to install - -**Returns**: -- `0` - All packages installed successfully -- `1` - Installation failed after all retries - -**Features**: -- Automatically sets `DEBIAN_FRONTEND=noninteractive` -- Handles DPKG lock errors with `dpkg --configure -a` -- Retries on transient network or APT failures - -**Example**: -```bash -install_packages_with_retry curl wget git -``` - ---- - -### upgrade_packages_with_retry() - -Upgrades installed packages with the same robust retry logic as the installation helper. - -**Signature**: -```bash -upgrade_packages_with_retry -``` - -**Returns**: -- `0` - Upgrade successful -- `1` - Upgrade failed - ---- - -### fetch_and_deploy_gh_release() - -The primary tool for downloading and installing software from GitHub Releases. 
Supports binaries, tarballs, and Debian packages. - -**Signature**: -```bash -fetch_and_deploy_gh_release APPREPO TYPE [VERSION] [DEST] [ASSET_PATTERN] -``` - -**Environment Variables**: -- `APPREPO`: GitHub repository (e.g., `owner/repo`) -- `TYPE`: Asset type (`binary`, `tarball`, `prebuild`, `singlefile`) -- `VERSION`: Specific tag or `latest` (Default: `latest`) -- `DEST`: Target directory (Default: `/opt/$APP`) -- `ASSET_PATTERN`: Regex or string pattern to match the release asset (Required for `prebuild` and `singlefile`) - -**Supported Operation Modes**: -- `tarball`: Downloads and extracts the source tarball. -- `binary`: Detects host architecture and installs a `.deb` package using `apt` or `dpkg`. -- `prebuild`: Downloads and extracts a pre-built binary archive (supports `.tar.gz`, `.zip`, `.tgz`, `.txz`). -- `singlefile`: Downloads a single binary file to the destination. - -**Environment Variables**: -- `CLEAN_INSTALL=1`: Removes all contents of the destination directory before extraction. -- `DPKG_FORCE_CONFOLD=1`: Forces `dpkg` to keep old config files during package updates. -- `SYSTEMD_OFFLINE=1`: Used automatically for `.deb` installs to prevent systemd-tmpfiles failures in unprivileged containers. - -**Example**: -```bash -fetch_and_deploy_gh_release "muesli/duf" "binary" "latest" "/opt/duf" "duf_.*_linux_amd64.tar.gz" -``` - ---- - -### check_for_gh_release() - -Checks if a newer version is available on GitHub compared to the installed version. - -**Signature**: -```bash -check_for_gh_release APP REPO -``` - -**Example**: -```bash -if check_for_gh_release "nodejs" "nodesource/distributions"; then - # update logic -fi -``` - ---- - -### prepare_repository_setup() - -Performs safe repository preparation by cleaning up old files, keyrings, and ensuring the APT system is in a working state. - -**Signature**: -```bash -prepare_repository_setup REPO_NAME [REPO_NAME2 ...] 
-``` - -**Example**: -```bash -prepare_repository_setup "mariadb" "mysql" -``` - ---- - -### verify_tool_version() - -Validates if the installed major version matches the expected version. - -**Signature**: -```bash -verify_tool_version NAME EXPECTED INSTALLED -``` - -**Example**: -```bash -verify_tool_version "nodejs" "22" "$(node -v | grep -oP '^v\K[0-9]+')" -``` - ---- - -### setup_deb822_repo() - -Add repository in modern deb822 format. - -**Signature**: -```bash -setup_deb822_repo NAME GPG_URL REPO_URL SUITE COMPONENT [ARCHITECTURES] [ENABLED] -``` - -**Parameters**: -- `NAME` - Repository name (e.g., "nodejs") -- `GPG_URL` - URL to GPG key (e.g., https://example.com/key.gpg) -- `REPO_URL` - Main repository URL (e.g., https://example.com/repo) -- `SUITE` - Repository suite (e.g., "jammy", "bookworm") -- `COMPONENT` - Repository component (e.g., "main", "testing") -- `ARCHITECTURES` - Optional Comma-separated list of architectures (e.g., "amd64,arm64") -- `ENABLED` - Optional "true" or "false" (default: "true") - -**Returns**: -- `0` - Repository added successfully -- `1` - Repository setup failed - -**Example**: -```bash -setup_deb822_repo \ - "nodejs" \ - "https://deb.nodesource.com/gpgkey/nodesource.gpg.key" \ - "https://deb.nodesource.com/node_20.x" \ - "jammy" \ - "main" -``` - ---- - -### cleanup_repo_metadata() - -Clean up GPG keys and old repository configurations. - -**Signature**: -```bash -cleanup_repo_metadata -``` - -**Parameters**: None - -**Returns**: -- `0` - Cleanup complete - -**Example**: -```bash -cleanup_repo_metadata -``` - ---- - -## Tool Installation Functions - -### setup_nodejs() - -Install Node.js and npm from official repositories. Handles legacy version cleanup (nvm) automatically. - -**Signature**: -```bash -setup_nodejs -``` - -**Environment Variables**: -- `NODE_VERSION`: Major version to install (e.g. "20", "22", "24"). Default: "24". -- `NODE_MODULE`: Optional npm package to install globally during setup (e.g. "pnpm", "yarn"). 
- -**Example**: -```bash -NODE_VERSION="22" NODE_MODULE="pnpm" setup_nodejs -``` - ---- - -### setup_php() - -Install PHP with configurable extensions and FPM/Apache integration. - -**Signature**: -```bash -setup_php -``` - -**Environment Variables**: -- `PHP_VERSION`: Version to install (e.g. "8.3", "8.4"). Default: "8.4". -- `PHP_MODULE`: Comma-separated list of additional extensions. -- `PHP_FPM`: Set to "YES" to install php-fpm. -- `PHP_APACHE`: Set to "YES" to install libapache2-mod-php. - -**Example**: -```bash -PHP_VERSION="8.3" PHP_FPM="YES" PHP_MODULE="mysql,xml,zip" setup_php -``` - ---- - -### setup_mariadb_db() - -Creates a new MariaDB database and a dedicated user with all privileges. Automatically generates a password if not provided and saves it to a credentials file. - -**Environment Variables**: -- `MARIADB_DB_NAME`: Name of the database (required) -- `MARIADB_DB_USER`: Name of the database user (required) -- `MARIADB_DB_PASS`: User password (optional, auto-generated if omitted) - -**Example**: -```bash -MARIADB_DB_NAME="myapp" MARIADB_DB_USER="myapp_user" setup_mariadb_db -``` - ---- - -### setup_postgresql_db() - -Creates a new PostgreSQL database and a dedicated user/role with all privileges. Automatically generates a password if not provided and saves it to a credentials file. - -**Environment Variables**: -- `PG_DB_NAME`: Name of the database (required) -- `PG_DB_USER`: Name of the database user (required) -- `PG_DB_PASS`: User password (optional, auto-generated if omitted) - ---- - -### setup_java() - -Installs Temurin JDK. - -**Signature**: -```bash -JAVA_VERSION="21" setup_java -``` - -**Parameters**: -- `JAVA_VERSION` - JDK version (e.g., "17", "21") (default: "21") - -**Example**: -```bash -JAVA_VERSION="17" setup_java -``` - ---- - -### setup_uv() - -Installs `uv` (modern Python package manager). 
- -**Signature**: -```bash -PYTHON_VERSION="3.13" setup_uv -``` - -**Parameters**: -- `PYTHON_VERSION` - Optional Python version to pre-install via uv (e.g., "3.12", "3.13") - -**Example**: -```bash -PYTHON_VERSION="3.13" setup_uv -``` - ---- - -### setup_go() - -Installs Go programming language. - -**Signature**: -```bash -GO_VERSION="1.23" setup_go -``` - -**Parameters**: -- `GO_VERSION` - Go version to install (default: "1.23") - -**Example**: -```bash -GO_VERSION="1.24" setup_go -``` - ---- - -### setup_yq() - -Installs `yq` (YAML processor). - -**Signature**: -```bash -setup_yq -``` - -**Example**: -```bash -setup_yq -``` - ---- - -### setup_composer() - -Installs PHP Composer. - -**Signature**: -```bash -setup_composer -``` - -**Example**: -```bash -setup_composer -``` - ---- - -### setup_meilisearch() - -Install and configure Meilisearch search engine. - -**Environment Variables**: -- `MEILISEARCH_BIND`: Address and port to bind to (Default: "127.0.0.1:7700") -- `MEILISEARCH_ENV`: Environment mode (Default: "production") - ---- - -### setup_yq() - -Install the `mikefarah/yq` YAML processor. Removes existing non-compliant versions. - -**Example**: -```bash -setup_yq -yq eval '.app.version = "1.0.0"' -i config.yaml -``` - ---- - -### setup_composer() - -Install or update the PHP Composer package manager. Handles `COMPOSER_ALLOW_SUPERUSER` automatically and performs self-updates if already installed. - -**Example**: -```bash -setup_php -setup_composer -$STD composer install --no-dev -``` - ---- - -### setup_build_tools() - -Install the `build-essential` package suite for compiling software. - ---- - -### setup_uv() - -Install the modern Python package manager `uv`. Extremely fast replacement for pip/venv. - -**Environment Variables**: -- `PYTHON_VERSION`: Major.Minor version to ensure is installed. - -**Example**: -```bash -PYTHON_VERSION="3.12" setup_uv -uv sync --locked -``` - ---- - -### setup_java() - -Install OpenJDK via the Adoptium repository. 
- -**Environment Variables**: -- `JAVA_VERSION`: Major version to install (e.g. "17", "21"). Default: "21". - -**Example**: -```bash -JAVA_VERSION="21" setup_java -``` - ---- -```bash -setup_nodejs VERSION -``` - -**Parameters**: -- `VERSION` - Node.js version (e.g., "20", "22", "lts") - -**Returns**: -- `0` - Installation successful -- `1` - Installation failed - -**Creates**: -- `/opt/nodejs_version.txt` - Version file - -**Example**: -```bash -setup_nodejs "20" -``` - ---- - -### setup_php(VERSION) - -Install PHP-FPM, CLI, and common extensions. - -**Signature**: -```bash -setup_php VERSION -``` - -**Parameters**: -- `VERSION` - PHP version (e.g., "8.2", "8.3") - -**Returns**: -- `0` - Installation successful -- `1` - Installation failed - -**Creates**: -- `/opt/php_version.txt` - Version file - -**Example**: -```bash -setup_php "8.3" -``` - ---- - -### setup_mariadb() - -Install MariaDB server and client utilities. - -**Signature**: -```bash -setup_mariadb # Uses distribution packages (recommended) -MARIADB_VERSION="11.4" setup_mariadb # Uses official MariaDB repository -``` - -**Variables**: -- `MARIADB_VERSION` - (optional) Specific MariaDB version - - Not set or `"latest"`: Uses distribution packages (most reliable, avoids mirror issues) - - Specific version (e.g., `"11.4"`, `"12.2"`): Uses official MariaDB repository - -**Returns**: -- `0` - Installation successful -- `1` - Installation failed - -**Creates**: -- `/opt/mariadb_version.txt` - Version file - -**Example**: -```bash -# Recommended: Use distribution packages (stable, no mirror issues) -setup_mariadb - -# Specific version from official repository -MARIADB_VERSION="11.4" setup_mariadb -``` - ---- - -### setup_postgresql(VERSION) - -Install PostgreSQL server and client utilities. 
- -**Signature**: -```bash -setup_postgresql VERSION -``` - -**Parameters**: -- `VERSION` - PostgreSQL version (e.g., "14", "15", "16") - -**Returns**: -- `0` - Installation successful -- `1` - Installation failed - -**Creates**: -- `/opt/postgresql_version.txt` - Version file - -**Example**: -```bash -setup_postgresql "16" -``` - ---- - -### setup_docker() - -Install Docker and Docker CLI. - -**Signature**: -```bash -setup_docker -``` - -**Parameters**: None - -**Returns**: -- `0` - Installation successful -- `1` - Installation failed - -**Creates**: -- `/opt/docker_version.txt` - Version file - -**Example**: -```bash -setup_docker -``` - ---- - -### setup_composer() - -Install PHP Composer (dependency manager). - -**Signature**: -```bash -setup_composer -``` - -**Parameters**: None - -**Returns**: -- `0` - Installation successful -- `1` - Installation failed - -**Creates**: -- `/usr/local/bin/composer` - Composer executable - -**Example**: -```bash -setup_composer -``` - ---- - -### setup_build_tools() - -Install build-essential and development tools (gcc, make, etc.). - -**Signature**: -```bash -setup_build_tools -``` - -**Parameters**: None - -**Returns**: -- `0` - Installation successful -- `1` - Installation failed - -**Example**: -```bash -setup_build_tools -``` - ---- - -## System Configuration - -### setting_up_container() - -Display setup message and initialize container environment. - -**Signature**: -```bash -setting_up_container -``` - -**Example**: -```bash -setting_up_container -# Output: ⏳ Setting up container... -``` - ---- - -### motd_ssh() - -Configure SSH daemon and MOTD for container. - -**Signature**: -```bash -motd_ssh -``` - -**Example**: -```bash -motd_ssh -# Configures SSH and creates MOTD -``` - ---- - -### customize() - -Apply container customizations and final setup. - -**Signature**: -```bash -customize -``` - -**Example**: -```bash -customize -``` - ---- - -### cleanup_lxc() - -Final cleanup of temporary files and logs. 
- -**Signature**: -```bash -cleanup_lxc -``` - -**Example**: -```bash -cleanup_lxc -# Removes temp files, finalizes installation -``` - ---- - -## Usage Patterns - -### Basic Installation Sequence - -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -pkg_update # Update package lists -setup_nodejs "20" # Install Node.js -setup_mariadb # Install MariaDB (distribution packages) - -# ... application installation ... - -motd_ssh # Setup SSH/MOTD -customize # Apply customizations -cleanup_lxc # Final cleanup -``` - -### Tool Chain Installation - -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -# Install full web stack -pkg_update -setup_nginx -setup_php "8.3" -setup_mariadb # Uses distribution packages -setup_composer -``` - -### With Repository Setup - -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -pkg_update - -# Add Node.js repository -setup_deb822_repo \ - "https://deb.nodesource.com/gpgkey/nodesource.gpg.key" \ - "nodejs" \ - "jammy" \ - "https://deb.nodesource.com/node_20.x" \ - "main" - -pkg_update -setup_nodejs "20" -``` - ---- - -**Last Updated**: December 2025 -**Total Functions**: 30+ -**Maintained by**: community-scripts team diff --git a/docs/misc/tools.func/TOOLS_FUNC_INTEGRATION.md b/docs/misc/tools.func/TOOLS_FUNC_INTEGRATION.md deleted file mode 100644 index 6c1f82643..000000000 --- a/docs/misc/tools.func/TOOLS_FUNC_INTEGRATION.md +++ /dev/null @@ -1,418 +0,0 @@ -# tools.func Integration Guide - -How tools.func integrates with other components and provides package/tool services to the ProxmoxVE ecosystem. 
- -## Component Relationships - -### tools.func in the Installation Pipeline - -``` -ct/AppName.sh (host) - │ - ├─ Calls build.func - │ - └─ Creates Container - │ - ▼ -install/appname-install.sh (container) - │ - ├─ Sources: core.func (colors, messaging) - ├─ Sources: error_handler.func (error handling) - ├─ Sources: install.func (container setup) - │ - └─ ★ Sources: tools.func ★ - │ - ├─ pkg_update() - ├─ pkg_install() - ├─ setup_nodejs() - ├─ setup_php() - ├─ setup_mariadb() - └─ ... 30+ functions -``` - -### Integration with core.func - -**tools.func uses core.func for**: -- `msg_info()` - Display progress messages -- `msg_ok()` - Display success messages -- `msg_error()` - Display error messages -- `msg_warn()` - Display warnings -- Color codes (GN, RD, YW, BL) for formatted output -- `$STD` variable - Output suppression control - -**Example**: -```bash -# tools.func internally calls: -msg_info "Installing Node.js" # Uses core.func -setup_nodejs "20" # Setup happens -msg_ok "Node.js installed" # Uses core.func -``` - -### Integration with error_handler.func - -**tools.func uses error_handler.func for**: -- Exit code mapping to error descriptions -- Automatic error trapping (catch_errors) -- Signal handlers (SIGINT, SIGTERM, EXIT) -- Structured error reporting - -**Example**: -```bash -# If setup_nodejs fails, error_handler catches it: -catch_errors # Calls from error_handler.func -setup_nodejs "20" # If this exits non-zero - # error_handler logs and traps it -``` - -### Integration with install.func - -**tools.func coordinates with install.func for**: -- Initial OS updates (install.func) → then tools (tools.func) -- Network verification before tool installation -- Package manager state validation -- Cleanup procedures after tool setup - -**Sequence**: -```bash -setting_up_container() # From install.func -network_check() # From install.func -update_os() # From install.func - -pkg_update # From tools.func -setup_nodejs() # From tools.func - -motd_ssh() # From 
install.func -customize() # From install.func -cleanup_lxc() # From install.func -``` - ---- - -## Integration with alpine-tools.func (Alpine Containers) - -### When to Use tools.func vs alpine-tools.func - -| Feature | tools.func (Debian) | alpine-tools.func (Alpine) | -|---------|:---:|:---:| -| Package Manager | apt-get | apk | -| Installation Scripts | install/*.sh | install/*-alpine.sh | -| Tool Setup | `setup_nodejs()` (apt) | `setup_nodejs()` (apk) | -| Repository | `setup_deb822_repo()` | `add_community_repo()` | -| Services | systemctl | rc-service | - -### Automatic Selection - -Installation scripts detect OS and source appropriate functions: - -```bash -# install/myapp-install.sh -if grep -qi 'alpine' /etc/os-release; then - # Alpine detected - uses alpine-tools.func - apk_update - apk_add package -else - # Debian detected - uses tools.func - pkg_update - pkg_install package -fi -``` - ---- - -## Dependencies Management - -### External Dependencies - -``` -tools.func requires: -├─ curl (for HTTP requests, GPG keys) -├─ wget (for downloads) -├─ apt-get (package manager) -├─ gpg (GPG key management) -├─ openssl (for encryption) -└─ systemctl (service management on Debian) -``` - -### Internal Function Dependencies - -``` -setup_nodejs() - ├─ Calls: setup_deb822_repo() - ├─ Calls: pkg_update() - ├─ Calls: pkg_install() - └─ Uses: msg_info(), msg_ok() [from core.func] - -setup_mariadb() - ├─ Calls: setup_deb822_repo() - ├─ Calls: pkg_update() - ├─ Calls: pkg_install() - └─ Uses: msg_info(), msg_ok() - -setup_docker() - ├─ Calls: cleanup_repo_metadata() - ├─ Calls: setup_deb822_repo() - ├─ Calls: pkg_update() - └─ Uses: msg_info(), msg_ok() -``` - ---- - -## Function Call Graph - -### Complete Installation Dependency Tree - -``` -install/app-install.sh - │ - ├─ setting_up_container() [install.func] - │ - ├─ network_check() [install.func] - │ - ├─ update_os() [install.func] - │ - ├─ pkg_update() [tools.func] - │ └─ Calls: apt-get update (with retry) - │ - ├─ 
setup_nodejs("20") [tools.func] - │ ├─ setup_deb822_repo() [tools.func] - │ │ └─ Calls: apt-get update - │ ├─ pkg_update() [tools.func] - │ └─ pkg_install() [tools.func] - │ - ├─ setup_php("8.3") [tools.func] - │ └─ Similar to setup_nodejs - │ - ├─ setup_mariadb("11") [tools.func] - │ └─ Similar to setup_nodejs - │ - ├─ motd_ssh() [install.func] - │ - ├─ customize() [install.func] - │ - └─ cleanup_lxc() [install.func] -``` - ---- - -## Configuration Management - -### Environment Variables Used by tools.func - -```bash -# Output control -STD="silent" # Suppress apt/apk output -VERBOSE="yes" # Show all output - -# Package management -DEBIAN_FRONTEND="noninteractive" - -# Tool versions (optional) -NODEJS_VERSION="20" -PHP_VERSION="8.3" -POSTGRES_VERSION="16" -``` - -### Tools Configuration Files Created - -``` -/opt/ -├─ nodejs_version.txt # Node.js version -├─ php_version.txt # PHP version -├─ mariadb_version.txt # MariaDB version -├─ postgresql_version.txt # PostgreSQL version -├─ docker_version.txt # Docker version -└─ [TOOL]_version.txt # For all installed tools - -/etc/apt/sources.list.d/ -├─ nodejs.sources # Node.js repo (deb822) -├─ docker.sources # Docker repo (deb822) -└─ [name].sources # Other repos (deb822) -``` - ---- - -## Error Handling Integration - -### Exit Codes from tools.func - -| Code | Meaning | Handled By | -|------|:---:|:---:| -| 0 | Success | Normal flow | -| 1 | Package installation failed | error_handler.func | -| 100-101 | APT error | error_handler.func | -| 127 | Command not found | error_handler.func | - -### Automatic Cleanup on Failure - -```bash -# If any step fails in install script: -catch_errors -pkg_update # Fail here? 
-setup_nodejs # Doesn't get here - -# error_handler automatically: -├─ Logs error -├─ Captures exit code -├─ Calls cleanup_lxc() -└─ Exits with proper code -``` - ---- - -## Integration with build.func - -### Variable Flow - -``` -ct/app.sh - │ - ├─ var_cpu="2" - ├─ var_ram="2048" - ├─ var_disk="10" - │ - └─ Calls: build_container() [build.func] - │ - └─ Creates container - │ - └─ Calls: install/app-install.sh - │ - └─ Uses: tools.func for installation -``` - -### Resource Considerations - -tools.func respects container resource limits: -- Large package installations respect allocated RAM -- Database setups use allocated disk space -- Build tools (gcc, make) stay within CPU allocation - ---- - -## Version Management - -### How tools.func Tracks Versions - -Each tool installation creates a version file: - -```bash -# setup_nodejs() creates: -echo "20.10.5" > /opt/nodejs_version.txt - -# Used by update scripts: -CURRENT=$(cat /opt/nodejs_version.txt) -LATEST=$(curl ... # fetch latest) -if [[ "$LATEST" != "$CURRENT" ]]; then - # Update needed -fi -``` - -### Integration with Update Functions - -```bash -# In ct/app.sh: -function update_script() { - # Check Node version - RELEASE=$(curl ... | jq '.version') - CURRENT=$(cat /opt/nodejs_version.txt) - - if [[ "$RELEASE" != "$CURRENT" ]]; then - # Use tools.func to upgrade - setup_nodejs "$RELEASE" - fi -} -``` - ---- - -## Best Practices for Integration - -### ✅ DO - -1. **Call functions in proper order** - ```bash - pkg_update - setup_tool "version" - ``` - -2. **Use $STD for production** - ```bash - export STD="silent" - pkg_install curl wget - ``` - -3. **Check for existing installations** - ```bash - command -v nodejs >/dev/null || setup_nodejs "20" - ``` - -4. **Coordinate with install.func** - ```bash - setting_up_container - update_os # From install.func - setup_nodejs # From tools.func - motd_ssh # Back to install.func - ``` - -### ❌ DON'T - -1. 
**Don't skip pkg_update** - ```bash - # Bad - may fail due to stale cache - pkg_install curl - ``` - -2. **Don't hardcode versions** - ```bash - # Bad - apt-get install nodejs=20.x - - # Good - setup_nodejs "20" - ``` - -3. **Don't mix package managers** - ```bash - # Bad - apt-get install curl - apk add wget - ``` - -4. **Don't ignore errors** - ```bash - # Bad - setup_docker || true - - # Good - if ! setup_docker; then - msg_error "Docker failed" - exit 1 - fi - ``` - ---- - -## Troubleshooting Integration Issues - -### "Package installation fails" -- Check: `pkg_update` was called first -- Check: Package name is correct for OS -- Solution: Manually verify in container - -### "Tool not accessible after installation" -- Check: Tool added to PATH -- Check: Version file created -- Solution: `which toolname` to verify - -### "Repository conflicts" -- Check: No duplicate repositories -- Solution: `cleanup_repo_metadata()` before adding - -### "Alpine-specific errors when using Debian tools" -- Problem: Using tools.func functions on Alpine -- Solution: Use alpine-tools.func instead - ---- - -**Last Updated**: December 2025 -**Maintainers**: community-scripts team -**Integration Status**: All components fully integrated diff --git a/docs/misc/tools.func/TOOLS_FUNC_USAGE_EXAMPLES.md b/docs/misc/tools.func/TOOLS_FUNC_USAGE_EXAMPLES.md deleted file mode 100644 index 39475b37b..000000000 --- a/docs/misc/tools.func/TOOLS_FUNC_USAGE_EXAMPLES.md +++ /dev/null @@ -1,420 +0,0 @@ -# tools.func Usage Examples - -Practical, real-world examples for using tools.func functions in application installation scripts. 
- -## Basic Examples - -### Example 1: Simple Package Installation - -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -# Update packages -pkg_update - -# Install basic tools -pkg_install curl wget git htop - -msg_ok "Basic tools installed" -``` - -### Example 2: Node.js Application - -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -setting_up_container -network_check -update_os - -msg_info "Installing Node.js" -pkg_update -setup_nodejs "20" -msg_ok "Node.js installed" - -msg_info "Downloading application" -cd /opt -git clone https://github.com/example/app.git -cd app -npm install -msg_ok "Application installed" - -motd_ssh -customize -cleanup_lxc -``` - ---- - -## Advanced Examples - -### Example 3: PHP + MySQL Web Application - -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -setting_up_container -update_os - -# Install web stack -msg_info "Installing web server stack" -pkg_update - -setup_nginx -setup_php "8.3" -setup_mariadb # Uses distribution packages (recommended) -setup_composer - -msg_ok "Web stack installed" - -# Download application -msg_info "Downloading application" -git clone https://github.com/example/php-app /var/www/html/app -cd /var/www/html/app - -# Install dependencies -composer install --no-dev - -# Setup database -msg_info "Setting up database" -DBPASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13) -mysql -e "CREATE DATABASE phpapp; GRANT ALL ON phpapp.* TO 'phpapp'@'localhost' IDENTIFIED BY '$DBPASS';" - -# Create .env file -cat > .env < .env < /etc/systemd/system/nodeapp.service </dev/null 2>&1; then - msg_ok "Node.js already installed: $(node --version)" -else - msg_info "Installing Node.js" - setup_nodejs "20" - msg_ok "Node.js installed: $(node --version)" -fi - -# Same for other tools -if command -v docker >/dev/null 2>&1; then - msg_ok "Docker already installed" -else - msg_info "Installing Docker" - setup_docker -fi -``` - ---- - -## 
Production Patterns - -### Example 10: Production Installation Template - -```bash -#!/usr/bin/env bash -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" - -# === INITIALIZATION === -catch_errors -setting_up_container -network_check -update_os - -# === DEPENDENCIES === -msg_info "Installing base dependencies" -pkg_update -pkg_install curl wget git build-essential - -# === RUNTIME SETUP === -msg_info "Installing runtime" -setup_nodejs "20" -setup_postgresql "16" - -# === APPLICATION === -msg_info "Installing application" -git clone https://github.com/user/app /opt/app -cd /opt/app -npm install --omit=dev -npm run build - -# === CONFIGURATION === -msg_info "Configuring application" -# ... configuration steps ... - -# === SERVICES === -msg_info "Setting up services" -# ... service setup ... - -# === FINALIZATION === -msg_ok "Installation complete" -motd_ssh -customize -cleanup_lxc -``` - ---- - -## Tips & Best Practices - -### ✅ DO -```bash -# Use $STD for silent operations -$STD apt-get install curl - -# Use pkg_update before installing -pkg_update -pkg_install package-name - -# Chain multiple tools together -setup_nodejs "20" -setup_php "8.3" -setup_mariadb # Distribution packages (recommended) - -# Check command success -if ! setup_docker; then - msg_error "Docker installation failed" - exit 1 -fi -``` - -### ❌ DON'T -```bash -# Don't hardcode commands -apt-get install curl # Bad - -# Don't skip updates -pkg_install package # May fail if cache stale - -# Don't ignore errors -setup_nodejs || true # Silences errors silently - -# Don't mix package managers -apt-get install curl -apk add wget # Don't mix! 
-``` - ---- - -**Last Updated**: December 2025 -**Examples**: 10 detailed patterns -**All examples tested and verified** diff --git a/docs/tools/README.md b/docs/tools/README.md deleted file mode 100644 index 82d1a8871..000000000 --- a/docs/tools/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# Tools & Add-ons Documentation (/tools) - -This directory contains comprehensive documentation for tools, utilities, and add-ons in the `/tools` directory. - -## Overview - -The `/tools` directory contains: -- **Proxmox management tools** - Helper scripts for Proxmox administration -- **Proxmox VE add-ons** - Extensions and integrations -- **Utility scripts** - General-purpose automation tools - -## Documentation Structure - -Tools documentation focuses on purpose, usage, and integration with the main ecosystem. - -## Available Tools - -The `/tools` directory structure includes: - -### `/tools/pve/` -Proxmox VE management and administration tools: -- Container management utilities -- VM management helpers -- Storage management tools -- Network configuration tools -- Backup and recovery utilities - -### `/tools/addon/` -Proxmox add-ons and extensions: -- Web UI enhancements -- API extensions -- Integration modules -- Custom scripts - -### `/tools/headers/` -ASCII art headers and templates for scripts. - -## Common Tools & Scripts - -Examples of tools available: - -- **Container management** - Batch operations on containers -- **VM provisioning** - Automated VM setup -- **Backup automation** - Scheduled backups -- **Monitoring integration** - Connect to monitoring systems -- **Configuration management** - Infrastructure as code -- **Reporting tools** - Generate reports and statistics - -## Integration Points - -Tools integrate with: -- **build.func** - Main container orchestrator -- **core.func** - Utility functions -- **error_handler.func** - Error handling -- **tools.func** - Package installation - -## Contributing Tools - -To contribute a new tool: - -1. 
Place script in appropriate `/tools/` subdirectory -2. Follow project standards: - - Use `#!/usr/bin/env bash` - - Source build.func if needed - - Handle errors with error_handler.func -3. Document usage in script header comments -4. Submit PR - -## Common Tasks - -- **Create Proxmox management tool** → Study existing tools -- **Create add-on** → Follow add-on guidelines -- **Integration** → Use build.func and core.func -- **Error handling** → Use error_handler.func - ---- - -**Last Updated**: December 2025 -**Maintainers**: community-scripts team diff --git a/docs/vm/README.md b/docs/vm/README.md deleted file mode 100644 index 01535b542..000000000 --- a/docs/vm/README.md +++ /dev/null @@ -1,130 +0,0 @@ -# VM Scripts Documentation (/vm) - -This directory contains comprehensive documentation for virtual machine creation scripts in the `/vm` directory. - -## Overview - -VM scripts (`vm/*.sh`) create full virtual machines (not containers) in Proxmox VE with complete operating systems and cloud-init provisioning. - -## Documentation Structure - -VM documentation parallels container documentation but focuses on VM-specific features. - -## Key Resources - -- **[misc/cloud-init.func/](../misc/cloud-init.func/)** - Cloud-init provisioning documentation -- **[CONTRIBUTION_GUIDE.md](../CONTRIBUTION_GUIDE.md)** - Contribution workflow -- **[EXIT_CODES.md](../EXIT_CODES.md)** - Exit code reference - -## VM Creation Flow - -``` -vm/OsName-vm.sh (host-side) - │ - ├─ Calls: build.func (orchestrator) - │ - ├─ Variables: var_cpu, var_ram, var_disk, var_os - │ - ├─ Uses: cloud-init.func (provisioning) - │ - └─ Creates: KVM/QEMU VM - │ - └─ Boots with: Cloud-init config - │ - ├─ System phase - ├─ Config phase - └─ Final phase -``` - -## Available VM Scripts - -See `/vm` directory for all VM creation scripts. 
Examples: - -- `ubuntu2504-vm.sh` - Ubuntu 25.04 VM (Latest) -- `ubuntu2404-vm.sh` - Ubuntu 24.04 VM (LTS) -- `debian-13-vm.sh` - Debian 13 VM (Trixie) -- `archlinux-vm.sh` - Arch Linux VM -- `haos-vm.sh` - Home Assistant OS -- `mikrotik-routeros.sh` - MikroTik RouterOS -- `openwrt-vm.sh` - OpenWrt VM -- `opnsense-vm.sh` - OPNsense firewall -- `umbrel-os-vm.sh` - Umbrel OS VM -- And 10+ more... - -## VM vs Container - -| Feature | VM | Container | -|---------|:---:|:---:| -| Isolation | Full | Lightweight | -| Boot Time | Slower | Instant | -| Resource Use | Higher | Lower | -| Use Case | Full OS | Single app | -| Init System | systemd/etc | cloud-init | -| Storage | Disk image | Filesystem | - -## Quick Start - -To understand VM creation: - -1. Read: [misc/cloud-init.func/README.md](../misc/cloud-init.func/README.md) -2. Study: A similar existing script in `/vm` -3. Understand cloud-init configuration -4. Test locally -5. Submit PR - -## Contributing a New VM - -1. Create `vm/osname-vm.sh` -2. Use cloud-init for provisioning -3. Follow VM script template -4. Test VM creation and boot -5. Submit PR - -## Cloud-Init Provisioning - -VMs are provisioned using cloud-init: - -```yaml -#cloud-config -hostname: myvm -timezone: UTC - -packages: - - curl - - wget - -users: - - name: ubuntu - ssh_authorized_keys: - - ssh-rsa AAAAB3... - -bootcmd: - - echo "VM starting..." 
- -runcmd: - - apt-get update - - apt-get upgrade -y -``` - -## Common VM Operations - -- **Create VM with cloud-init** → [misc/cloud-init.func/](../misc/cloud-init.func/) -- **Configure networking** → Cloud-init YAML documentation -- **Setup SSH keys** → [misc/cloud-init.func/CLOUD_INIT_FUNC_USAGE_EXAMPLES.md](../misc/cloud-init.func/CLOUD_INIT_FUNC_USAGE_EXAMPLES.md) -- **Debug VM creation** → [EXIT_CODES.md](../EXIT_CODES.md) - -## VM Templates - -Common VM templates available: - -- **Ubuntu LTS** - Latest stable Ubuntu -- **Debian Stable** - Latest stable Debian -- **OPNsense** - Network security platform -- **Home Assistant** - Home automation -- **Kubernetes** - K3s lightweight cluster -- **Proxmox Backup** - Backup server - ---- - -**Last Updated**: December 2025 -**Maintainers**: community-scripts team diff --git a/frontend/.gitignore b/frontend/.gitignore deleted file mode 100644 index 4a2000d19..000000000 --- a/frontend/.gitignore +++ /dev/null @@ -1,39 +0,0 @@ -# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
- -# dependencies -/node_modules -/.pnp -.pnp.js -.yarn/install-state.gz - -# wrangler -.worker-next -.wrangler - -# testing -/coverage - -# next.js -/.next/ -out -# production -/build - -# misc -.DS_Store -*.pem - -# debug -npm-debug.log* -yarn-debug.log* -yarn-error.log* - -# # local env files -# .env*.local -# .env -# vercel -.vercel - -# typescript -*.tsbuildinfo -next-env.d.ts diff --git a/frontend/.vscode/settings.json b/frontend/.vscode/settings.json deleted file mode 100644 index 9f2f44827..000000000 --- a/frontend/.vscode/settings.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - // Disable the default formatter, use eslint instead - "prettier.enable": false, - "editor.formatOnSave": false, - - // Auto fix - "editor.codeActionsOnSave": { - "source.fixAll.eslint": "explicit", - "source.organizeImports": "never" - }, - - // Silent the stylistic rules in you IDE, but still auto fix them - "eslint.rules.customizations": [ - { "rule": "style/*", "severity": "off", "fixable": true }, - { "rule": "format/*", "severity": "off", "fixable": true }, - { "rule": "*-indent", "severity": "off", "fixable": true }, - { "rule": "*-spacing", "severity": "off", "fixable": true }, - { "rule": "*-spaces", "severity": "off", "fixable": true }, - { "rule": "*-order", "severity": "off", "fixable": true }, - { "rule": "*-dangle", "severity": "off", "fixable": true }, - { "rule": "*-newline", "severity": "off", "fixable": true }, - { "rule": "*quotes", "severity": "off", "fixable": true }, - { "rule": "*semi", "severity": "off", "fixable": true } - ], - - // Enable eslint for all supported languages - "eslint.validate": [ - "javascript", - "javascriptreact", - "typescript", - "typescriptreact", - "vue", - "html", - "markdown", - "json", - "json5", - "jsonc", - "yaml", - "toml", - "xml", - "gql", - "graphql", - "astro", - "svelte", - "css", - "less", - "scss", - "pcss", - "postcss" - ] -} diff --git a/frontend/LICENSE b/frontend/LICENSE deleted file mode 100644 index 9e9b270a5..000000000 --- 
a/frontend/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2024-Present Bram Suurd - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/frontend/README.md b/frontend/README.md deleted file mode 100644 index f9638f033..000000000 --- a/frontend/README.md +++ /dev/null @@ -1,281 +0,0 @@ -# Proxmox VE Helper-Scripts Frontend - -> 🚀 **Modern frontend for the Community-Scripts Proxmox VE Helper-Scripts repository** - -A comprehensive, user-friendly interface built with Next.js that provides access to 300+ automation scripts for Proxmox Virtual Environment management. This frontend serves as the official website for the Community-Scripts organization's Proxmox VE Helper-Scripts repository. 
- -![Next.js](https://img.shields.io/badge/Next.js-15.2.4-black?style=flat-square&logo=next.js) -![React](https://img.shields.io/badge/React-19.0.0-blue?style=flat-square&logo=react) -![TypeScript](https://img.shields.io/badge/TypeScript-5.8.2-blue?style=flat-square&logo=typescript) -![Tailwind CSS](https://img.shields.io/badge/Tailwind-3.4.17-06B6D4?style=flat-square&logo=tailwindcss) -![License](https://img.shields.io/badge/License-MIT-green?style=flat-square) - -## 🌟 Features - -### Core Functionality - -- **📜 Script Management**: Browse, search, and filter 300+ Proxmox VE scripts -- **📱 Responsive Design**: Mobile-first approach with modern UI/UX -- **🔍 Advanced Search**: Fuzzy search with category filtering -- **📊 Analytics Integration**: Built-in analytics for usage tracking -- **🌙 Dark/Light Mode**: Theme switching with system preference detection -- **⚡ Performance Optimized**: Static site generation for lightning-fast loading - -### Technical Features - -- **🎨 Modern UI Components**: Built with Radix UI and shadcn/ui -- **📈 Data Visualization**: Charts and metrics using Chart.js -- **🔄 State Management**: React Query for efficient data fetching -- **📝 Type Safety**: Full TypeScript implementation -- **🚀 Static Export**: Optimized for GitHub Pages deployment - -## 🛠️ Tech Stack - -### Frontend Framework - -- **[Next.js 15.2.4](https://nextjs.org/)** - React framework with App Router -- **[React 19.0.0](https://react.dev/)** - Latest React with concurrent features -- **[TypeScript 5.8.2](https://www.typescriptlang.org/)** - Type-safe JavaScript - -### Styling & UI - -- **[Tailwind CSS 3.4.17](https://tailwindcss.com/)** - Utility-first CSS framework -- **[Radix UI](https://www.radix-ui.com/)** - Unstyled, accessible UI components -- **[shadcn/ui](https://ui.shadcn.com/)** - Re-usable components built on Radix UI -- **[Framer Motion](https://www.framer.com/motion/)** - Animation library -- **[Lucide React](https://lucide.dev/)** - Icon library - -### Data & 
State Management - -- **[TanStack Query 5.71.1](https://tanstack.com/query)** - Powerful data synchronization -- **[Zod 3.24.2](https://zod.dev/)** - TypeScript-first schema validation -- **[nuqs 2.4.1](https://nuqs.47ng.com/)** - Type-safe search params state manager - -### Development Tools - -- **[Vitest 3.1.1](https://vitest.dev/)** - Fast unit testing framework -- **[React Testing Library](https://testing-library.com/react)** - Simple testing utilities -- **[ESLint](https://eslint.org/)** - Code linting and formatting -- **[Prettier](https://prettier.io/)** - Code formatting - -### Additional Libraries - -- **[Chart.js](https://www.chartjs.org/)** - Data visualization -- **[Fuse.js](https://fusejs.io/)** - Fuzzy search -- **[date-fns](https://date-fns.org/)** - Date utility library -- **[Next Themes](https://github.com/pacocoursey/next-themes)** - Theme management - -## 🚀 Getting Started - -### Prerequisites - -- **Node.js 18+** (recommend using the latest LTS version) -- **npm**, **yarn**, **pnpm**, or **bun** package manager -- **Git** for version control - -### Installation - -1. **Clone the repository** - - ```bash - git clone https://github.com/community-scripts/ProxmoxVE.git - cd ProxmoxVE/frontend - ``` - -2. **Install dependencies** - - ```bash - # Using npm - npm install - - # Using yarn - yarn install - - # Using pnpm - pnpm install - - # Using bun - bun install - ``` - -3. **Start the development server** - - ```bash - npm run dev - # or - yarn dev - # or - pnpm dev - # or - bun dev - ``` - -4. **Open your browser** - - Navigate to [http://localhost:3000](http://localhost:3000) to see the application running. 
- -### Environment Configuration - -The application uses the following environment variables: - -- `BASE_PATH`: Set to "ProxmoxVE" for GitHub Pages deployment -- Analytics configuration is handled in `src/config/siteConfig.tsx` - -## 🧪 Development - -### Available Scripts - -```bash -# Development -npm run dev # Start development server with Turbopack -npm run build # Build for production -npm run start # Start production server (after build) - -# Code Quality -npm run lint # Run ESLint -npm run typecheck # Run TypeScript type checking -npm run format:write # Format code with Prettier -npm run format:check # Check code formatting - -# Deployment -npm run deploy # Build and deploy to GitHub Pages -``` - -### Development Workflow - -1. **Feature Development** - - - Create a new branch for your feature - - Follow the established TypeScript and React patterns - - Use the existing component library (shadcn/ui) - - Ensure responsive design principles - -2. **Code Standards** - - - Follow TypeScript strict mode - - Use functional components with hooks - - Implement proper error boundaries - - Write descriptive variable and function names - - Use early returns for better readability - -3. **Styling Guidelines** - - - Use Tailwind CSS utility classes - - Follow mobile-first responsive design - - Implement dark/light mode considerations - - Use CSS variables from the design system - -4. **Testing** - - Write unit tests for utility functions - - Test React components with React Testing Library - - Ensure accessibility standards are met - - Run tests before committing - -### Component Development - -The project uses a component-driven development approach: - -```typescript -// Example component structure -import { cn } from "@/lib/utils"; -import { Button } from "@/components/ui/button"; - -interface ComponentProps { - title: string; - className?: string; -} - -export const Component = ({ title, className }: ComponentProps) => { - return ( -
- -
- ); -}; -``` - -### Configuration for Static Export - -The application is configured for static export in `next.config.mjs`: - -```javascript -const nextConfig = { - output: "export", - basePath: `/ProxmoxVE`, - images: { - unoptimized: true // Required for static export - } -}; -``` - -## 🤝 Contributing - -We welcome contributions from the community! Here's how you can help: - -### Getting Started - -1. **Fork the repository** on GitHub -2. **Clone your fork** locally -3. **Create a new branch** for your feature or bugfix -4. **Make your changes** following our coding standards -5. **Submit a pull request** with a clear description - -### Contribution Guidelines - -#### Code Style - -- Follow the existing TypeScript and React patterns -- Use descriptive variable and function names -- Implement proper error handling -- Write self-documenting code with appropriate comments - -#### Component Guidelines - -- Use functional components with hooks -- Implement proper TypeScript types -- Follow accessibility best practices -- Ensure responsive design -- Use the existing design system components - -#### Pull Request Process - -1. Update documentation if needed -2. Update the README if you've added new features -3. Request review from maintainers - -### Areas for Contribution - -- **🐛 Bug fixes**: Report and fix issues -- **✨ New features**: Enhance functionality -- **📚 Documentation**: Improve guides and examples -- **🎨 UI/UX**: Improve design and user experience -- **♿ Accessibility**: Enhance accessibility features -- **🚀 Performance**: Optimize loading and runtime performance - -## 📄 License - -This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. 
- -## 🙏 Acknowledgments - -- **[tteck](https://github.com/tteck)** - Original creator of the Proxmox VE Helper-Scripts -- **[Community-Scripts Organization](https://github.com/community-scripts)** - Maintaining and expanding the project -- **[Proxmox Community](https://forum.proxmox.com/)** - For continuous feedback and support -- **All Contributors** - Thank you for your valuable contributions! - -## 📚 Additional Resources - -- **[Proxmox VE Documentation](https://pve.proxmox.com/pve-docs/)** -- **[Community Scripts Repository](https://github.com/community-scripts/ProxmoxVE)** -- **[Discord Community](https://discord.gg/3AnUqsXnmK)** -- **[GitHub Discussions](https://github.com/community-scripts/ProxmoxVE/discussions)** - -## 🔗 Links - -- **🌐 Live Website**: [https://community-scripts.github.io/ProxmoxVE/](https://community-scripts.github.io/ProxmoxVE/) -- **💬 Discord Server**: [https://discord.gg/3AnUqsXnmK](https://discord.gg/3AnUqsXnmK) -- **📝 Change Log**: [https://github.com/community-scripts/ProxmoxVE/blob/main/CHANGELOG.md](https://github.com/community-scripts/ProxmoxVE/blob/main/CHANGELOG.md) - ---- - -**Made with ❤️ by the Community-Scripts team and contributors** diff --git a/frontend/bun.lock b/frontend/bun.lock deleted file mode 100644 index 8ebc77104..000000000 --- a/frontend/bun.lock +++ /dev/null @@ -1,2031 +0,0 @@ -{ - "lockfileVersion": 1, - "configVersion": 1, - "workspaces": { - "": { - "name": "proxmox-helper-scripts-website", - "dependencies": { - "@radix-ui/react-accordion": "^1.2.12", - "@radix-ui/react-dialog": "^1.1.15", - "@radix-ui/react-dropdown-menu": "^2.1.16", - "@radix-ui/react-icons": "^1.3.2", - "@radix-ui/react-label": "^2.1.8", - "@radix-ui/react-navigation-menu": "^1.2.14", - "@radix-ui/react-popover": "^1.1.15", - "@radix-ui/react-scroll-area": "^1.2.10", - "@radix-ui/react-select": "^2.2.6", - "@radix-ui/react-separator": "^1.1.8", - "@radix-ui/react-slot": "^1.2.4", - "@radix-ui/react-switch": "^1.2.6", - 
"@radix-ui/react-tabs": "^1.1.13", - "@radix-ui/react-tooltip": "^1.2.8", - "@tanstack/react-query": "^5.90.12", - "@types/react-syntax-highlighter": "^15.5.13", - "chart.js": "^4.5.1", - "chartjs-plugin-datalabels": "^2.2.0", - "class-variance-authority": "^0.7.1", - "clsx": "^2.1.1", - "cmdk": "^1.1.1", - "date-fns": "^4.1.0", - "framer-motion": "^12.23.26", - "fuse.js": "^7.1.0", - "lucide-react": "^0.561.0", - "mini-svg-data-uri": "^1.4.4", - "motion": "^12.23.26", - "next": "15.5.8", - "next-themes": "^0.4.6", - "nuqs": "^2.8.5", - "react": "19.2.3", - "react-chartjs-2": "^5.3.1", - "react-code-blocks": "^0.1.6", - "react-datepicker": "^9.0.0", - "react-day-picker": "^9.12.0", - "react-dom": "19.2.3", - "react-icons": "^5.5.0", - "react-syntax-highlighter": "^16.1.0", - "react-use-measure": "^2.1.7", - "recharts": "3.6.0", - "sharp": "^0.34.5", - "sonner": "^2.0.7", - "tailwind-merge": "^3.4.0", - "zod": "^4.2.1", - }, - "devDependencies": { - "@antfu/eslint-config": "^6.7.1", - "@eslint-react/eslint-plugin": "^2.3.13", - "@next/eslint-plugin-next": "^15.5.8", - "@tanstack/eslint-plugin-query": "^5.91.2", - "@types/node": "^25.0.2", - "@types/react": "npm:types-react@19.0.0-rc.1", - "@types/react-dom": "npm:types-react-dom@19.0.0-rc.1", - "@typescript-eslint/eslint-plugin": "^8.50.0", - "@typescript-eslint/parser": "^8.50.0", - "@vitejs/plugin-react": "^5.1.2", - "eslint": "^9.39.2", - "eslint-config-next": "15.5.8", - "eslint-plugin-format": "^1.1.0", - "eslint-plugin-react-hooks": "^7.0.1", - "eslint-plugin-react-refresh": "^0.4.25", - "jsdom": "^27.3.0", - "postcss": "^8.5.6", - "tailwindcss": "^3.4.17", - "tailwindcss-animate": "^1.0.7", - "tailwindcss-animated": "^1.1.2", - "typescript": "^5.9.3", - }, - }, - }, - "packages": { - "@acemir/cssom": ["@acemir/cssom@0.9.29", "", {}, "sha512-G90x0VW+9nW4dFajtjCoT+NM0scAfH9Mb08IcjgFHYbfiL/lU04dTF9JuVOi3/OH+DJCQdcIseSXkdCB9Ky6JA=="], - - "@alloc/quick-lru": ["@alloc/quick-lru@5.2.0", "", {}, 
"sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw=="], - - "@antfu/eslint-config": ["@antfu/eslint-config@6.7.1", "", { "dependencies": { "@antfu/install-pkg": "^1.1.0", "@clack/prompts": "^0.11.0", "@eslint-community/eslint-plugin-eslint-comments": "^4.5.0", "@eslint/markdown": "^7.5.1", "@stylistic/eslint-plugin": "^5.6.1", "@typescript-eslint/eslint-plugin": "^8.49.0", "@typescript-eslint/parser": "^8.49.0", "@vitest/eslint-plugin": "^1.5.2", "ansis": "^4.2.0", "cac": "^6.7.14", "eslint-config-flat-gitignore": "^2.1.0", "eslint-flat-config-utils": "^2.1.4", "eslint-merge-processors": "^2.0.0", "eslint-plugin-antfu": "^3.1.1", "eslint-plugin-command": "^3.4.0", "eslint-plugin-import-lite": "^0.3.0", "eslint-plugin-jsdoc": "^61.5.0", "eslint-plugin-jsonc": "^2.21.0", "eslint-plugin-n": "^17.23.1", "eslint-plugin-no-only-tests": "^3.3.0", "eslint-plugin-perfectionist": "^4.15.1", "eslint-plugin-pnpm": "^1.4.3", "eslint-plugin-regexp": "^2.10.0", "eslint-plugin-toml": "^0.12.0", "eslint-plugin-unicorn": "^62.0.0", "eslint-plugin-unused-imports": "^4.3.0", "eslint-plugin-vue": "^10.6.2", "eslint-plugin-yml": "^1.19.0", "eslint-processor-vue-blocks": "^2.0.0", "globals": "^16.5.0", "jsonc-eslint-parser": "^2.4.2", "local-pkg": "^1.1.2", "parse-gitignore": "^2.0.0", "toml-eslint-parser": "^0.10.1", "vue-eslint-parser": "^10.2.0", "yaml-eslint-parser": "^1.3.2" }, "peerDependencies": { "@eslint-react/eslint-plugin": "^2.0.1", "@next/eslint-plugin-next": ">=15.0.0", "@prettier/plugin-xml": "^3.4.1", "@unocss/eslint-plugin": ">=0.50.0", "astro-eslint-parser": "^1.0.2", "eslint": "^9.10.0", "eslint-plugin-astro": "^1.2.0", "eslint-plugin-format": ">=0.1.0", "eslint-plugin-jsx-a11y": ">=6.10.2", "eslint-plugin-react-hooks": "^7.0.0", "eslint-plugin-react-refresh": "^0.4.19", "eslint-plugin-solid": "^0.14.3", "eslint-plugin-svelte": ">=2.35.1", "eslint-plugin-vuejs-accessibility": "^2.4.1", "prettier-plugin-astro": "^0.14.0", 
"prettier-plugin-slidev": "^1.0.5", "svelte-eslint-parser": ">=0.37.0" }, "optionalPeers": ["@eslint-react/eslint-plugin", "@next/eslint-plugin-next", "@prettier/plugin-xml", "@unocss/eslint-plugin", "astro-eslint-parser", "eslint-plugin-astro", "eslint-plugin-format", "eslint-plugin-jsx-a11y", "eslint-plugin-react-hooks", "eslint-plugin-react-refresh", "eslint-plugin-solid", "eslint-plugin-svelte", "eslint-plugin-vuejs-accessibility", "prettier-plugin-astro", "prettier-plugin-slidev", "svelte-eslint-parser"], "bin": { "eslint-config": "bin/index.mjs" } }, "sha512-+8GIMmOfrtAVXoqVK9sfovAlHPkp35ilntqZ6XloO/Rty36gOxaa8dvwCh8/eqwwIsloA/hDJo3Ef95TRbdyEg=="], - - "@antfu/install-pkg": ["@antfu/install-pkg@1.1.0", "", { "dependencies": { "package-manager-detector": "^1.3.0", "tinyexec": "^1.0.1" } }, "sha512-MGQsmw10ZyI+EJo45CdSER4zEb+p31LpDAFp2Z3gkSd1yqVZGi0Ebx++YTEMonJy4oChEMLsxZ64j8FH6sSqtQ=="], - - "@asamuzakjp/css-color": ["@asamuzakjp/css-color@4.1.1", "", { "dependencies": { "@csstools/css-calc": "^2.1.4", "@csstools/css-color-parser": "^3.1.0", "@csstools/css-parser-algorithms": "^3.0.5", "@csstools/css-tokenizer": "^3.0.4", "lru-cache": "^11.2.4" } }, "sha512-B0Hv6G3gWGMn0xKJ0txEi/jM5iFpT3MfDxmhZFb4W047GvytCf1DHQ1D69W3zHI4yWe2aTZAA0JnbMZ7Xc8DuQ=="], - - "@asamuzakjp/dom-selector": ["@asamuzakjp/dom-selector@6.7.6", "", { "dependencies": { "@asamuzakjp/nwsapi": "^2.3.9", "bidi-js": "^1.0.3", "css-tree": "^3.1.0", "is-potential-custom-element-name": "^1.0.1", "lru-cache": "^11.2.4" } }, "sha512-hBaJER6A9MpdG3WgdlOolHmbOYvSk46y7IQN/1+iqiCuUu6iWdQrs9DGKF8ocqsEqWujWf/V7b7vaDgiUmIvUg=="], - - "@asamuzakjp/nwsapi": ["@asamuzakjp/nwsapi@2.3.9", "", {}, "sha512-n8GuYSrI9bF7FFZ/SjhwevlHc8xaVlb/7HmHelnc/PZXBD2ZR49NnN9sMMuDdEGPeeRQ5d0hqlSlEpgCX3Wl0Q=="], - - "@babel/code-frame": ["@babel/code-frame@7.27.1", "", { "dependencies": { "@babel/helper-validator-identifier": "^7.27.1", "js-tokens": "^4.0.0", "picocolors": "^1.1.1" } }, 
"sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg=="], - - "@babel/compat-data": ["@babel/compat-data@7.28.5", "", {}, "sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA=="], - - "@babel/core": ["@babel/core@7.28.5", "", { "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/generator": "^7.28.5", "@babel/helper-compilation-targets": "^7.27.2", "@babel/helper-module-transforms": "^7.28.3", "@babel/helpers": "^7.28.4", "@babel/parser": "^7.28.5", "@babel/template": "^7.27.2", "@babel/traverse": "^7.28.5", "@babel/types": "^7.28.5", "@jridgewell/remapping": "^2.3.5", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", "json5": "^2.2.3", "semver": "^6.3.1" } }, "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw=="], - - "@babel/generator": ["@babel/generator@7.28.5", "", { "dependencies": { "@babel/parser": "^7.28.5", "@babel/types": "^7.28.5", "@jridgewell/gen-mapping": "^0.3.12", "@jridgewell/trace-mapping": "^0.3.28", "jsesc": "^3.0.2" } }, "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ=="], - - "@babel/helper-compilation-targets": ["@babel/helper-compilation-targets@7.27.2", "", { "dependencies": { "@babel/compat-data": "^7.27.2", "@babel/helper-validator-option": "^7.27.1", "browserslist": "^4.24.0", "lru-cache": "^5.1.1", "semver": "^6.3.1" } }, "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ=="], - - "@babel/helper-globals": ["@babel/helper-globals@7.28.0", "", {}, "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw=="], - - "@babel/helper-module-imports": ["@babel/helper-module-imports@7.27.1", "", { "dependencies": { "@babel/traverse": "^7.27.1", "@babel/types": "^7.27.1" } }, 
"sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w=="], - - "@babel/helper-module-transforms": ["@babel/helper-module-transforms@7.28.3", "", { "dependencies": { "@babel/helper-module-imports": "^7.27.1", "@babel/helper-validator-identifier": "^7.27.1", "@babel/traverse": "^7.28.3" }, "peerDependencies": { "@babel/core": "^7.0.0" } }, "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw=="], - - "@babel/helper-plugin-utils": ["@babel/helper-plugin-utils@7.27.1", "", {}, "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw=="], - - "@babel/helper-string-parser": ["@babel/helper-string-parser@7.27.1", "", {}, "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA=="], - - "@babel/helper-validator-identifier": ["@babel/helper-validator-identifier@7.28.5", "", {}, "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q=="], - - "@babel/helper-validator-option": ["@babel/helper-validator-option@7.27.1", "", {}, "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg=="], - - "@babel/helpers": ["@babel/helpers@7.28.4", "", { "dependencies": { "@babel/template": "^7.27.2", "@babel/types": "^7.28.4" } }, "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w=="], - - "@babel/parser": ["@babel/parser@7.28.5", "", { "dependencies": { "@babel/types": "^7.28.5" }, "bin": "./bin/babel-parser.js" }, "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ=="], - - "@babel/plugin-transform-react-jsx-self": ["@babel/plugin-transform-react-jsx-self@7.27.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, 
"sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw=="], - - "@babel/plugin-transform-react-jsx-source": ["@babel/plugin-transform-react-jsx-source@7.27.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw=="], - - "@babel/runtime": ["@babel/runtime@7.28.4", "", {}, "sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ=="], - - "@babel/template": ["@babel/template@7.27.2", "", { "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/parser": "^7.27.2", "@babel/types": "^7.27.1" } }, "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw=="], - - "@babel/traverse": ["@babel/traverse@7.28.5", "", { "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/generator": "^7.28.5", "@babel/helper-globals": "^7.28.0", "@babel/parser": "^7.28.5", "@babel/template": "^7.27.2", "@babel/types": "^7.28.5", "debug": "^4.3.1" } }, "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ=="], - - "@babel/types": ["@babel/types@7.28.5", "", { "dependencies": { "@babel/helper-string-parser": "^7.27.1", "@babel/helper-validator-identifier": "^7.28.5" } }, "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA=="], - - "@clack/core": ["@clack/core@0.5.0", "", { "dependencies": { "picocolors": "^1.0.0", "sisteransi": "^1.0.5" } }, "sha512-p3y0FIOwaYRUPRcMO7+dlmLh8PSRcrjuTndsiA0WAFbWES0mLZlrjVoBRZ9DzkPFJZG6KGkJmoEAY0ZcVWTkow=="], - - "@clack/prompts": ["@clack/prompts@0.11.0", "", { "dependencies": { "@clack/core": "0.5.0", "picocolors": "^1.0.0", "sisteransi": "^1.0.5" } }, "sha512-pMN5FcrEw9hUkZA4f+zLlzivQSeQf5dRGJjSUbvVYDLvpKCdQx5OaknvKzgbtXOizhP+SJJJjqEbOe55uKKfAw=="], - - "@csstools/color-helpers": 
["@csstools/color-helpers@5.1.0", "", {}, "sha512-S11EXWJyy0Mz5SYvRmY8nJYTFFd1LCNV+7cXyAgQtOOuzb4EsgfqDufL+9esx72/eLhsRdGZwaldu/h+E4t4BA=="], - - "@csstools/css-calc": ["@csstools/css-calc@2.1.4", "", { "peerDependencies": { "@csstools/css-parser-algorithms": "^3.0.5", "@csstools/css-tokenizer": "^3.0.4" } }, "sha512-3N8oaj+0juUw/1H3YwmDDJXCgTB1gKU6Hc/bB502u9zR0q2vd786XJH9QfrKIEgFlZmhZiq6epXl4rHqhzsIgQ=="], - - "@csstools/css-color-parser": ["@csstools/css-color-parser@3.1.0", "", { "dependencies": { "@csstools/color-helpers": "^5.1.0", "@csstools/css-calc": "^2.1.4" }, "peerDependencies": { "@csstools/css-parser-algorithms": "^3.0.5", "@csstools/css-tokenizer": "^3.0.4" } }, "sha512-nbtKwh3a6xNVIp/VRuXV64yTKnb1IjTAEEh3irzS+HkKjAOYLTGNb9pmVNntZ8iVBHcWDA2Dof0QtPgFI1BaTA=="], - - "@csstools/css-parser-algorithms": ["@csstools/css-parser-algorithms@3.0.5", "", { "peerDependencies": { "@csstools/css-tokenizer": "^3.0.4" } }, "sha512-DaDeUkXZKjdGhgYaHNJTV9pV7Y9B3b644jCLs9Upc3VeNGg6LWARAT6O+Q+/COo+2gg/bM5rhpMAtf70WqfBdQ=="], - - "@csstools/css-syntax-patches-for-csstree": ["@csstools/css-syntax-patches-for-csstree@1.0.14", "", { "peerDependencies": { "postcss": "^8.4" } }, "sha512-zSlIxa20WvMojjpCSy8WrNpcZ61RqfTfX3XTaOeVlGJrt/8HF3YbzgFZa01yTbT4GWQLwfTcC3EB8i3XnB647Q=="], - - "@csstools/css-tokenizer": ["@csstools/css-tokenizer@3.0.4", "", {}, "sha512-Vd/9EVDiu6PPJt9yAh6roZP6El1xHrdvIVGjyBsHR0RYwNHgL7FJPyIIW4fANJNG6FtyZfvlRPpFI4ZM/lubvw=="], - - "@date-fns/tz": ["@date-fns/tz@1.4.1", "", {}, "sha512-P5LUNhtbj6YfI3iJjw5EL9eUAG6OitD0W3fWQcpQjDRc/QIsL0tRNuO1PcDvPccWL1fSTXXdE1ds+l95DV/OFA=="], - - "@dprint/formatter": ["@dprint/formatter@0.3.0", "", {}, "sha512-N9fxCxbaBOrDkteSOzaCqwWjso5iAe+WJPsHC021JfHNj2ThInPNEF13ORDKta3llq5D1TlclODCvOvipH7bWQ=="], - - "@dprint/markdown": ["@dprint/markdown@0.17.8", "", {}, "sha512-ukHFOg+RpG284aPdIg7iPrCYmMs3Dqy43S1ejybnwlJoFiW02b+6Bbr5cfZKFRYNP3dKGM86BqHEnMzBOyLvvA=="], - - "@dprint/toml": ["@dprint/toml@0.6.4", "", {}, 
"sha512-bZXIUjxr0LIuHWshZr/5mtUkOrnh0NKVZEF6ACojW5z7zkJu7s9sV2mMXm8XQDqN4cJzdHYUYzUyEGdfciaLJA=="], - - "@emnapi/core": ["@emnapi/core@1.7.1", "", { "dependencies": { "@emnapi/wasi-threads": "1.1.0", "tslib": "^2.4.0" } }, "sha512-o1uhUASyo921r2XtHYOHy7gdkGLge8ghBEQHMWmyJFoXlpU58kIrhhN3w26lpQb6dspetweapMn2CSNwQ8I4wg=="], - - "@emnapi/runtime": ["@emnapi/runtime@1.7.1", "", { "dependencies": { "tslib": "^2.4.0" } }, "sha512-PVtJr5CmLwYAU9PZDMITZoR5iAOShYREoR45EyyLrbntV50mdePTgUn4AmOw90Ifcj+x2kRjdzr1HP3RrNiHGA=="], - - "@emnapi/wasi-threads": ["@emnapi/wasi-threads@1.1.0", "", { "dependencies": { "tslib": "^2.4.0" } }, "sha512-WI0DdZ8xFSbgMjR1sFsKABJ/C5OnRrjT06JXbZKexJGrDuPTzZdDYfFlsgcCXCyf+suG5QU2e/y1Wo2V/OapLQ=="], - - "@emotion/is-prop-valid": ["@emotion/is-prop-valid@1.2.2", "", { "dependencies": { "@emotion/memoize": "^0.8.1" } }, "sha512-uNsoYd37AFmaCdXlg6EYD1KaPOaRWRByMCYzbKUX4+hhMfrxdVSelShywL4JVaAeM/eHUOSprYBQls+/neX3pw=="], - - "@emotion/memoize": ["@emotion/memoize@0.8.1", "", {}, "sha512-W2P2c/VRW1/1tLox0mVUalvnWXxavmv/Oum2aPsRcoDJuob75FC3Y8FbpfLwUegRcxINtGUMPq0tFCvYNTBXNA=="], - - "@emotion/unitless": ["@emotion/unitless@0.8.1", "", {}, "sha512-KOEGMu6dmJZtpadb476IsZBclKvILjopjUii3V+7MnXIQCYh8W3NgNcgwo21n9LXZX6EDIKvqfjYxXebDwxKmQ=="], - - "@es-joy/jsdoccomment": ["@es-joy/jsdoccomment@0.78.0", "", { "dependencies": { "@types/estree": "^1.0.8", "@typescript-eslint/types": "^8.46.4", "comment-parser": "1.4.1", "esquery": "^1.6.0", "jsdoc-type-pratt-parser": "~7.0.0" } }, "sha512-rQkU5u8hNAq2NVRzHnIUUvR6arbO0b6AOlvpTNS48CkiKSn/xtNfOzBK23JE4SiW89DgvU7GtxLVgV4Vn2HBAw=="], - - "@es-joy/resolve.exports": ["@es-joy/resolve.exports@1.2.0", "", {}, "sha512-Q9hjxWI5xBM+qW2enxfe8wDKdFWMfd0Z29k5ZJnuBqD/CasY5Zryj09aCA6owbGATWz+39p5uIdaHXpopOcG8g=="], - - "@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.25.12", "", { "os": "aix", "cpu": "ppc64" }, "sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA=="], - - "@esbuild/android-arm": 
["@esbuild/android-arm@0.25.12", "", { "os": "android", "cpu": "arm" }, "sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg=="], - - "@esbuild/android-arm64": ["@esbuild/android-arm64@0.25.12", "", { "os": "android", "cpu": "arm64" }, "sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg=="], - - "@esbuild/android-x64": ["@esbuild/android-x64@0.25.12", "", { "os": "android", "cpu": "x64" }, "sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg=="], - - "@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.25.12", "", { "os": "darwin", "cpu": "arm64" }, "sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg=="], - - "@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.25.12", "", { "os": "darwin", "cpu": "x64" }, "sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA=="], - - "@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.25.12", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg=="], - - "@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.25.12", "", { "os": "freebsd", "cpu": "x64" }, "sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ=="], - - "@esbuild/linux-arm": ["@esbuild/linux-arm@0.25.12", "", { "os": "linux", "cpu": "arm" }, "sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw=="], - - "@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.25.12", "", { "os": "linux", "cpu": "arm64" }, "sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ=="], - - "@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.25.12", "", { "os": "linux", "cpu": "ia32" }, "sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA=="], - - 
"@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.25.12", "", { "os": "linux", "cpu": "none" }, "sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng=="], - - "@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.25.12", "", { "os": "linux", "cpu": "none" }, "sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw=="], - - "@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.25.12", "", { "os": "linux", "cpu": "ppc64" }, "sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA=="], - - "@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.25.12", "", { "os": "linux", "cpu": "none" }, "sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w=="], - - "@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.25.12", "", { "os": "linux", "cpu": "s390x" }, "sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg=="], - - "@esbuild/linux-x64": ["@esbuild/linux-x64@0.25.12", "", { "os": "linux", "cpu": "x64" }, "sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw=="], - - "@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.25.12", "", { "os": "none", "cpu": "arm64" }, "sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg=="], - - "@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.25.12", "", { "os": "none", "cpu": "x64" }, "sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ=="], - - "@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.25.12", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A=="], - - "@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.25.12", "", { "os": "openbsd", "cpu": "x64" }, 
"sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw=="], - - "@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.25.12", "", { "os": "none", "cpu": "arm64" }, "sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg=="], - - "@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.25.12", "", { "os": "sunos", "cpu": "x64" }, "sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w=="], - - "@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.25.12", "", { "os": "win32", "cpu": "arm64" }, "sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg=="], - - "@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.25.12", "", { "os": "win32", "cpu": "ia32" }, "sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ=="], - - "@esbuild/win32-x64": ["@esbuild/win32-x64@0.25.12", "", { "os": "win32", "cpu": "x64" }, "sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA=="], - - "@eslint-community/eslint-plugin-eslint-comments": ["@eslint-community/eslint-plugin-eslint-comments@4.5.0", "", { "dependencies": { "escape-string-regexp": "^4.0.0", "ignore": "^5.2.4" }, "peerDependencies": { "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0 || ^9.0.0" } }, "sha512-MAhuTKlr4y/CE3WYX26raZjy+I/kS2PLKSzvfmDCGrBLTFHOYwqROZdr4XwPgXwX3K9rjzMr4pSmUWGnzsUyMg=="], - - "@eslint-community/eslint-utils": ["@eslint-community/eslint-utils@4.9.0", "", { "dependencies": { "eslint-visitor-keys": "^3.4.3" }, "peerDependencies": { "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" } }, "sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g=="], - - "@eslint-community/regexpp": ["@eslint-community/regexpp@4.12.2", "", {}, "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew=="], - - "@eslint-react/ast": 
["@eslint-react/ast@2.3.13", "", { "dependencies": { "@eslint-react/eff": "2.3.13", "@typescript-eslint/types": "^8.49.0", "@typescript-eslint/typescript-estree": "^8.49.0", "@typescript-eslint/utils": "^8.49.0", "string-ts": "^2.3.1" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-OP2rOhHYLx2nfd9uA9uACKZJN9z9rX9uuAMx4PjT75JNOdYr1GgqWQZcYCepyJ+gmVNCyiXcLXuyhavqxCSM8Q=="], - - "@eslint-react/core": ["@eslint-react/core@2.3.13", "", { "dependencies": { "@eslint-react/ast": "2.3.13", "@eslint-react/eff": "2.3.13", "@eslint-react/shared": "2.3.13", "@eslint-react/var": "2.3.13", "@typescript-eslint/scope-manager": "^8.49.0", "@typescript-eslint/types": "^8.49.0", "@typescript-eslint/utils": "^8.49.0", "birecord": "^0.1.1", "ts-pattern": "^5.9.0" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-4bWBE+1kApuxJKIrLJH2FuFtCbM4fXfDs6Ou8MNamGoX6hdynlntssvaMZTd/lk/L8dt01H/3btr7xBX4+4BNA=="], - - "@eslint-react/eff": ["@eslint-react/eff@2.3.13", "", {}, "sha512-byXsssozwh3VaiqcOonAKQgLXgpMVNSxBWFjdfbNhW7+NttorSt950qtiw+P7A9JoRab1OuGYk4MDY5UVBno8Q=="], - - "@eslint-react/eslint-plugin": ["@eslint-react/eslint-plugin@2.3.13", "", { "dependencies": { "@eslint-react/eff": "2.3.13", "@eslint-react/shared": "2.3.13", "@typescript-eslint/scope-manager": "^8.49.0", "@typescript-eslint/type-utils": "^8.49.0", "@typescript-eslint/types": "^8.49.0", "@typescript-eslint/utils": "^8.49.0", "eslint-plugin-react-dom": "2.3.13", "eslint-plugin-react-hooks-extra": "2.3.13", "eslint-plugin-react-naming-convention": "2.3.13", "eslint-plugin-react-web-api": "2.3.13", "eslint-plugin-react-x": "2.3.13", "ts-api-utils": "^2.1.0" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-gq0Z0wADAXvJS8Y/Wk3isK7WIEcfrQGGGdWvorAv0T7MxPd3d32TVwdc1Gx3hVLka3fYq1BBlQ5Fr8e1VgNuIg=="], - - "@eslint-react/shared": ["@eslint-react/shared@2.3.13", "", { "dependencies": { 
"@eslint-react/eff": "2.3.13", "@typescript-eslint/utils": "^8.49.0", "ts-pattern": "^5.9.0", "zod": "^4.1.13" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-ESE7dVeOXtem3K6BD6k2wJaFt35kPtTT9SWCL99LFk7pym4OEGoMxPcyB2R7PMWiVudwl63BmiOgQOdaFYPONg=="], - - "@eslint-react/var": ["@eslint-react/var@2.3.13", "", { "dependencies": { "@eslint-react/ast": "2.3.13", "@eslint-react/eff": "2.3.13", "@typescript-eslint/scope-manager": "^8.49.0", "@typescript-eslint/types": "^8.49.0", "@typescript-eslint/utils": "^8.49.0", "ts-pattern": "^5.9.0" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-BozBfUZkzzobD6x/M8XERAnZQ3UvZPsD49zTGFKKU9M/bgsM78HwzxAPLkiu88W55v3sO/Kqf8fQTXT4VEeZ/g=="], - - "@eslint/compat": ["@eslint/compat@1.4.1", "", { "dependencies": { "@eslint/core": "^0.17.0" }, "peerDependencies": { "eslint": "^8.40 || 9" }, "optionalPeers": ["eslint"] }, "sha512-cfO82V9zxxGBxcQDr1lfaYB7wykTa0b00mGa36FrJl7iTFd0Z2cHfEYuxcBRP/iNijCsWsEkA+jzT8hGYmv33w=="], - - "@eslint/config-array": ["@eslint/config-array@0.21.1", "", { "dependencies": { "@eslint/object-schema": "^2.1.7", "debug": "^4.3.1", "minimatch": "^3.1.2" } }, "sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA=="], - - "@eslint/config-helpers": ["@eslint/config-helpers@0.4.2", "", { "dependencies": { "@eslint/core": "^0.17.0" } }, "sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw=="], - - "@eslint/core": ["@eslint/core@0.17.0", "", { "dependencies": { "@types/json-schema": "^7.0.15" } }, "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ=="], - - "@eslint/eslintrc": ["@eslint/eslintrc@3.3.3", "", { "dependencies": { "ajv": "^6.12.4", "debug": "^4.3.2", "espree": "^10.0.1", "globals": "^14.0.0", "ignore": "^5.2.0", "import-fresh": "^3.2.1", "js-yaml": "^4.1.1", "minimatch": "^3.1.2", 
"strip-json-comments": "^3.1.1" } }, "sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ=="], - - "@eslint/js": ["@eslint/js@9.39.2", "", {}, "sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA=="], - - "@eslint/markdown": ["@eslint/markdown@7.5.1", "", { "dependencies": { "@eslint/core": "^0.17.0", "@eslint/plugin-kit": "^0.4.1", "github-slugger": "^2.0.0", "mdast-util-from-markdown": "^2.0.2", "mdast-util-frontmatter": "^2.0.1", "mdast-util-gfm": "^3.1.0", "micromark-extension-frontmatter": "^2.0.0", "micromark-extension-gfm": "^3.0.0", "micromark-util-normalize-identifier": "^2.0.1" } }, "sha512-R8uZemG9dKTbru/DQRPblbJyXpObwKzo8rv1KYGGuPUPtjM4LXBYM9q5CIZAComzZupws3tWbDwam5AFpPLyJQ=="], - - "@eslint/object-schema": ["@eslint/object-schema@2.1.7", "", {}, "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA=="], - - "@eslint/plugin-kit": ["@eslint/plugin-kit@0.4.1", "", { "dependencies": { "@eslint/core": "^0.17.0", "levn": "^0.4.1" } }, "sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA=="], - - "@floating-ui/core": ["@floating-ui/core@1.7.3", "", { "dependencies": { "@floating-ui/utils": "^0.2.10" } }, "sha512-sGnvb5dmrJaKEZ+LDIpguvdX3bDlEllmv4/ClQ9awcmCZrlx5jQyyMWFM5kBI+EyNOCDDiKk8il0zeuX3Zlg/w=="], - - "@floating-ui/dom": ["@floating-ui/dom@1.7.4", "", { "dependencies": { "@floating-ui/core": "^1.7.3", "@floating-ui/utils": "^0.2.10" } }, "sha512-OOchDgh4F2CchOX94cRVqhvy7b3AFb+/rQXyswmzmGakRfkMgoWVjfnLWkRirfLEfuD4ysVW16eXzwt3jHIzKA=="], - - "@floating-ui/react": ["@floating-ui/react@0.27.16", "", { "dependencies": { "@floating-ui/react-dom": "^2.1.6", "@floating-ui/utils": "^0.2.10", "tabbable": "^6.0.0" }, "peerDependencies": { "react": ">=17.0.0", "react-dom": ">=17.0.0" } }, "sha512-9O8N4SeG2z++TSM8QA/KTeKFBVCNEz/AGS7gWPJf6KFRzmRWixFRnCnkPHRDwSVZW6QPDO6uT0P2SpWNKCc9/g=="], - - 
"@floating-ui/react-dom": ["@floating-ui/react-dom@2.1.6", "", { "dependencies": { "@floating-ui/dom": "^1.7.4" }, "peerDependencies": { "react": ">=16.8.0", "react-dom": ">=16.8.0" } }, "sha512-4JX6rEatQEvlmgU80wZyq9RT96HZJa88q8hp0pBd+LrczeDI4o6uA2M+uvxngVHo4Ihr8uibXxH6+70zhAFrVw=="], - - "@floating-ui/utils": ["@floating-ui/utils@0.2.10", "", {}, "sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ=="], - - "@humanfs/core": ["@humanfs/core@0.19.1", "", {}, "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA=="], - - "@humanfs/node": ["@humanfs/node@0.16.7", "", { "dependencies": { "@humanfs/core": "^0.19.1", "@humanwhocodes/retry": "^0.4.0" } }, "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ=="], - - "@humanwhocodes/module-importer": ["@humanwhocodes/module-importer@1.0.1", "", {}, "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA=="], - - "@humanwhocodes/retry": ["@humanwhocodes/retry@0.4.3", "", {}, "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ=="], - - "@img/colour": ["@img/colour@1.0.0", "", {}, "sha512-A5P/LfWGFSl6nsckYtjw9da+19jB8hkJ6ACTGcDfEJ0aE+l2n2El7dsVM7UVHZQ9s2lmYMWlrS21YLy2IR1LUw=="], - - "@img/sharp-darwin-arm64": ["@img/sharp-darwin-arm64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-darwin-arm64": "1.2.4" }, "os": "darwin", "cpu": "arm64" }, "sha512-imtQ3WMJXbMY4fxb/Ndp6HBTNVtWCUI0WdobyheGf5+ad6xX8VIDO8u2xE4qc/fr08CKG/7dDseFtn6M6g/r3w=="], - - "@img/sharp-darwin-x64": ["@img/sharp-darwin-x64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-darwin-x64": "1.2.4" }, "os": "darwin", "cpu": "x64" }, "sha512-YNEFAF/4KQ/PeW0N+r+aVVsoIY0/qxxikF2SWdp+NRkmMB7y9LBZAVqQ4yhGCm/H3H270OSykqmQMKLBhBJDEw=="], - - "@img/sharp-libvips-darwin-arm64": ["@img/sharp-libvips-darwin-arm64@1.2.4", "", { "os": "darwin", 
"cpu": "arm64" }, "sha512-zqjjo7RatFfFoP0MkQ51jfuFZBnVE2pRiaydKJ1G/rHZvnsrHAOcQALIi9sA5co5xenQdTugCvtb1cuf78Vf4g=="], - - "@img/sharp-libvips-darwin-x64": ["@img/sharp-libvips-darwin-x64@1.2.4", "", { "os": "darwin", "cpu": "x64" }, "sha512-1IOd5xfVhlGwX+zXv2N93k0yMONvUlANylbJw1eTah8K/Jtpi15KC+WSiaX/nBmbm2HxRM1gZ0nSdjSsrZbGKg=="], - - "@img/sharp-libvips-linux-arm": ["@img/sharp-libvips-linux-arm@1.2.4", "", { "os": "linux", "cpu": "arm" }, "sha512-bFI7xcKFELdiNCVov8e44Ia4u2byA+l3XtsAj+Q8tfCwO6BQ8iDojYdvoPMqsKDkuoOo+X6HZA0s0q11ANMQ8A=="], - - "@img/sharp-libvips-linux-arm64": ["@img/sharp-libvips-linux-arm64@1.2.4", "", { "os": "linux", "cpu": "arm64" }, "sha512-excjX8DfsIcJ10x1Kzr4RcWe1edC9PquDRRPx3YVCvQv+U5p7Yin2s32ftzikXojb1PIFc/9Mt28/y+iRklkrw=="], - - "@img/sharp-libvips-linux-ppc64": ["@img/sharp-libvips-linux-ppc64@1.2.4", "", { "os": "linux", "cpu": "ppc64" }, "sha512-FMuvGijLDYG6lW+b/UvyilUWu5Ayu+3r2d1S8notiGCIyYU/76eig1UfMmkZ7vwgOrzKzlQbFSuQfgm7GYUPpA=="], - - "@img/sharp-libvips-linux-riscv64": ["@img/sharp-libvips-linux-riscv64@1.2.4", "", { "os": "linux", "cpu": "none" }, "sha512-oVDbcR4zUC0ce82teubSm+x6ETixtKZBh/qbREIOcI3cULzDyb18Sr/Wcyx7NRQeQzOiHTNbZFF1UwPS2scyGA=="], - - "@img/sharp-libvips-linux-s390x": ["@img/sharp-libvips-linux-s390x@1.2.4", "", { "os": "linux", "cpu": "s390x" }, "sha512-qmp9VrzgPgMoGZyPvrQHqk02uyjA0/QrTO26Tqk6l4ZV0MPWIW6LTkqOIov+J1yEu7MbFQaDpwdwJKhbJvuRxQ=="], - - "@img/sharp-libvips-linux-x64": ["@img/sharp-libvips-linux-x64@1.2.4", "", { "os": "linux", "cpu": "x64" }, "sha512-tJxiiLsmHc9Ax1bz3oaOYBURTXGIRDODBqhveVHonrHJ9/+k89qbLl0bcJns+e4t4rvaNBxaEZsFtSfAdquPrw=="], - - "@img/sharp-libvips-linuxmusl-arm64": ["@img/sharp-libvips-linuxmusl-arm64@1.2.4", "", { "os": "linux", "cpu": "arm64" }, "sha512-FVQHuwx1IIuNow9QAbYUzJ+En8KcVm9Lk5+uGUQJHaZmMECZmOlix9HnH7n1TRkXMS0pGxIJokIVB9SuqZGGXw=="], - - "@img/sharp-libvips-linuxmusl-x64": ["@img/sharp-libvips-linuxmusl-x64@1.2.4", "", { "os": "linux", "cpu": "x64" }, 
"sha512-+LpyBk7L44ZIXwz/VYfglaX/okxezESc6UxDSoyo2Ks6Jxc4Y7sGjpgU9s4PMgqgjj1gZCylTieNamqA1MF7Dg=="], - - "@img/sharp-linux-arm": ["@img/sharp-linux-arm@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-arm": "1.2.4" }, "os": "linux", "cpu": "arm" }, "sha512-9dLqsvwtg1uuXBGZKsxem9595+ujv0sJ6Vi8wcTANSFpwV/GONat5eCkzQo/1O6zRIkh0m/8+5BjrRr7jDUSZw=="], - - "@img/sharp-linux-arm64": ["@img/sharp-linux-arm64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-arm64": "1.2.4" }, "os": "linux", "cpu": "arm64" }, "sha512-bKQzaJRY/bkPOXyKx5EVup7qkaojECG6NLYswgktOZjaXecSAeCWiZwwiFf3/Y+O1HrauiE3FVsGxFg8c24rZg=="], - - "@img/sharp-linux-ppc64": ["@img/sharp-linux-ppc64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-ppc64": "1.2.4" }, "os": "linux", "cpu": "ppc64" }, "sha512-7zznwNaqW6YtsfrGGDA6BRkISKAAE1Jo0QdpNYXNMHu2+0dTrPflTLNkpc8l7MUP5M16ZJcUvysVWWrMefZquA=="], - - "@img/sharp-linux-riscv64": ["@img/sharp-linux-riscv64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-riscv64": "1.2.4" }, "os": "linux", "cpu": "none" }, "sha512-51gJuLPTKa7piYPaVs8GmByo7/U7/7TZOq+cnXJIHZKavIRHAP77e3N2HEl3dgiqdD/w0yUfiJnII77PuDDFdw=="], - - "@img/sharp-linux-s390x": ["@img/sharp-linux-s390x@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-s390x": "1.2.4" }, "os": "linux", "cpu": "s390x" }, "sha512-nQtCk0PdKfho3eC5MrbQoigJ2gd1CgddUMkabUj+rBevs8tZ2cULOx46E7oyX+04WGfABgIwmMC0VqieTiR4jg=="], - - "@img/sharp-linux-x64": ["@img/sharp-linux-x64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-x64": "1.2.4" }, "os": "linux", "cpu": "x64" }, "sha512-MEzd8HPKxVxVenwAa+JRPwEC7QFjoPWuS5NZnBt6B3pu7EG2Ge0id1oLHZpPJdn3OQK+BQDiw9zStiHBTJQQQQ=="], - - "@img/sharp-linuxmusl-arm64": ["@img/sharp-linuxmusl-arm64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linuxmusl-arm64": "1.2.4" }, "os": "linux", "cpu": "arm64" }, 
"sha512-fprJR6GtRsMt6Kyfq44IsChVZeGN97gTD331weR1ex1c1rypDEABN6Tm2xa1wE6lYb5DdEnk03NZPqA7Id21yg=="], - - "@img/sharp-linuxmusl-x64": ["@img/sharp-linuxmusl-x64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linuxmusl-x64": "1.2.4" }, "os": "linux", "cpu": "x64" }, "sha512-Jg8wNT1MUzIvhBFxViqrEhWDGzqymo3sV7z7ZsaWbZNDLXRJZoRGrjulp60YYtV4wfY8VIKcWidjojlLcWrd8Q=="], - - "@img/sharp-wasm32": ["@img/sharp-wasm32@0.34.5", "", { "dependencies": { "@emnapi/runtime": "^1.7.0" }, "cpu": "none" }, "sha512-OdWTEiVkY2PHwqkbBI8frFxQQFekHaSSkUIJkwzclWZe64O1X4UlUjqqqLaPbUpMOQk6FBu/HtlGXNblIs0huw=="], - - "@img/sharp-win32-arm64": ["@img/sharp-win32-arm64@0.34.5", "", { "os": "win32", "cpu": "arm64" }, "sha512-WQ3AgWCWYSb2yt+IG8mnC6Jdk9Whs7O0gxphblsLvdhSpSTtmu69ZG1Gkb6NuvxsNACwiPV6cNSZNzt0KPsw7g=="], - - "@img/sharp-win32-ia32": ["@img/sharp-win32-ia32@0.34.5", "", { "os": "win32", "cpu": "ia32" }, "sha512-FV9m/7NmeCmSHDD5j4+4pNI8Cp3aW+JvLoXcTUo0IqyjSfAZJ8dIUmijx1qaJsIiU+Hosw6xM5KijAWRJCSgNg=="], - - "@img/sharp-win32-x64": ["@img/sharp-win32-x64@0.34.5", "", { "os": "win32", "cpu": "x64" }, "sha512-+29YMsqY2/9eFEiW93eqWnuLcWcufowXewwSNIT6UwZdUUCrM3oFjMWH/Z6/TMmb4hlFenmfAVbpWeup2jryCw=="], - - "@jridgewell/gen-mapping": ["@jridgewell/gen-mapping@0.3.13", "", { "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.0", "@jridgewell/trace-mapping": "^0.3.24" } }, "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA=="], - - "@jridgewell/remapping": ["@jridgewell/remapping@2.3.5", "", { "dependencies": { "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.24" } }, "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ=="], - - "@jridgewell/resolve-uri": ["@jridgewell/resolve-uri@3.1.2", "", {}, "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw=="], - - "@jridgewell/sourcemap-codec": ["@jridgewell/sourcemap-codec@1.5.5", "", {}, 
"sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og=="], - - "@jridgewell/trace-mapping": ["@jridgewell/trace-mapping@0.3.31", "", { "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" } }, "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw=="], - - "@kurkle/color": ["@kurkle/color@0.3.4", "", {}, "sha512-M5UknZPHRu3DEDWoipU6sE8PdkZ6Z/S+v4dD+Ke8IaNlpdSQah50lz1KtcFBa2vsdOnwbbnxJwVM4wty6udA5w=="], - - "@napi-rs/wasm-runtime": ["@napi-rs/wasm-runtime@0.2.12", "", { "dependencies": { "@emnapi/core": "^1.4.3", "@emnapi/runtime": "^1.4.3", "@tybys/wasm-util": "^0.10.0" } }, "sha512-ZVWUcfwY4E/yPitQJl481FjFo3K22D6qF0DuFH6Y/nbnE11GY5uguDxZMGXPQ8WQ0128MXQD7TnfHyK4oWoIJQ=="], - - "@next/env": ["@next/env@15.5.8", "", {}, "sha512-ejZHa3ogTxcy851dFoNtfB5B2h7AbSAtHbR5CymUlnz4yW1QjHNufVpvTu8PTnWBKFKjrd4k6Gbi2SsCiJKvxw=="], - - "@next/eslint-plugin-next": ["@next/eslint-plugin-next@15.5.9", "", { "dependencies": { "fast-glob": "3.3.1" } }, "sha512-kUzXx0iFiXw27cQAViE1yKWnz/nF8JzRmwgMRTMh8qMY90crNsdXJRh2e+R0vBpFR3kk1yvAR7wev7+fCCb79Q=="], - - "@next/swc-darwin-arm64": ["@next/swc-darwin-arm64@15.5.7", "", { "os": "darwin", "cpu": "arm64" }, "sha512-IZwtxCEpI91HVU/rAUOOobWSZv4P2DeTtNaCdHqLcTJU4wdNXgAySvKa/qJCgR5m6KI8UsKDXtO2B31jcaw1Yw=="], - - "@next/swc-darwin-x64": ["@next/swc-darwin-x64@15.5.7", "", { "os": "darwin", "cpu": "x64" }, "sha512-UP6CaDBcqaCBuiq/gfCEJw7sPEoX1aIjZHnBWN9v9qYHQdMKvCKcAVs4OX1vIjeE+tC5EIuwDTVIoXpUes29lg=="], - - "@next/swc-linux-arm64-gnu": ["@next/swc-linux-arm64-gnu@15.5.7", "", { "os": "linux", "cpu": "arm64" }, "sha512-NCslw3GrNIw7OgmRBxHtdWFQYhexoUCq+0oS2ccjyYLtcn1SzGzeM54jpTFonIMUjNbHmpKpziXnpxhSWLcmBA=="], - - "@next/swc-linux-arm64-musl": ["@next/swc-linux-arm64-musl@15.5.7", "", { "os": "linux", "cpu": "arm64" }, "sha512-nfymt+SE5cvtTrG9u1wdoxBr9bVB7mtKTcj0ltRn6gkP/2Nu1zM5ei8rwP9qKQP0Y//umK+TtkKgNtfboBxRrw=="], - - 
"@next/swc-linux-x64-gnu": ["@next/swc-linux-x64-gnu@15.5.7", "", { "os": "linux", "cpu": "x64" }, "sha512-hvXcZvCaaEbCZcVzcY7E1uXN9xWZfFvkNHwbe/n4OkRhFWrs1J1QV+4U1BN06tXLdaS4DazEGXwgqnu/VMcmqw=="], - - "@next/swc-linux-x64-musl": ["@next/swc-linux-x64-musl@15.5.7", "", { "os": "linux", "cpu": "x64" }, "sha512-4IUO539b8FmF0odY6/SqANJdgwn1xs1GkPO5doZugwZ3ETF6JUdckk7RGmsfSf7ws8Qb2YB5It33mvNL/0acqA=="], - - "@next/swc-win32-arm64-msvc": ["@next/swc-win32-arm64-msvc@15.5.7", "", { "os": "win32", "cpu": "arm64" }, "sha512-CpJVTkYI3ZajQkC5vajM7/ApKJUOlm6uP4BknM3XKvJ7VXAvCqSjSLmM0LKdYzn6nBJVSjdclx8nYJSa3xlTgQ=="], - - "@next/swc-win32-x64-msvc": ["@next/swc-win32-x64-msvc@15.5.7", "", { "os": "win32", "cpu": "x64" }, "sha512-gMzgBX164I6DN+9/PGA+9dQiwmTkE4TloBNx8Kv9UiGARsr9Nba7IpcBRA1iTV9vwlYnrE3Uy6I7Aj6qLjQuqw=="], - - "@nodelib/fs.scandir": ["@nodelib/fs.scandir@2.1.5", "", { "dependencies": { "@nodelib/fs.stat": "2.0.5", "run-parallel": "^1.1.9" } }, "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g=="], - - "@nodelib/fs.stat": ["@nodelib/fs.stat@2.0.5", "", {}, "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A=="], - - "@nodelib/fs.walk": ["@nodelib/fs.walk@1.2.8", "", { "dependencies": { "@nodelib/fs.scandir": "2.1.5", "fastq": "^1.6.0" } }, "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg=="], - - "@nolyfill/is-core-module": ["@nolyfill/is-core-module@1.0.39", "", {}, "sha512-nn5ozdjYQpUCZlWGuxcJY/KpxkWQs4DcbMCmKojjyrYDEAGy4Ce19NN4v5MduafTwJlbKc99UA8YhSVqq9yPZA=="], - - "@pkgr/core": ["@pkgr/core@0.2.9", "", {}, "sha512-QNqXyfVS2wm9hweSYD2O7F0G06uurj9kZ96TRQE5Y9hU7+tgdZwIkbAKc5Ocy1HxEY2kuDQa6cQ1WRs/O5LFKA=="], - - "@radix-ui/number": ["@radix-ui/number@1.1.1", "", {}, "sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g=="], - - "@radix-ui/primitive": ["@radix-ui/primitive@1.1.3", "", {}, 
"sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg=="], - - "@radix-ui/react-accordion": ["@radix-ui/react-accordion@1.2.12", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-collapsible": "1.1.12", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-T4nygeh9YE9dLRPhAHSeOZi7HBXo+0kYIPJXayZfvWOWA0+n3dESrZbjfDPUABkUNym6Hd+f2IR113To8D2GPA=="], - - "@radix-ui/react-arrow": ["@radix-ui/react-arrow@1.1.7", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w=="], - - "@radix-ui/react-collapsible": ["@radix-ui/react-collapsible@1.1.12", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": 
["@types/react", "@types/react-dom"] }, "sha512-Uu+mSh4agx2ib1uIGPP4/CKNULyajb3p92LsVXmH2EHVMTfZWpll88XJ0j4W0z3f8NK1eYl1+Mf/szHPmcHzyA=="], - - "@radix-ui/react-collection": ["@radix-ui/react-collection@1.1.7", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-Fh9rGN0MoI4ZFUNyfFVNU4y9LUz93u9/0K+yLgA2bwRojxM8JU1DyvvMBabnZPBgMWREAJvU2jjVzq+LrFUglw=="], - - "@radix-ui/react-compose-refs": ["@radix-ui/react-compose-refs@1.1.2", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg=="], - - "@radix-ui/react-context": ["@radix-ui/react-context@1.1.2", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA=="], - - "@radix-ui/react-dialog": ["@radix-ui/react-dialog@1.1.15", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-dismissable-layer": "1.1.11", "@radix-ui/react-focus-guards": "1.1.3", "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-controllable-state": "1.2.2", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" }, "peerDependencies": { 
"@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-TCglVRtzlffRNxRMEyR36DGBLJpeusFcgMVD9PZEzAKnUs1lKCgX5u9BmC2Yg+LL9MgZDugFFs1Vl+Jp4t/PGw=="], - - "@radix-ui/react-direction": ["@radix-ui/react-direction@1.1.1", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-1UEWRX6jnOA2y4H5WczZ44gOOjTEmlqv1uNW4GAJEO5+bauCBhv8snY65Iw5/VOS/ghKN9gr2KjnLKxrsvoMVw=="], - - "@radix-ui/react-dismissable-layer": ["@radix-ui/react-dismissable-layer@1.1.11", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-escape-keydown": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg=="], - - "@radix-ui/react-dropdown-menu": ["@radix-ui/react-dropdown-menu@2.1.16", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-menu": "2.1.16", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, 
"sha512-1PLGQEynI/3OX/ftV54COn+3Sud/Mn8vALg2rWnBLnRaGtJDduNW/22XjlGgPdpcIbiQxjKtb7BkcjP00nqfJw=="], - - "@radix-ui/react-focus-guards": ["@radix-ui/react-focus-guards@1.1.3", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-0rFg/Rj2Q62NCm62jZw0QX7a3sz6QCQU0LpZdNrJX8byRGaGVTqbrW9jAoIAHyMQqsNpeZ81YgSizOt5WXq0Pw=="], - - "@radix-ui/react-focus-scope": ["@radix-ui/react-focus-scope@1.1.7", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw=="], - - "@radix-ui/react-icons": ["@radix-ui/react-icons@1.3.2", "", { "peerDependencies": { "react": "^16.x || ^17.x || ^18.x || ^19.0.0 || ^19.0.0-rc" } }, "sha512-fyQIhGDhzfc9pK2kH6Pl9c4BDJGfMkPqkyIgYDthyNYoNg3wVhoJMMh19WS4Up/1KMPFVpNsT2q3WmXn2N1m6g=="], - - "@radix-ui/react-id": ["@radix-ui/react-id@1.1.1", "", { "dependencies": { "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg=="], - - "@radix-ui/react-label": ["@radix-ui/react-label@2.1.8", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.4" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, 
"sha512-FmXs37I6hSBVDlO4y764TNz1rLgKwjJMQ0EGte6F3Cb3f4bIuHB/iLa/8I9VKkmOy+gNHq8rql3j686ACVV21A=="], - - "@radix-ui/react-menu": ["@radix-ui/react-menu@2.1.16", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-dismissable-layer": "1.1.11", "@radix-ui/react-focus-guards": "1.1.3", "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-popper": "1.2.8", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.11", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-callback-ref": "1.1.1", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-72F2T+PLlphrqLcAotYPp0uJMr5SjP5SL01wfEspJbru5Zs5vQaSHb4VB3ZMJPimgHHCHG7gMOeOB9H3Hdmtxg=="], - - "@radix-ui/react-navigation-menu": ["@radix-ui/react-navigation-menu@1.2.14", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-dismissable-layer": "1.1.11", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || 
^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-YB9mTFQvCOAQMHU+C/jVl96WmuWeltyUEpRJJky51huhds5W2FQr1J8D/16sQlf0ozxkPK8uF3niQMdUwZPv5w=="], - - "@radix-ui/react-popover": ["@radix-ui/react-popover@1.1.15", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-dismissable-layer": "1.1.11", "@radix-ui/react-focus-guards": "1.1.3", "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-popper": "1.2.8", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-controllable-state": "1.2.2", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-kr0X2+6Yy/vJzLYJUPCZEc8SfQcf+1COFoAqauJm74umQhta9M7lNJHP7QQS3vkvcGLQUbWpMzwrXYwrYztHKA=="], - - "@radix-ui/react-popper": ["@radix-ui/react-popper@1.2.8", "", { "dependencies": { "@floating-ui/react-dom": "^2.0.0", "@radix-ui/react-arrow": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-rect": "1.1.1", "@radix-ui/react-use-size": "1.1.1", "@radix-ui/rect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, 
"sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw=="], - - "@radix-ui/react-portal": ["@radix-ui/react-portal@1.1.9", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ=="], - - "@radix-ui/react-presence": ["@radix-ui/react-presence@1.1.5", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ=="], - - "@radix-ui/react-primitive": ["@radix-ui/react-primitive@2.1.3", "", { "dependencies": { "@radix-ui/react-slot": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ=="], - - "@radix-ui/react-roving-focus": ["@radix-ui/react-roving-focus@1.1.11", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-primitive": "2.1.3", 
"@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-7A6S9jSgm/S+7MdtNDSb+IU859vQqJ/QAtcYQcfFC6W8RS4IxIZDldLR0xqCFZ6DCyrQLjLPsxtTNch5jVA4lA=="], - - "@radix-ui/react-scroll-area": ["@radix-ui/react-scroll-area@1.2.10", "", { "dependencies": { "@radix-ui/number": "1.1.1", "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-tAXIa1g3sM5CGpVT0uIbUx/U3Gs5N8T52IICuCtObaos1S8fzsrPXG5WObkQN3S6NVl6wKgPhAIiBGbWnvc97A=="], - - "@radix-ui/react-select": ["@radix-ui/react-select@2.2.6", "", { "dependencies": { "@radix-ui/number": "1.1.1", "@radix-ui/primitive": "1.1.3", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-dismissable-layer": "1.1.11", "@radix-ui/react-focus-guards": "1.1.3", "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-popper": "1.2.8", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1", 
"@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-visually-hidden": "1.2.3", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-I30RydO+bnn2PQztvo25tswPH+wFBjehVGtmagkU78yMdwTwVf12wnAOF+AeP8S2N8xD+5UPbGhkUfPyvT+mwQ=="], - - "@radix-ui/react-separator": ["@radix-ui/react-separator@1.1.8", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.4" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-sDvqVY4itsKwwSMEe0jtKgfTh+72Sy3gPmQpjqcQneqQ4PFmr/1I0YA+2/puilhggCe2gJcx5EBAYFkWkdpa5g=="], - - "@radix-ui/react-slot": ["@radix-ui/react-slot@1.2.4", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-Jl+bCv8HxKnlTLVrcDE8zTMJ09R9/ukw4qBs/oZClOfoQk/cOTbDn+NceXfV7j09YPVQUryJPHurafcSg6EVKA=="], - - "@radix-ui/react-switch": ["@radix-ui/react-switch@1.2.6", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-use-size": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, 
"sha512-bByzr1+ep1zk4VubeEVViV592vu2lHE2BZY5OnzehZqOOgogN80+mNtCqPkhn2gklJqOpxWgPoYTSnhBCqpOXQ=="], - - "@radix-ui/react-tabs": ["@radix-ui/react-tabs@1.1.13", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.11", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-7xdcatg7/U+7+Udyoj2zodtI9H/IIopqo+YOIcZOq1nJwXWBZ9p8xiu5llXlekDbZkca79a/fozEYQXIA4sW6A=="], - - "@radix-ui/react-tooltip": ["@radix-ui/react-tooltip@1.2.8", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-dismissable-layer": "1.1.11", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-popper": "1.2.8", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-tY7sVt1yL9ozIxvmbtN5qtmH2krXcBCfjEiCgKGLqunJHvgvZG2Pcl2oQ3kbcZARb1BGEHdkLzcYGO8ynVlieg=="], - - "@radix-ui/react-use-callback-ref": ["@radix-ui/react-use-callback-ref@1.1.1", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, 
"sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg=="], - - "@radix-ui/react-use-controllable-state": ["@radix-ui/react-use-controllable-state@1.2.2", "", { "dependencies": { "@radix-ui/react-use-effect-event": "0.0.2", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg=="], - - "@radix-ui/react-use-effect-event": ["@radix-ui/react-use-effect-event@0.0.2", "", { "dependencies": { "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-Qp8WbZOBe+blgpuUT+lw2xheLP8q0oatc9UpmiemEICxGvFLYmHm9QowVZGHtJlGbS6A6yJ3iViad/2cVjnOiA=="], - - "@radix-ui/react-use-escape-keydown": ["@radix-ui/react-use-escape-keydown@1.1.1", "", { "dependencies": { "@radix-ui/react-use-callback-ref": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g=="], - - "@radix-ui/react-use-layout-effect": ["@radix-ui/react-use-layout-effect@1.1.1", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ=="], - - "@radix-ui/react-use-previous": ["@radix-ui/react-use-previous@1.1.1", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, 
"sha512-2dHfToCj/pzca2Ck724OZ5L0EVrr3eHRNsG/b3xQJLA2hZpVCS99bLAX+hm1IHXDEnzU6by5z/5MIY794/a8NQ=="], - - "@radix-ui/react-use-rect": ["@radix-ui/react-use-rect@1.1.1", "", { "dependencies": { "@radix-ui/rect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-QTYuDesS0VtuHNNvMh+CjlKJ4LJickCMUAqjlE3+j8w+RlRpwyX3apEQKGFzbZGdo7XNG1tXa+bQqIE7HIXT2w=="], - - "@radix-ui/react-use-size": ["@radix-ui/react-use-size@1.1.1", "", { "dependencies": { "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ=="], - - "@radix-ui/react-visually-hidden": ["@radix-ui/react-visually-hidden@1.2.3", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug=="], - - "@radix-ui/rect": ["@radix-ui/rect@1.1.1", "", {}, "sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw=="], - - "@reduxjs/toolkit": ["@reduxjs/toolkit@2.11.2", "", { "dependencies": { "@standard-schema/spec": "^1.0.0", "@standard-schema/utils": "^0.3.0", "immer": "^11.0.0", "redux": "^5.0.1", "redux-thunk": "^3.1.0", "reselect": "^5.1.0" }, "peerDependencies": { "react": "^16.9.0 || ^17.0.0 || ^18 || ^19", "react-redux": "^7.2.1 || ^8.1.3 || ^9.0.0" }, "optionalPeers": ["react", "react-redux"] }, "sha512-Kd6kAHTA6/nUpp8mySPqj3en3dm0tdMIgbttnQ1xFMVpufoj+ADi8pXLBsd4xzTRHQa7t/Jv8W5UnCuW4kuWMQ=="], - - 
"@rolldown/pluginutils": ["@rolldown/pluginutils@1.0.0-beta.53", "", {}, "sha512-vENRlFU4YbrwVqNDZ7fLvy+JR1CRkyr01jhSiDpE1u6py3OMzQfztQU2jxykW3ALNxO4kSlqIDeYyD0Y9RcQeQ=="], - - "@rollup/rollup-android-arm-eabi": ["@rollup/rollup-android-arm-eabi@4.53.3", "", { "os": "android", "cpu": "arm" }, "sha512-mRSi+4cBjrRLoaal2PnqH82Wqyb+d3HsPUN/W+WslCXsZsyHa9ZeQQX/pQsZaVIWDkPcpV6jJ+3KLbTbgnwv8w=="], - - "@rollup/rollup-android-arm64": ["@rollup/rollup-android-arm64@4.53.3", "", { "os": "android", "cpu": "arm64" }, "sha512-CbDGaMpdE9sh7sCmTrTUyllhrg65t6SwhjlMJsLr+J8YjFuPmCEjbBSx4Z/e4SmDyH3aB5hGaJUP2ltV/vcs4w=="], - - "@rollup/rollup-darwin-arm64": ["@rollup/rollup-darwin-arm64@4.53.3", "", { "os": "darwin", "cpu": "arm64" }, "sha512-Nr7SlQeqIBpOV6BHHGZgYBuSdanCXuw09hon14MGOLGmXAFYjx1wNvquVPmpZnl0tLjg25dEdr4IQ6GgyToCUA=="], - - "@rollup/rollup-darwin-x64": ["@rollup/rollup-darwin-x64@4.53.3", "", { "os": "darwin", "cpu": "x64" }, "sha512-DZ8N4CSNfl965CmPktJ8oBnfYr3F8dTTNBQkRlffnUarJ2ohudQD17sZBa097J8xhQ26AwhHJ5mvUyQW8ddTsQ=="], - - "@rollup/rollup-freebsd-arm64": ["@rollup/rollup-freebsd-arm64@4.53.3", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-yMTrCrK92aGyi7GuDNtGn2sNW+Gdb4vErx4t3Gv/Tr+1zRb8ax4z8GWVRfr3Jw8zJWvpGHNpss3vVlbF58DZ4w=="], - - "@rollup/rollup-freebsd-x64": ["@rollup/rollup-freebsd-x64@4.53.3", "", { "os": "freebsd", "cpu": "x64" }, "sha512-lMfF8X7QhdQzseM6XaX0vbno2m3hlyZFhwcndRMw8fbAGUGL3WFMBdK0hbUBIUYcEcMhVLr1SIamDeuLBnXS+Q=="], - - "@rollup/rollup-linux-arm-gnueabihf": ["@rollup/rollup-linux-arm-gnueabihf@4.53.3", "", { "os": "linux", "cpu": "arm" }, "sha512-k9oD15soC/Ln6d2Wv/JOFPzZXIAIFLp6B+i14KhxAfnq76ajt0EhYc5YPeX6W1xJkAdItcVT+JhKl1QZh44/qw=="], - - "@rollup/rollup-linux-arm-musleabihf": ["@rollup/rollup-linux-arm-musleabihf@4.53.3", "", { "os": "linux", "cpu": "arm" }, "sha512-vTNlKq+N6CK/8UktsrFuc+/7NlEYVxgaEgRXVUVK258Z5ymho29skzW1sutgYjqNnquGwVUObAaxae8rZ6YMhg=="], - - "@rollup/rollup-linux-arm64-gnu": ["@rollup/rollup-linux-arm64-gnu@4.53.3", "", { 
"os": "linux", "cpu": "arm64" }, "sha512-RGrFLWgMhSxRs/EWJMIFM1O5Mzuz3Xy3/mnxJp/5cVhZ2XoCAxJnmNsEyeMJtpK+wu0FJFWz+QF4mjCA7AUQ3w=="], - - "@rollup/rollup-linux-arm64-musl": ["@rollup/rollup-linux-arm64-musl@4.53.3", "", { "os": "linux", "cpu": "arm64" }, "sha512-kASyvfBEWYPEwe0Qv4nfu6pNkITLTb32p4yTgzFCocHnJLAHs+9LjUu9ONIhvfT/5lv4YS5muBHyuV84epBo/A=="], - - "@rollup/rollup-linux-loong64-gnu": ["@rollup/rollup-linux-loong64-gnu@4.53.3", "", { "os": "linux", "cpu": "none" }, "sha512-JiuKcp2teLJwQ7vkJ95EwESWkNRFJD7TQgYmCnrPtlu50b4XvT5MOmurWNrCj3IFdyjBQ5p9vnrX4JM6I8OE7g=="], - - "@rollup/rollup-linux-ppc64-gnu": ["@rollup/rollup-linux-ppc64-gnu@4.53.3", "", { "os": "linux", "cpu": "ppc64" }, "sha512-EoGSa8nd6d3T7zLuqdojxC20oBfNT8nexBbB/rkxgKj5T5vhpAQKKnD+h3UkoMuTyXkP5jTjK/ccNRmQrPNDuw=="], - - "@rollup/rollup-linux-riscv64-gnu": ["@rollup/rollup-linux-riscv64-gnu@4.53.3", "", { "os": "linux", "cpu": "none" }, "sha512-4s+Wped2IHXHPnAEbIB0YWBv7SDohqxobiiPA1FIWZpX+w9o2i4LezzH/NkFUl8LRci/8udci6cLq+jJQlh+0g=="], - - "@rollup/rollup-linux-riscv64-musl": ["@rollup/rollup-linux-riscv64-musl@4.53.3", "", { "os": "linux", "cpu": "none" }, "sha512-68k2g7+0vs2u9CxDt5ktXTngsxOQkSEV/xBbwlqYcUrAVh6P9EgMZvFsnHy4SEiUl46Xf0IObWVbMvPrr2gw8A=="], - - "@rollup/rollup-linux-s390x-gnu": ["@rollup/rollup-linux-s390x-gnu@4.53.3", "", { "os": "linux", "cpu": "s390x" }, "sha512-VYsFMpULAz87ZW6BVYw3I6sWesGpsP9OPcyKe8ofdg9LHxSbRMd7zrVrr5xi/3kMZtpWL/wC+UIJWJYVX5uTKg=="], - - "@rollup/rollup-linux-x64-gnu": ["@rollup/rollup-linux-x64-gnu@4.53.3", "", { "os": "linux", "cpu": "x64" }, "sha512-3EhFi1FU6YL8HTUJZ51imGJWEX//ajQPfqWLI3BQq4TlvHy4X0MOr5q3D2Zof/ka0d5FNdPwZXm3Yyib/UEd+w=="], - - "@rollup/rollup-linux-x64-musl": ["@rollup/rollup-linux-x64-musl@4.53.3", "", { "os": "linux", "cpu": "x64" }, "sha512-eoROhjcc6HbZCJr+tvVT8X4fW3/5g/WkGvvmwz/88sDtSJzO7r/blvoBDgISDiCjDRZmHpwud7h+6Q9JxFwq1Q=="], - - "@rollup/rollup-openharmony-arm64": ["@rollup/rollup-openharmony-arm64@4.53.3", "", { "os": "none", "cpu": 
"arm64" }, "sha512-OueLAWgrNSPGAdUdIjSWXw+u/02BRTcnfw9PN41D2vq/JSEPnJnVuBgw18VkN8wcd4fjUs+jFHVM4t9+kBSNLw=="], - - "@rollup/rollup-win32-arm64-msvc": ["@rollup/rollup-win32-arm64-msvc@4.53.3", "", { "os": "win32", "cpu": "arm64" }, "sha512-GOFuKpsxR/whszbF/bzydebLiXIHSgsEUp6M0JI8dWvi+fFa1TD6YQa4aSZHtpmh2/uAlj/Dy+nmby3TJ3pkTw=="], - - "@rollup/rollup-win32-ia32-msvc": ["@rollup/rollup-win32-ia32-msvc@4.53.3", "", { "os": "win32", "cpu": "ia32" }, "sha512-iah+THLcBJdpfZ1TstDFbKNznlzoxa8fmnFYK4V67HvmuNYkVdAywJSoteUszvBQ9/HqN2+9AZghbajMsFT+oA=="], - - "@rollup/rollup-win32-x64-gnu": ["@rollup/rollup-win32-x64-gnu@4.53.3", "", { "os": "win32", "cpu": "x64" }, "sha512-J9QDiOIZlZLdcot5NXEepDkstocktoVjkaKUtqzgzpt2yWjGlbYiKyp05rWwk4nypbYUNoFAztEgixoLaSETkg=="], - - "@rollup/rollup-win32-x64-msvc": ["@rollup/rollup-win32-x64-msvc@4.53.3", "", { "os": "win32", "cpu": "x64" }, "sha512-UhTd8u31dXadv0MopwGgNOBpUVROFKWVQgAg5N1ESyCz8AuBcMqm4AuTjrwgQKGDfoFuz02EuMRHQIw/frmYKQ=="], - - "@rtsao/scc": ["@rtsao/scc@1.1.0", "", {}, "sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g=="], - - "@rushstack/eslint-patch": ["@rushstack/eslint-patch@1.15.0", "", {}, "sha512-ojSshQPKwVvSMR8yT2L/QtUkV5SXi/IfDiJ4/8d6UbTPjiHVmxZzUAzGD8Tzks1b9+qQkZa0isUOvYObedITaw=="], - - "@sindresorhus/base62": ["@sindresorhus/base62@1.0.0", "", {}, "sha512-TeheYy0ILzBEI/CO55CP6zJCSdSWeRtGnHy8U8dWSUH4I68iqTsy7HkMktR4xakThc9jotkPQUXT4ITdbV7cHA=="], - - "@standard-schema/spec": ["@standard-schema/spec@1.0.0", "", {}, "sha512-m2bOd0f2RT9k8QJx1JN85cZYyH1RqFBdlwtkSlf4tBDYLCiiZnv1fIIwacK6cqwXavOydf0NPToMQgpKq+dVlA=="], - - "@standard-schema/utils": ["@standard-schema/utils@0.3.0", "", {}, "sha512-e7Mew686owMaPJVNNLs55PUvgz371nKgwsc4vxE49zsODpJEnxgxRo2y/OKrqueavXgZNMDVj3DdHFlaSAeU8g=="], - - "@stylistic/eslint-plugin": ["@stylistic/eslint-plugin@5.6.1", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.9.0", "@typescript-eslint/types": "^8.47.0", 
"eslint-visitor-keys": "^4.2.1", "espree": "^10.4.0", "estraverse": "^5.3.0", "picomatch": "^4.0.3" }, "peerDependencies": { "eslint": ">=9.0.0" } }, "sha512-JCs+MqoXfXrRPGbGmho/zGS/jMcn3ieKl/A8YImqib76C8kjgZwq5uUFzc30lJkMvcchuRn6/v8IApLxli3Jyw=="], - - "@swc/helpers": ["@swc/helpers@0.5.15", "", { "dependencies": { "tslib": "^2.8.0" } }, "sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g=="], - - "@tanstack/eslint-plugin-query": ["@tanstack/eslint-plugin-query@5.91.2", "", { "dependencies": { "@typescript-eslint/utils": "^8.44.1" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0" } }, "sha512-UPeWKl/Acu1IuuHJlsN+eITUHqAaa9/04geHHPedY8siVarSaWprY0SVMKrkpKfk5ehRT7+/MZ5QwWuEtkWrFw=="], - - "@tanstack/query-core": ["@tanstack/query-core@5.90.12", "", {}, "sha512-T1/8t5DhV/SisWjDnaiU2drl6ySvsHj1bHBCWNXd+/T+Hh1cf6JodyEYMd5sgwm+b/mETT4EV3H+zCVczCU5hg=="], - - "@tanstack/react-query": ["@tanstack/react-query@5.90.12", "", { "dependencies": { "@tanstack/query-core": "5.90.12" }, "peerDependencies": { "react": "^18 || ^19" } }, "sha512-graRZspg7EoEaw0a8faiUASCyJrqjKPdqJ9EwuDRUF9mEYJ1YPczI9H+/agJ0mOJkPCJDk0lsz5QTrLZ/jQ2rg=="], - - "@tybys/wasm-util": ["@tybys/wasm-util@0.10.1", "", { "dependencies": { "tslib": "^2.4.0" } }, "sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg=="], - - "@types/babel__core": ["@types/babel__core@7.20.5", "", { "dependencies": { "@babel/parser": "^7.20.7", "@babel/types": "^7.20.7", "@types/babel__generator": "*", "@types/babel__template": "*", "@types/babel__traverse": "*" } }, "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA=="], - - "@types/babel__generator": ["@types/babel__generator@7.27.0", "", { "dependencies": { "@babel/types": "^7.0.0" } }, "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg=="], - - "@types/babel__template": ["@types/babel__template@7.4.4", 
"", { "dependencies": { "@babel/parser": "^7.1.0", "@babel/types": "^7.0.0" } }, "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A=="], - - "@types/babel__traverse": ["@types/babel__traverse@7.28.0", "", { "dependencies": { "@babel/types": "^7.28.2" } }, "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q=="], - - "@types/d3-array": ["@types/d3-array@3.2.2", "", {}, "sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw=="], - - "@types/d3-color": ["@types/d3-color@3.1.3", "", {}, "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A=="], - - "@types/d3-ease": ["@types/d3-ease@3.0.2", "", {}, "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA=="], - - "@types/d3-interpolate": ["@types/d3-interpolate@3.0.4", "", { "dependencies": { "@types/d3-color": "*" } }, "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA=="], - - "@types/d3-path": ["@types/d3-path@3.1.1", "", {}, "sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg=="], - - "@types/d3-scale": ["@types/d3-scale@4.0.9", "", { "dependencies": { "@types/d3-time": "*" } }, "sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw=="], - - "@types/d3-shape": ["@types/d3-shape@3.1.7", "", { "dependencies": { "@types/d3-path": "*" } }, "sha512-VLvUQ33C+3J+8p+Daf+nYSOsjB4GXp19/S/aGo60m9h1v6XaxjiT82lKVWJCfzhtuZ3yD7i/TPeC/fuKLLOSmg=="], - - "@types/d3-time": ["@types/d3-time@3.0.4", "", {}, "sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g=="], - - "@types/d3-timer": ["@types/d3-timer@3.0.2", "", {}, "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw=="], - - "@types/debug": ["@types/debug@4.1.12", "", { 
"dependencies": { "@types/ms": "*" } }, "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ=="], - - "@types/estree": ["@types/estree@1.0.8", "", {}, "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w=="], - - "@types/hast": ["@types/hast@3.0.4", "", { "dependencies": { "@types/unist": "*" } }, "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ=="], - - "@types/json-schema": ["@types/json-schema@7.0.15", "", {}, "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA=="], - - "@types/json5": ["@types/json5@0.0.29", "", {}, "sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ=="], - - "@types/mdast": ["@types/mdast@4.0.4", "", { "dependencies": { "@types/unist": "*" } }, "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA=="], - - "@types/ms": ["@types/ms@2.1.0", "", {}, "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA=="], - - "@types/node": ["@types/node@25.0.2", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-gWEkeiyYE4vqjON/+Obqcoeffmk0NF15WSBwSs7zwVA2bAbTaE0SJ7P0WNGoJn8uE7fiaV5a7dKYIJriEqOrmA=="], - - "@types/prismjs": ["@types/prismjs@1.26.5", "", {}, "sha512-AUZTa7hQ2KY5L7AmtSiqxlhWxb4ina0yd8hNbl4TWuqnv/pFP0nDMb3YrfSBf4hJVGLh2YEIBfKaBW/9UEl6IQ=="], - - "@types/react": ["types-react@19.0.0-rc.1", "", { "dependencies": { "csstype": "^3.0.2" } }, "sha512-RshndUfqTW6K3STLPis8BtAYCGOkMbtvYsi90gmVNDZBXUyUc5juf2PE9LfS/JmOlUIRO8cWTS/1MTnmhjDqyQ=="], - - "@types/react-dom": ["types-react-dom@19.0.0-rc.1", "", { "dependencies": { "@types/react": "*" } }, "sha512-VSLZJl8VXCD0fAWp7DUTFUDCcZ8DVXOQmjhJMD03odgeFmu14ZQJHCXeETm3BEAhJqfgJaFkLnGkQv88sRx0fQ=="], - - "@types/react-syntax-highlighter": ["@types/react-syntax-highlighter@15.5.13", "", { "dependencies": { 
"@types/react": "*" } }, "sha512-uLGJ87j6Sz8UaBAooU0T6lWJ0dBmjZgN1PZTrj05TNql2/XpC6+4HhMT5syIdFUUt+FASfCeLLv4kBygNU+8qA=="], - - "@types/stylis": ["@types/stylis@4.2.5", "", {}, "sha512-1Xve+NMN7FWjY14vLoY5tL3BVEQ/n42YLwaqJIPYhotZ9uBHt87VceMwWQpzmdEt2TNXIorIFG+YeCUUW7RInw=="], - - "@types/unist": ["@types/unist@3.0.3", "", {}, "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q=="], - - "@types/use-sync-external-store": ["@types/use-sync-external-store@0.0.6", "", {}, "sha512-zFDAD+tlpf2r4asuHEj0XH6pY6i0g5NeAHPn+15wk3BV6JA69eERFXC1gyGThDkVa1zCyKr5jox1+2LbV/AMLg=="], - - "@typescript-eslint/eslint-plugin": ["@typescript-eslint/eslint-plugin@8.50.0", "", { "dependencies": { "@eslint-community/regexpp": "^4.10.0", "@typescript-eslint/scope-manager": "8.50.0", "@typescript-eslint/type-utils": "8.50.0", "@typescript-eslint/utils": "8.50.0", "@typescript-eslint/visitor-keys": "8.50.0", "ignore": "^7.0.0", "natural-compare": "^1.4.0", "ts-api-utils": "^2.1.0" }, "peerDependencies": { "@typescript-eslint/parser": "^8.50.0", "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-O7QnmOXYKVtPrfYzMolrCTfkezCJS9+ljLdKW/+DCvRsc3UAz+sbH6Xcsv7p30+0OwUbeWfUDAQE0vpabZ3QLg=="], - - "@typescript-eslint/parser": ["@typescript-eslint/parser@8.50.0", "", { "dependencies": { "@typescript-eslint/scope-manager": "8.50.0", "@typescript-eslint/types": "8.50.0", "@typescript-eslint/typescript-estree": "8.50.0", "@typescript-eslint/visitor-keys": "8.50.0", "debug": "^4.3.4" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-6/cmF2piao+f6wSxUsJLZjck7OQsYyRtcOZS02k7XINSNlz93v6emM8WutDQSXnroG2xwYlEVHJI+cPA7CPM3Q=="], - - "@typescript-eslint/project-service": ["@typescript-eslint/project-service@8.50.0", "", { "dependencies": { "@typescript-eslint/tsconfig-utils": "^8.50.0", "@typescript-eslint/types": "^8.50.0", "debug": "^4.3.4" }, "peerDependencies": { "typescript": ">=4.8.4 <6.0.0" } 
}, "sha512-Cg/nQcL1BcoTijEWyx4mkVC56r8dj44bFDvBdygifuS20f3OZCHmFbjF34DPSi07kwlFvqfv/xOLnJ5DquxSGQ=="], - - "@typescript-eslint/scope-manager": ["@typescript-eslint/scope-manager@8.49.0", "", { "dependencies": { "@typescript-eslint/types": "8.49.0", "@typescript-eslint/visitor-keys": "8.49.0" } }, "sha512-npgS3zi+/30KSOkXNs0LQXtsg9ekZ8OISAOLGWA/ZOEn0ZH74Ginfl7foziV8DT+D98WfQ5Kopwqb/PZOaIJGg=="], - - "@typescript-eslint/tsconfig-utils": ["@typescript-eslint/tsconfig-utils@8.50.0", "", { "peerDependencies": { "typescript": ">=4.8.4 <6.0.0" } }, "sha512-vxd3G/ybKTSlm31MOA96gqvrRGv9RJ7LGtZCn2Vrc5htA0zCDvcMqUkifcjrWNNKXHUU3WCkYOzzVSFBd0wa2w=="], - - "@typescript-eslint/type-utils": ["@typescript-eslint/type-utils@8.49.0", "", { "dependencies": { "@typescript-eslint/types": "8.49.0", "@typescript-eslint/typescript-estree": "8.49.0", "@typescript-eslint/utils": "8.49.0", "debug": "^4.3.4", "ts-api-utils": "^2.1.0" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-KTExJfQ+svY8I10P4HdxKzWsvtVnsuCifU5MvXrRwoP2KOlNZ9ADNEWWsQTJgMxLzS5VLQKDjkCT/YzgsnqmZg=="], - - "@typescript-eslint/types": ["@typescript-eslint/types@8.49.0", "", {}, "sha512-e9k/fneezorUo6WShlQpMxXh8/8wfyc+biu6tnAqA81oWrEic0k21RHzP9uqqpyBBeBKu4T+Bsjy9/b8u7obXQ=="], - - "@typescript-eslint/typescript-estree": ["@typescript-eslint/typescript-estree@8.50.0", "", { "dependencies": { "@typescript-eslint/project-service": "8.50.0", "@typescript-eslint/tsconfig-utils": "8.50.0", "@typescript-eslint/types": "8.50.0", "@typescript-eslint/visitor-keys": "8.50.0", "debug": "^4.3.4", "minimatch": "^9.0.4", "semver": "^7.6.0", "tinyglobby": "^0.2.15", "ts-api-utils": "^2.1.0" }, "peerDependencies": { "typescript": ">=4.8.4 <6.0.0" } }, "sha512-W7SVAGBR/IX7zm1t70Yujpbk+zdPq/u4soeFSknWFdXIFuWsBGBOUu/Tn/I6KHSKvSh91OiMuaSnYp3mtPt5IQ=="], - - "@typescript-eslint/utils": ["@typescript-eslint/utils@8.49.0", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.7.0", 
"@typescript-eslint/scope-manager": "8.49.0", "@typescript-eslint/types": "8.49.0", "@typescript-eslint/typescript-estree": "8.49.0" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-N3W7rJw7Rw+z1tRsHZbK395TWSYvufBXumYtEGzypgMUthlg0/hmCImeA8hgO2d2G4pd7ftpxxul2J8OdtdaFA=="], - - "@typescript-eslint/visitor-keys": ["@typescript-eslint/visitor-keys@8.50.0", "", { "dependencies": { "@typescript-eslint/types": "8.50.0", "eslint-visitor-keys": "^4.2.1" } }, "sha512-Xzmnb58+Db78gT/CCj/PVCvK+zxbnsw6F+O1oheYszJbBSdEjVhQi3C/Xttzxgi/GLmpvOggRs1RFpiJ8+c34Q=="], - - "@unrs/resolver-binding-android-arm-eabi": ["@unrs/resolver-binding-android-arm-eabi@1.11.1", "", { "os": "android", "cpu": "arm" }, "sha512-ppLRUgHVaGRWUx0R0Ut06Mjo9gBaBkg3v/8AxusGLhsIotbBLuRk51rAzqLC8gq6NyyAojEXglNjzf6R948DNw=="], - - "@unrs/resolver-binding-android-arm64": ["@unrs/resolver-binding-android-arm64@1.11.1", "", { "os": "android", "cpu": "arm64" }, "sha512-lCxkVtb4wp1v+EoN+HjIG9cIIzPkX5OtM03pQYkG+U5O/wL53LC4QbIeazgiKqluGeVEeBlZahHalCaBvU1a2g=="], - - "@unrs/resolver-binding-darwin-arm64": ["@unrs/resolver-binding-darwin-arm64@1.11.1", "", { "os": "darwin", "cpu": "arm64" }, "sha512-gPVA1UjRu1Y/IsB/dQEsp2V1pm44Of6+LWvbLc9SDk1c2KhhDRDBUkQCYVWe6f26uJb3fOK8saWMgtX8IrMk3g=="], - - "@unrs/resolver-binding-darwin-x64": ["@unrs/resolver-binding-darwin-x64@1.11.1", "", { "os": "darwin", "cpu": "x64" }, "sha512-cFzP7rWKd3lZaCsDze07QX1SC24lO8mPty9vdP+YVa3MGdVgPmFc59317b2ioXtgCMKGiCLxJ4HQs62oz6GfRQ=="], - - "@unrs/resolver-binding-freebsd-x64": ["@unrs/resolver-binding-freebsd-x64@1.11.1", "", { "os": "freebsd", "cpu": "x64" }, "sha512-fqtGgak3zX4DCB6PFpsH5+Kmt/8CIi4Bry4rb1ho6Av2QHTREM+47y282Uqiu3ZRF5IQioJQ5qWRV6jduA+iGw=="], - - "@unrs/resolver-binding-linux-arm-gnueabihf": ["@unrs/resolver-binding-linux-arm-gnueabihf@1.11.1", "", { "os": "linux", "cpu": "arm" }, "sha512-u92mvlcYtp9MRKmP+ZvMmtPN34+/3lMHlyMj7wXJDeXxuM0Vgzz0+PPJNsro1m3IZPYChIkn944wW8TYgGKFHw=="], - 
- "@unrs/resolver-binding-linux-arm-musleabihf": ["@unrs/resolver-binding-linux-arm-musleabihf@1.11.1", "", { "os": "linux", "cpu": "arm" }, "sha512-cINaoY2z7LVCrfHkIcmvj7osTOtm6VVT16b5oQdS4beibX2SYBwgYLmqhBjA1t51CarSaBuX5YNsWLjsqfW5Cw=="], - - "@unrs/resolver-binding-linux-arm64-gnu": ["@unrs/resolver-binding-linux-arm64-gnu@1.11.1", "", { "os": "linux", "cpu": "arm64" }, "sha512-34gw7PjDGB9JgePJEmhEqBhWvCiiWCuXsL9hYphDF7crW7UgI05gyBAi6MF58uGcMOiOqSJ2ybEeCvHcq0BCmQ=="], - - "@unrs/resolver-binding-linux-arm64-musl": ["@unrs/resolver-binding-linux-arm64-musl@1.11.1", "", { "os": "linux", "cpu": "arm64" }, "sha512-RyMIx6Uf53hhOtJDIamSbTskA99sPHS96wxVE/bJtePJJtpdKGXO1wY90oRdXuYOGOTuqjT8ACccMc4K6QmT3w=="], - - "@unrs/resolver-binding-linux-ppc64-gnu": ["@unrs/resolver-binding-linux-ppc64-gnu@1.11.1", "", { "os": "linux", "cpu": "ppc64" }, "sha512-D8Vae74A4/a+mZH0FbOkFJL9DSK2R6TFPC9M+jCWYia/q2einCubX10pecpDiTmkJVUH+y8K3BZClycD8nCShA=="], - - "@unrs/resolver-binding-linux-riscv64-gnu": ["@unrs/resolver-binding-linux-riscv64-gnu@1.11.1", "", { "os": "linux", "cpu": "none" }, "sha512-frxL4OrzOWVVsOc96+V3aqTIQl1O2TjgExV4EKgRY09AJ9leZpEg8Ak9phadbuX0BA4k8U5qtvMSQQGGmaJqcQ=="], - - "@unrs/resolver-binding-linux-riscv64-musl": ["@unrs/resolver-binding-linux-riscv64-musl@1.11.1", "", { "os": "linux", "cpu": "none" }, "sha512-mJ5vuDaIZ+l/acv01sHoXfpnyrNKOk/3aDoEdLO/Xtn9HuZlDD6jKxHlkN8ZhWyLJsRBxfv9GYM2utQ1SChKew=="], - - "@unrs/resolver-binding-linux-s390x-gnu": ["@unrs/resolver-binding-linux-s390x-gnu@1.11.1", "", { "os": "linux", "cpu": "s390x" }, "sha512-kELo8ebBVtb9sA7rMe1Cph4QHreByhaZ2QEADd9NzIQsYNQpt9UkM9iqr2lhGr5afh885d/cB5QeTXSbZHTYPg=="], - - "@unrs/resolver-binding-linux-x64-gnu": ["@unrs/resolver-binding-linux-x64-gnu@1.11.1", "", { "os": "linux", "cpu": "x64" }, "sha512-C3ZAHugKgovV5YvAMsxhq0gtXuwESUKc5MhEtjBpLoHPLYM+iuwSj3lflFwK3DPm68660rZ7G8BMcwSro7hD5w=="], - - "@unrs/resolver-binding-linux-x64-musl": ["@unrs/resolver-binding-linux-x64-musl@1.11.1", "", { "os": 
"linux", "cpu": "x64" }, "sha512-rV0YSoyhK2nZ4vEswT/QwqzqQXw5I6CjoaYMOX0TqBlWhojUf8P94mvI7nuJTeaCkkds3QE4+zS8Ko+GdXuZtA=="], - - "@unrs/resolver-binding-wasm32-wasi": ["@unrs/resolver-binding-wasm32-wasi@1.11.1", "", { "dependencies": { "@napi-rs/wasm-runtime": "^0.2.11" }, "cpu": "none" }, "sha512-5u4RkfxJm+Ng7IWgkzi3qrFOvLvQYnPBmjmZQ8+szTK/b31fQCnleNl1GgEt7nIsZRIf5PLhPwT0WM+q45x/UQ=="], - - "@unrs/resolver-binding-win32-arm64-msvc": ["@unrs/resolver-binding-win32-arm64-msvc@1.11.1", "", { "os": "win32", "cpu": "arm64" }, "sha512-nRcz5Il4ln0kMhfL8S3hLkxI85BXs3o8EYoattsJNdsX4YUU89iOkVn7g0VHSRxFuVMdM4Q1jEpIId1Ihim/Uw=="], - - "@unrs/resolver-binding-win32-ia32-msvc": ["@unrs/resolver-binding-win32-ia32-msvc@1.11.1", "", { "os": "win32", "cpu": "ia32" }, "sha512-DCEI6t5i1NmAZp6pFonpD5m7i6aFrpofcp4LA2i8IIq60Jyo28hamKBxNrZcyOwVOZkgsRp9O2sXWBWP8MnvIQ=="], - - "@unrs/resolver-binding-win32-x64-msvc": ["@unrs/resolver-binding-win32-x64-msvc@1.11.1", "", { "os": "win32", "cpu": "x64" }, "sha512-lrW200hZdbfRtztbygyaq/6jP6AKE8qQN2KvPcJ+x7wiD038YtnYtZ82IMNJ69GJibV7bwL3y9FgK+5w/pYt6g=="], - - "@vitejs/plugin-react": ["@vitejs/plugin-react@5.1.2", "", { "dependencies": { "@babel/core": "^7.28.5", "@babel/plugin-transform-react-jsx-self": "^7.27.1", "@babel/plugin-transform-react-jsx-source": "^7.27.1", "@rolldown/pluginutils": "1.0.0-beta.53", "@types/babel__core": "^7.20.5", "react-refresh": "^0.18.0" }, "peerDependencies": { "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0" } }, "sha512-EcA07pHJouywpzsoTUqNh5NwGayl2PPVEJKUSinGGSxFGYn+shYbqMGBg6FXDqgXum9Ou/ecb+411ssw8HImJQ=="], - - "@vitest/eslint-plugin": ["@vitest/eslint-plugin@1.5.2", "", { "dependencies": { "@typescript-eslint/scope-manager": "^8.46.1", "@typescript-eslint/utils": "^8.46.1" }, "peerDependencies": { "eslint": ">=8.57.0", "typescript": ">=5.0.0", "vitest": "*" }, "optionalPeers": ["typescript", "vitest"] }, "sha512-2t1F2iecXB/b1Ox4U137lhD3chihEE3dRVtu3qMD35tc6UqUjg1VGRJoS1AkFKwpT8zv8OQInzPQO06hrRkeqw=="], - - 
"@vue/compiler-core": ["@vue/compiler-core@3.5.25", "", { "dependencies": { "@babel/parser": "^7.28.5", "@vue/shared": "3.5.25", "entities": "^4.5.0", "estree-walker": "^2.0.2", "source-map-js": "^1.2.1" } }, "sha512-vay5/oQJdsNHmliWoZfHPoVZZRmnSWhug0BYT34njkYTPqClh3DNWLkZNJBVSjsNMrg0CCrBfoKkjZQPM/QVUw=="], - - "@vue/compiler-dom": ["@vue/compiler-dom@3.5.25", "", { "dependencies": { "@vue/compiler-core": "3.5.25", "@vue/shared": "3.5.25" } }, "sha512-4We0OAcMZsKgYoGlMjzYvaoErltdFI2/25wqanuTu+S4gismOTRTBPi4IASOjxWdzIwrYSjnqONfKvuqkXzE2Q=="], - - "@vue/compiler-sfc": ["@vue/compiler-sfc@3.5.25", "", { "dependencies": { "@babel/parser": "^7.28.5", "@vue/compiler-core": "3.5.25", "@vue/compiler-dom": "3.5.25", "@vue/compiler-ssr": "3.5.25", "@vue/shared": "3.5.25", "estree-walker": "^2.0.2", "magic-string": "^0.30.21", "postcss": "^8.5.6", "source-map-js": "^1.2.1" } }, "sha512-PUgKp2rn8fFsI++lF2sO7gwO2d9Yj57Utr5yEsDf3GNaQcowCLKL7sf+LvVFvtJDXUp/03+dC6f2+LCv5aK1ag=="], - - "@vue/compiler-ssr": ["@vue/compiler-ssr@3.5.25", "", { "dependencies": { "@vue/compiler-dom": "3.5.25", "@vue/shared": "3.5.25" } }, "sha512-ritPSKLBcParnsKYi+GNtbdbrIE1mtuFEJ4U1sWeuOMlIziK5GtOL85t5RhsNy4uWIXPgk+OUdpnXiTdzn8o3A=="], - - "@vue/shared": ["@vue/shared@3.5.25", "", {}, "sha512-AbOPdQQnAnzs58H2FrrDxYj/TJfmeS2jdfEEhgiKINy+bnOANmVizIEgq1r+C5zsbs6l1CCQxtcj71rwNQ4jWg=="], - - "acorn": ["acorn@8.15.0", "", { "bin": { "acorn": "bin/acorn" } }, "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg=="], - - "acorn-jsx": ["acorn-jsx@5.3.2", "", { "peerDependencies": { "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ=="], - - "agent-base": ["agent-base@7.1.4", "", {}, "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ=="], - - "ajv": ["ajv@6.12.6", "", { "dependencies": { "fast-deep-equal": "^3.1.1", 
"fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" } }, "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g=="], - - "ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], - - "ansis": ["ansis@4.2.0", "", {}, "sha512-HqZ5rWlFjGiV0tDm3UxxgNRqsOTniqoKZu0pIAfh7TZQMGuZK+hH0drySty0si0QXj1ieop4+SkSfPZBPPkHig=="], - - "any-promise": ["any-promise@1.3.0", "", {}, "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A=="], - - "anymatch": ["anymatch@3.1.3", "", { "dependencies": { "normalize-path": "^3.0.0", "picomatch": "^2.0.4" } }, "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw=="], - - "are-docs-informative": ["are-docs-informative@0.0.2", "", {}, "sha512-ixiS0nLNNG5jNQzgZJNoUpBKdo9yTYZMGJ+QgT2jmjR7G7+QHRCc4v6LQ3NgE7EBJq+o0ams3waJwkrlBom8Ig=="], - - "arg": ["arg@5.0.2", "", {}, "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg=="], - - "argparse": ["argparse@2.0.1", "", {}, "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q=="], - - "aria-hidden": ["aria-hidden@1.2.6", "", { "dependencies": { "tslib": "^2.0.0" } }, "sha512-ik3ZgC9dY/lYVVM++OISsaYDeg1tb0VtP5uL3ouh1koGOaUMDPpbFIei4JkFimWUFPn90sbMNMXQAIVOlnYKJA=="], - - "aria-query": ["aria-query@5.3.2", "", {}, "sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw=="], - - "array-buffer-byte-length": ["array-buffer-byte-length@1.0.2", "", { "dependencies": { "call-bound": "^1.0.3", "is-array-buffer": "^3.0.5" } }, "sha512-LHE+8BuR7RYGDKvnrmcuSq3tDcKv9OFEXQt/HpbZhY7V6h0zlUXutnAD82GiFx9rdieCMjkvtcsPqBwgUl1Iiw=="], - - "array-includes": ["array-includes@3.1.9", "", { "dependencies": { 
"call-bind": "^1.0.8", "call-bound": "^1.0.4", "define-properties": "^1.2.1", "es-abstract": "^1.24.0", "es-object-atoms": "^1.1.1", "get-intrinsic": "^1.3.0", "is-string": "^1.1.1", "math-intrinsics": "^1.1.0" } }, "sha512-FmeCCAenzH0KH381SPT5FZmiA/TmpndpcaShhfgEN9eCVjnFBqq3l1xrI42y8+PPLI6hypzou4GXw00WHmPBLQ=="], - - "array.prototype.findlast": ["array.prototype.findlast@1.2.5", "", { "dependencies": { "call-bind": "^1.0.7", "define-properties": "^1.2.1", "es-abstract": "^1.23.2", "es-errors": "^1.3.0", "es-object-atoms": "^1.0.0", "es-shim-unscopables": "^1.0.2" } }, "sha512-CVvd6FHg1Z3POpBLxO6E6zr+rSKEQ9L6rZHAaY7lLfhKsWYUBBOuMs0e9o24oopj6H+geRCX0YJ+TJLBK2eHyQ=="], - - "array.prototype.findlastindex": ["array.prototype.findlastindex@1.2.6", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.4", "define-properties": "^1.2.1", "es-abstract": "^1.23.9", "es-errors": "^1.3.0", "es-object-atoms": "^1.1.1", "es-shim-unscopables": "^1.1.0" } }, "sha512-F/TKATkzseUExPlfvmwQKGITM3DGTK+vkAsCZoDc5daVygbJBnjEUCbgkAvVFsgfXfX4YIqZ/27G3k3tdXrTxQ=="], - - "array.prototype.flat": ["array.prototype.flat@1.3.3", "", { "dependencies": { "call-bind": "^1.0.8", "define-properties": "^1.2.1", "es-abstract": "^1.23.5", "es-shim-unscopables": "^1.0.2" } }, "sha512-rwG/ja1neyLqCuGZ5YYrznA62D4mZXg0i1cIskIUKSiqF3Cje9/wXAls9B9s1Wa2fomMsIv8czB8jZcPmxCXFg=="], - - "array.prototype.flatmap": ["array.prototype.flatmap@1.3.3", "", { "dependencies": { "call-bind": "^1.0.8", "define-properties": "^1.2.1", "es-abstract": "^1.23.5", "es-shim-unscopables": "^1.0.2" } }, "sha512-Y7Wt51eKJSyi80hFrJCePGGNo5ktJCslFuboqJsbf57CCPcm5zztluPlc4/aD8sWsKvlwatezpV4U1efk8kpjg=="], - - "array.prototype.tosorted": ["array.prototype.tosorted@1.1.4", "", { "dependencies": { "call-bind": "^1.0.7", "define-properties": "^1.2.1", "es-abstract": "^1.23.3", "es-errors": "^1.3.0", "es-shim-unscopables": "^1.0.2" } }, 
"sha512-p6Fx8B7b7ZhL/gmUsAy0D15WhvDccw3mnGNbZpi3pmeJdxtWsj2jEaI4Y6oo3XiHfzuSgPwKc04MYt6KgvC/wA=="], - - "arraybuffer.prototype.slice": ["arraybuffer.prototype.slice@1.0.4", "", { "dependencies": { "array-buffer-byte-length": "^1.0.1", "call-bind": "^1.0.8", "define-properties": "^1.2.1", "es-abstract": "^1.23.5", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.6", "is-array-buffer": "^3.0.4" } }, "sha512-BNoCY6SXXPQ7gF2opIP4GBE+Xw7U+pHMYKuzjgCN3GwiaIR09UUeKfheyIry77QtrCBlC0KK0q5/TER/tYh3PQ=="], - - "ast-types-flow": ["ast-types-flow@0.0.8", "", {}, "sha512-OH/2E5Fg20h2aPrbe+QL8JZQFko0YZaF+j4mnQ7BGhfavO7OpSLa8a0y9sBwomHdSbkhTS8TQNayBfnW5DwbvQ=="], - - "async-function": ["async-function@1.0.0", "", {}, "sha512-hsU18Ae8CDTR6Kgu9DYf0EbCr/a5iGL0rytQDobUcdpYOKokk8LEjVphnXkDkgpi0wYVsqrXuP0bZxJaTqdgoA=="], - - "available-typed-arrays": ["available-typed-arrays@1.0.7", "", { "dependencies": { "possible-typed-array-names": "^1.0.0" } }, "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ=="], - - "axe-core": ["axe-core@4.11.0", "", {}, "sha512-ilYanEU8vxxBexpJd8cWM4ElSQq4QctCLKih0TSfjIfCQTeyH/6zVrmIJfLPrKTKJRbiG+cfnZbQIjAlJmF1jQ=="], - - "axobject-query": ["axobject-query@4.1.0", "", {}, "sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ=="], - - "balanced-match": ["balanced-match@1.0.2", "", {}, "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="], - - "baseline-browser-mapping": ["baseline-browser-mapping@2.9.7", "", { "bin": { "baseline-browser-mapping": "dist/cli.js" } }, "sha512-k9xFKplee6KIio3IDbwj+uaCLpqzOwakOgmqzPezM0sFJlFKcg30vk2wOiAJtkTSfx0SSQDSe8q+mWA/fSH5Zg=="], - - "bidi-js": ["bidi-js@1.0.3", "", { "dependencies": { "require-from-string": "^2.0.2" } }, "sha512-RKshQI1R3YQ+n9YJz2QQ147P66ELpa1FQEg20Dk8oW9t2KgLbpDLLp9aGZ7y8WHSshDknG0bknqGw5/tyCs5tw=="], - - "binary-extensions": ["binary-extensions@2.3.0", "", {}, 
"sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw=="], - - "birecord": ["birecord@0.1.1", "", {}, "sha512-VUpsf/qykW0heRlC8LooCq28Kxn3mAqKohhDG/49rrsQ1dT1CXyj/pgXS+5BSRzFTR/3DyIBOqQOrGyZOh71Aw=="], - - "boolbase": ["boolbase@1.0.0", "", {}, "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww=="], - - "brace-expansion": ["brace-expansion@1.1.12", "", { "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg=="], - - "braces": ["braces@3.0.3", "", { "dependencies": { "fill-range": "^7.1.1" } }, "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA=="], - - "browserslist": ["browserslist@4.28.1", "", { "dependencies": { "baseline-browser-mapping": "^2.9.0", "caniuse-lite": "^1.0.30001759", "electron-to-chromium": "^1.5.263", "node-releases": "^2.0.27", "update-browserslist-db": "^1.2.0" }, "bin": { "browserslist": "cli.js" } }, "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA=="], - - "builtin-modules": ["builtin-modules@5.0.0", "", {}, "sha512-bkXY9WsVpY7CvMhKSR6pZilZu9Ln5WDrKVBUXf2S443etkmEO4V58heTecXcUIsNsi4Rx8JUO4NfX1IcQl4deg=="], - - "cac": ["cac@6.7.14", "", {}, "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ=="], - - "call-bind": ["call-bind@1.0.8", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.0", "es-define-property": "^1.0.0", "get-intrinsic": "^1.2.4", "set-function-length": "^1.2.2" } }, "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww=="], - - "call-bind-apply-helpers": ["call-bind-apply-helpers@1.0.2", "", { "dependencies": { "es-errors": "^1.3.0", "function-bind": "^1.1.2" } }, 
"sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ=="], - - "call-bound": ["call-bound@1.0.4", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.2", "get-intrinsic": "^1.3.0" } }, "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg=="], - - "callsites": ["callsites@3.1.0", "", {}, "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ=="], - - "camelcase-css": ["camelcase-css@2.0.1", "", {}, "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA=="], - - "camelize": ["camelize@1.0.1", "", {}, "sha512-dU+Tx2fsypxTgtLoE36npi3UqcjSSMNYfkqgmoEhtZrraP5VWq0K7FkWVTYa8eMPtnU/G2txVsfdCJTn9uzpuQ=="], - - "caniuse-lite": ["caniuse-lite@1.0.30001760", "", {}, "sha512-7AAMPcueWELt1p3mi13HR/LHH0TJLT11cnwDJEs3xA4+CK/PLKeO9Kl1oru24htkyUKtkGCvAx4ohB0Ttry8Dw=="], - - "ccount": ["ccount@2.0.1", "", {}, "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg=="], - - "chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - - "change-case": ["change-case@5.4.4", "", {}, "sha512-HRQyTk2/YPEkt9TnUPbOpr64Uw3KOicFWPVBb+xiHvd6eBx/qPr9xqfBFDT8P2vWsvvz4jbEkfDe71W3VyNu2w=="], - - "character-entities": ["character-entities@2.0.2", "", {}, "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ=="], - - "character-entities-legacy": ["character-entities-legacy@3.0.0", "", {}, "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ=="], - - "character-reference-invalid": ["character-reference-invalid@2.0.1", "", {}, "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw=="], - - "chart.js": ["chart.js@4.5.1", "", { 
"dependencies": { "@kurkle/color": "^0.3.0" } }, "sha512-GIjfiT9dbmHRiYi6Nl2yFCq7kkwdkp1W/lp2J99rX0yo9tgJGn3lKQATztIjb5tVtevcBtIdICNWqlq5+E8/Pw=="], - - "chartjs-plugin-datalabels": ["chartjs-plugin-datalabels@2.2.0", "", { "peerDependencies": { "chart.js": ">=3.0.0" } }, "sha512-14ZU30lH7n89oq+A4bWaJPnAG8a7ZTk7dKf48YAzMvJjQtjrgg5Dpk9f+LbjCF6bpx3RAGTeL13IXpKQYyRvlw=="], - - "chokidar": ["chokidar@3.6.0", "", { "dependencies": { "anymatch": "~3.1.2", "braces": "~3.0.2", "glob-parent": "~5.1.2", "is-binary-path": "~2.1.0", "is-glob": "~4.0.1", "normalize-path": "~3.0.0", "readdirp": "~3.6.0" }, "optionalDependencies": { "fsevents": "~2.3.2" } }, "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw=="], - - "ci-info": ["ci-info@4.3.1", "", {}, "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], - - "class-variance-authority": ["class-variance-authority@0.7.1", "", { "dependencies": { "clsx": "^2.1.1" } }, "sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg=="], - - "clean-regexp": ["clean-regexp@1.0.0", "", { "dependencies": { "escape-string-regexp": "^1.0.5" } }, "sha512-GfisEZEJvzKrmGWkvfhgzcz/BllN1USeqD2V6tg14OAOgaCD2Z/PUEuxnAZ/nPvmaHRG7a8y77p1T/IRQ4D1Hw=="], - - "client-only": ["client-only@0.0.1", "", {}, "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA=="], - - "clsx": ["clsx@2.1.1", "", {}, "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA=="], - - "cmdk": ["cmdk@1.1.1", "", { "dependencies": { "@radix-ui/react-compose-refs": "^1.1.1", "@radix-ui/react-dialog": "^1.1.6", "@radix-ui/react-id": "^1.1.0", "@radix-ui/react-primitive": "^2.0.2" }, "peerDependencies": { "react": "^18 || ^19 || ^19.0.0-rc", "react-dom": "^18 || ^19 || ^19.0.0-rc" } }, 
"sha512-Vsv7kFaXm+ptHDMZ7izaRsP70GgrW9NBNGswt9OZaVBLlE0SNpDq8eu/VGXyF9r7M0azK3Wy7OlYXsuyYLFzHg=="], - - "color-convert": ["color-convert@2.0.1", "", { "dependencies": { "color-name": "~1.1.4" } }, "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="], - - "color-name": ["color-name@1.1.4", "", {}, "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="], - - "comma-separated-tokens": ["comma-separated-tokens@2.0.3", "", {}, "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg=="], - - "commander": ["commander@4.1.1", "", {}, "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA=="], - - "comment-parser": ["comment-parser@1.4.1", "", {}, "sha512-buhp5kePrmda3vhc5B9t7pUQXAb2Tnd0qgpkIhPhkHXxJpiPJ11H0ZEU0oBpJ2QztSbzG/ZxMj/CHsYJqRHmyg=="], - - "compare-versions": ["compare-versions@6.1.1", "", {}, "sha512-4hm4VPpIecmlg59CHXnRDnqGplJFrbLG4aFEl5vl6cK1u76ws3LLvX7ikFnTDl5vo39sjWD6AaDPYodJp/NNHg=="], - - "concat-map": ["concat-map@0.0.1", "", {}, "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg=="], - - "confbox": ["confbox@0.2.2", "", {}, "sha512-1NB+BKqhtNipMsov4xI/NnhCKp9XG9NamYp5PVm9klAT0fsrNPjaFICsCFhNhwZJKNh7zB/3q8qXz0E9oaMNtQ=="], - - "convert-source-map": ["convert-source-map@2.0.0", "", {}, "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg=="], - - "core-js-compat": ["core-js-compat@3.47.0", "", { "dependencies": { "browserslist": "^4.28.0" } }, "sha512-IGfuznZ/n7Kp9+nypamBhvwdwLsW6KC8IOaURw2doAK5e98AG3acVLdh0woOnEqCfUtS+Vu882JE4k/DAm3ItQ=="], - - "cross-spawn": ["cross-spawn@7.0.6", "", { "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", "which": "^2.0.1" } }, "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA=="], - - "css-color-keywords": 
["css-color-keywords@1.0.0", "", {}, "sha512-FyyrDHZKEjXDpNJYvVsV960FiqQyXc/LlYmsxl2BcdMb2WPx0OGRVgTg55rPSyLSNMqP52R9r8geSp7apN3Ofg=="], - - "css-to-react-native": ["css-to-react-native@3.2.0", "", { "dependencies": { "camelize": "^1.0.0", "css-color-keywords": "^1.0.0", "postcss-value-parser": "^4.0.2" } }, "sha512-e8RKaLXMOFii+02mOlqwjbD00KSEKqblnpO9e++1aXS1fPQOpS1YoqdVHBqPjHNoxeF2mimzVqawm2KCbEdtHQ=="], - - "css-tree": ["css-tree@3.1.0", "", { "dependencies": { "mdn-data": "2.12.2", "source-map-js": "^1.0.1" } }, "sha512-0eW44TGN5SQXU1mWSkKwFstI/22X2bG1nYzZTYMAWjylYURhse752YgbE4Cx46AC+bAvI+/dYTPRk1LqSUnu6w=="], - - "cssesc": ["cssesc@3.0.0", "", { "bin": { "cssesc": "bin/cssesc" } }, "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg=="], - - "cssstyle": ["cssstyle@5.3.4", "", { "dependencies": { "@asamuzakjp/css-color": "^4.1.0", "@csstools/css-syntax-patches-for-csstree": "1.0.14", "css-tree": "^3.1.0" } }, "sha512-KyOS/kJMEq5O9GdPnaf82noigg5X5DYn0kZPJTaAsCUaBizp6Xa1y9D4Qoqf/JazEXWuruErHgVXwjN5391ZJw=="], - - "csstype": ["csstype@3.2.3", "", {}, "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ=="], - - "d3-array": ["d3-array@3.2.4", "", { "dependencies": { "internmap": "1 - 2" } }, "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg=="], - - "d3-color": ["d3-color@3.1.0", "", {}, "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA=="], - - "d3-ease": ["d3-ease@3.0.1", "", {}, "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w=="], - - "d3-format": ["d3-format@3.1.0", "", {}, "sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA=="], - - "d3-interpolate": ["d3-interpolate@3.0.1", "", { "dependencies": { "d3-color": "1 - 3" } }, 
"sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g=="], - - "d3-path": ["d3-path@3.1.0", "", {}, "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ=="], - - "d3-scale": ["d3-scale@4.0.2", "", { "dependencies": { "d3-array": "2.10.0 - 3", "d3-format": "1 - 3", "d3-interpolate": "1.2.0 - 3", "d3-time": "2.1.1 - 3", "d3-time-format": "2 - 4" } }, "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ=="], - - "d3-shape": ["d3-shape@3.2.0", "", { "dependencies": { "d3-path": "^3.1.0" } }, "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA=="], - - "d3-time": ["d3-time@3.1.0", "", { "dependencies": { "d3-array": "2 - 3" } }, "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q=="], - - "d3-time-format": ["d3-time-format@4.1.0", "", { "dependencies": { "d3-time": "1 - 3" } }, "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg=="], - - "d3-timer": ["d3-timer@3.0.1", "", {}, "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA=="], - - "damerau-levenshtein": ["damerau-levenshtein@1.0.8", "", {}, "sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA=="], - - "data-urls": ["data-urls@6.0.0", "", { "dependencies": { "whatwg-mimetype": "^4.0.0", "whatwg-url": "^15.0.0" } }, "sha512-BnBS08aLUM+DKamupXs3w2tJJoqU+AkaE/+6vQxi/G/DPmIZFJJp9Dkb1kM03AZx8ADehDUZgsNxju3mPXZYIA=="], - - "data-view-buffer": ["data-view-buffer@1.0.2", "", { "dependencies": { "call-bound": "^1.0.3", "es-errors": "^1.3.0", "is-data-view": "^1.0.2" } }, "sha512-EmKO5V3OLXh1rtK2wgXRansaK1/mtVdTUEiEI0W8RkvgT05kfxaH29PliLnpLP73yYO6142Q72QNa8Wx/A5CqQ=="], - - "data-view-byte-length": ["data-view-byte-length@1.0.2", "", { "dependencies": { "call-bound": "^1.0.3", "es-errors": 
"^1.3.0", "is-data-view": "^1.0.2" } }, "sha512-tuhGbE6CfTM9+5ANGf+oQb72Ky/0+s3xKUpHvShfiz2RxMFgFPjsXuRLBVMtvMs15awe45SRb83D6wH4ew6wlQ=="], - - "data-view-byte-offset": ["data-view-byte-offset@1.0.1", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "is-data-view": "^1.0.1" } }, "sha512-BS8PfmtDGnrgYdOonGZQdLZslWIeCGFP9tpan0hi1Co2Zr2NKADsvGYA8XxuG/4UWgJ6Cjtv+YJnB6MM69QGlQ=="], - - "date-fns": ["date-fns@4.1.0", "", {}, "sha512-Ukq0owbQXxa/U3EGtsdVBkR1w7KOQ5gIBqdH2hkvknzZPYvBxb/aa6E8L7tmjFtkwZBu3UXBbjIgPo/Ez4xaNg=="], - - "date-fns-jalali": ["date-fns-jalali@4.1.0-0", "", {}, "sha512-hTIP/z+t+qKwBDcmmsnmjWTduxCg+5KfdqWQvb2X/8C9+knYY6epN/pfxdDuyVlSVeFz0sM5eEfwIUQ70U4ckg=="], - - "debug": ["debug@4.4.3", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA=="], - - "decimal.js": ["decimal.js@10.6.0", "", {}, "sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg=="], - - "decimal.js-light": ["decimal.js-light@2.5.1", "", {}, "sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg=="], - - "decode-named-character-reference": ["decode-named-character-reference@1.2.0", "", { "dependencies": { "character-entities": "^2.0.0" } }, "sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q=="], - - "deep-is": ["deep-is@0.1.4", "", {}, "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ=="], - - "define-data-property": ["define-data-property@1.1.4", "", { "dependencies": { "es-define-property": "^1.0.0", "es-errors": "^1.3.0", "gopd": "^1.0.1" } }, "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A=="], - - "define-properties": ["define-properties@1.2.1", "", { "dependencies": { "define-data-property": "^1.0.1", "has-property-descriptors": "^1.0.0", "object-keys": 
"^1.1.1" } }, "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg=="], - - "dequal": ["dequal@2.0.3", "", {}, "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA=="], - - "detect-libc": ["detect-libc@2.1.2", "", {}, "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ=="], - - "detect-node-es": ["detect-node-es@1.1.0", "", {}, "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ=="], - - "devlop": ["devlop@1.1.0", "", { "dependencies": { "dequal": "^2.0.0" } }, "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA=="], - - "didyoumean": ["didyoumean@1.2.2", "", {}, "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw=="], - - "diff-sequences": ["diff-sequences@27.5.1", "", {}, "sha512-k1gCAXAsNgLwEL+Y8Wvl+M6oEFj5bgazfZULpS5CneoPPXRaCCW7dm+q21Ky2VEE5X+VeRDBVg1Pcvvsr4TtNQ=="], - - "dlv": ["dlv@1.1.3", "", {}, "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA=="], - - "doctrine": ["doctrine@2.1.0", "", { "dependencies": { "esutils": "^2.0.2" } }, "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw=="], - - "dunder-proto": ["dunder-proto@1.0.1", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.1", "es-errors": "^1.3.0", "gopd": "^1.2.0" } }, "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A=="], - - "electron-to-chromium": ["electron-to-chromium@1.5.267", "", {}, "sha512-0Drusm6MVRXSOJpGbaSVgcQsuB4hEkMpHXaVstcPmhu5LIedxs1xNK/nIxmQIU/RPC0+1/o0AVZfBTkTNJOdUw=="], - - "emoji-regex": ["emoji-regex@9.2.2", "", {}, "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="], - - "empathic": ["empathic@2.0.0", "", {}, 
"sha512-i6UzDscO/XfAcNYD75CfICkmfLedpyPDdozrLMmQc5ORaQcdMoc21OnlEylMIqI7U8eniKrPMxxtj8k0vhmJhA=="], - - "enhanced-resolve": ["enhanced-resolve@5.18.4", "", { "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.2.0" } }, "sha512-LgQMM4WXU3QI+SYgEc2liRgznaD5ojbmY3sb8LxyguVkIg5FxdpTkvk72te2R38/TGKxH634oLxXRGY6d7AP+Q=="], - - "entities": ["entities@6.0.1", "", {}, "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g=="], - - "es-abstract": ["es-abstract@1.24.1", "", { "dependencies": { "array-buffer-byte-length": "^1.0.2", "arraybuffer.prototype.slice": "^1.0.4", "available-typed-arrays": "^1.0.7", "call-bind": "^1.0.8", "call-bound": "^1.0.4", "data-view-buffer": "^1.0.2", "data-view-byte-length": "^1.0.2", "data-view-byte-offset": "^1.0.1", "es-define-property": "^1.0.1", "es-errors": "^1.3.0", "es-object-atoms": "^1.1.1", "es-set-tostringtag": "^2.1.0", "es-to-primitive": "^1.3.0", "function.prototype.name": "^1.1.8", "get-intrinsic": "^1.3.0", "get-proto": "^1.0.1", "get-symbol-description": "^1.1.0", "globalthis": "^1.0.4", "gopd": "^1.2.0", "has-property-descriptors": "^1.0.2", "has-proto": "^1.2.0", "has-symbols": "^1.1.0", "hasown": "^2.0.2", "internal-slot": "^1.1.0", "is-array-buffer": "^3.0.5", "is-callable": "^1.2.7", "is-data-view": "^1.0.2", "is-negative-zero": "^2.0.3", "is-regex": "^1.2.1", "is-set": "^2.0.3", "is-shared-array-buffer": "^1.0.4", "is-string": "^1.1.1", "is-typed-array": "^1.1.15", "is-weakref": "^1.1.1", "math-intrinsics": "^1.1.0", "object-inspect": "^1.13.4", "object-keys": "^1.1.1", "object.assign": "^4.1.7", "own-keys": "^1.0.1", "regexp.prototype.flags": "^1.5.4", "safe-array-concat": "^1.1.3", "safe-push-apply": "^1.0.0", "safe-regex-test": "^1.1.0", "set-proto": "^1.0.0", "stop-iteration-iterator": "^1.1.0", "string.prototype.trim": "^1.2.10", "string.prototype.trimend": "^1.0.9", "string.prototype.trimstart": "^1.0.8", "typed-array-buffer": "^1.0.3", "typed-array-byte-length": 
"^1.0.3", "typed-array-byte-offset": "^1.0.4", "typed-array-length": "^1.0.7", "unbox-primitive": "^1.1.0", "which-typed-array": "^1.1.19" } }, "sha512-zHXBLhP+QehSSbsS9Pt23Gg964240DPd6QCf8WpkqEXxQ7fhdZzYsocOr5u7apWonsS5EjZDmTF+/slGMyasvw=="], - - "es-define-property": ["es-define-property@1.0.1", "", {}, "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g=="], - - "es-errors": ["es-errors@1.3.0", "", {}, "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw=="], - - "es-iterator-helpers": ["es-iterator-helpers@1.2.2", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.4", "define-properties": "^1.2.1", "es-abstract": "^1.24.1", "es-errors": "^1.3.0", "es-set-tostringtag": "^2.1.0", "function-bind": "^1.1.2", "get-intrinsic": "^1.3.0", "globalthis": "^1.0.4", "gopd": "^1.2.0", "has-property-descriptors": "^1.0.2", "has-proto": "^1.2.0", "has-symbols": "^1.1.0", "internal-slot": "^1.1.0", "iterator.prototype": "^1.1.5", "safe-array-concat": "^1.1.3" } }, "sha512-BrUQ0cPTB/IwXj23HtwHjS9n7O4h9FX94b4xc5zlTHxeLgTAdzYUDyy6KdExAl9lbN5rtfe44xpjpmj9grxs5w=="], - - "es-object-atoms": ["es-object-atoms@1.1.1", "", { "dependencies": { "es-errors": "^1.3.0" } }, "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA=="], - - "es-set-tostringtag": ["es-set-tostringtag@2.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "get-intrinsic": "^1.2.6", "has-tostringtag": "^1.0.2", "hasown": "^2.0.2" } }, "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA=="], - - "es-shim-unscopables": ["es-shim-unscopables@1.1.0", "", { "dependencies": { "hasown": "^2.0.2" } }, "sha512-d9T8ucsEhh8Bi1woXCf+TIKDIROLG5WCkxg8geBCbvk22kzwC5G2OnXVMO6FUsvQlgUUXQ2itephWDLqDzbeCw=="], - - "es-to-primitive": ["es-to-primitive@1.3.0", "", { "dependencies": { "is-callable": "^1.2.7", "is-date-object": "^1.0.5", "is-symbol": 
"^1.0.4" } }, "sha512-w+5mJ3GuFL+NjVtJlvydShqE1eN3h3PbI7/5LAsYJP/2qtuMXjfL2LpHSRqo4b4eSF5K/DH1JXKUAHSB2UW50g=="], - - "es-toolkit": ["es-toolkit@1.43.0", "", {}, "sha512-SKCT8AsWvYzBBuUqMk4NPwFlSdqLpJwmy6AP322ERn8W2YLIB6JBXnwMI2Qsh2gfphT3q7EKAxKb23cvFHFwKA=="], - - "esbuild": ["esbuild@0.25.12", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.25.12", "@esbuild/android-arm": "0.25.12", "@esbuild/android-arm64": "0.25.12", "@esbuild/android-x64": "0.25.12", "@esbuild/darwin-arm64": "0.25.12", "@esbuild/darwin-x64": "0.25.12", "@esbuild/freebsd-arm64": "0.25.12", "@esbuild/freebsd-x64": "0.25.12", "@esbuild/linux-arm": "0.25.12", "@esbuild/linux-arm64": "0.25.12", "@esbuild/linux-ia32": "0.25.12", "@esbuild/linux-loong64": "0.25.12", "@esbuild/linux-mips64el": "0.25.12", "@esbuild/linux-ppc64": "0.25.12", "@esbuild/linux-riscv64": "0.25.12", "@esbuild/linux-s390x": "0.25.12", "@esbuild/linux-x64": "0.25.12", "@esbuild/netbsd-arm64": "0.25.12", "@esbuild/netbsd-x64": "0.25.12", "@esbuild/openbsd-arm64": "0.25.12", "@esbuild/openbsd-x64": "0.25.12", "@esbuild/openharmony-arm64": "0.25.12", "@esbuild/sunos-x64": "0.25.12", "@esbuild/win32-arm64": "0.25.12", "@esbuild/win32-ia32": "0.25.12", "@esbuild/win32-x64": "0.25.12" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg=="], - - "escalade": ["escalade@3.2.0", "", {}, "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA=="], - - "escape-string-regexp": ["escape-string-regexp@4.0.0", "", {}, "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA=="], - - "eslint": ["eslint@9.39.2", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.8.0", "@eslint-community/regexpp": "^4.12.1", "@eslint/config-array": "^0.21.1", "@eslint/config-helpers": "^0.4.2", "@eslint/core": "^0.17.0", "@eslint/eslintrc": "^3.3.1", "@eslint/js": "9.39.2", 
"@eslint/plugin-kit": "^0.4.1", "@humanfs/node": "^0.16.6", "@humanwhocodes/module-importer": "^1.0.1", "@humanwhocodes/retry": "^0.4.2", "@types/estree": "^1.0.6", "ajv": "^6.12.4", "chalk": "^4.0.0", "cross-spawn": "^7.0.6", "debug": "^4.3.2", "escape-string-regexp": "^4.0.0", "eslint-scope": "^8.4.0", "eslint-visitor-keys": "^4.2.1", "espree": "^10.4.0", "esquery": "^1.5.0", "esutils": "^2.0.2", "fast-deep-equal": "^3.1.3", "file-entry-cache": "^8.0.0", "find-up": "^5.0.0", "glob-parent": "^6.0.2", "ignore": "^5.2.0", "imurmurhash": "^0.1.4", "is-glob": "^4.0.0", "json-stable-stringify-without-jsonify": "^1.0.1", "lodash.merge": "^4.6.2", "minimatch": "^3.1.2", "natural-compare": "^1.4.0", "optionator": "^0.9.3" }, "peerDependencies": { "jiti": "*" }, "optionalPeers": ["jiti"], "bin": { "eslint": "bin/eslint.js" } }, "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw=="], - - "eslint-compat-utils": ["eslint-compat-utils@0.6.5", "", { "dependencies": { "semver": "^7.5.4" }, "peerDependencies": { "eslint": ">=6.0.0" } }, "sha512-vAUHYzue4YAa2hNACjB8HvUQj5yehAZgiClyFVVom9cP8z5NSFq3PwB/TtJslN2zAMgRX6FCFCjYBbQh71g5RQ=="], - - "eslint-config-flat-gitignore": ["eslint-config-flat-gitignore@2.1.0", "", { "dependencies": { "@eslint/compat": "^1.2.5" }, "peerDependencies": { "eslint": "^9.5.0" } }, "sha512-cJzNJ7L+psWp5mXM7jBX+fjHtBvvh06RBlcweMhKD8jWqQw0G78hOW5tpVALGHGFPsBV+ot2H+pdDGJy6CV8pA=="], - - "eslint-config-next": ["eslint-config-next@15.5.8", "", { "dependencies": { "@next/eslint-plugin-next": "15.5.8", "@rushstack/eslint-patch": "^1.10.3", "@typescript-eslint/eslint-plugin": "^5.4.2 || ^6.0.0 || ^7.0.0 || ^8.0.0", "@typescript-eslint/parser": "^5.4.2 || ^6.0.0 || ^7.0.0 || ^8.0.0", "eslint-import-resolver-node": "^0.3.6", "eslint-import-resolver-typescript": "^3.5.2", "eslint-plugin-import": "^2.31.0", "eslint-plugin-jsx-a11y": "^6.10.0", "eslint-plugin-react": "^7.37.0", "eslint-plugin-react-hooks": "^5.0.0" }, 
"peerDependencies": { "eslint": "^7.23.0 || ^8.0.0 || ^9.0.0", "typescript": ">=3.3.1" }, "optionalPeers": ["typescript"] }, "sha512-FU2nFCVYt3z60EH8upds4frThuIAiSt81zUtQI/9fIc25VVVT3yaKsFwGIY6BIDT/I0X/Dam+RR7xzTRZMyArQ=="], - - "eslint-flat-config-utils": ["eslint-flat-config-utils@2.1.4", "", { "dependencies": { "pathe": "^2.0.3" } }, "sha512-bEnmU5gqzS+4O+id9vrbP43vByjF+8KOs+QuuV4OlqAuXmnRW2zfI/Rza1fQvdihQ5h4DUo0NqFAiViD4mSrzQ=="], - - "eslint-formatting-reporter": ["eslint-formatting-reporter@0.0.0", "", { "dependencies": { "prettier-linter-helpers": "^1.0.0" }, "peerDependencies": { "eslint": ">=8.40.0" } }, "sha512-k9RdyTqxqN/wNYVaTk/ds5B5rA8lgoAmvceYN7bcZMBwU7TuXx5ntewJv81eF3pIL/CiJE+pJZm36llG8yhyyw=="], - - "eslint-import-resolver-node": ["eslint-import-resolver-node@0.3.9", "", { "dependencies": { "debug": "^3.2.7", "is-core-module": "^2.13.0", "resolve": "^1.22.4" } }, "sha512-WFj2isz22JahUv+B788TlO3N6zL3nNJGU8CcZbPZvVEkBPaJdCV4vy5wyghty5ROFbCRnm132v8BScu5/1BQ8g=="], - - "eslint-import-resolver-typescript": ["eslint-import-resolver-typescript@3.10.1", "", { "dependencies": { "@nolyfill/is-core-module": "1.0.39", "debug": "^4.4.0", "get-tsconfig": "^4.10.0", "is-bun-module": "^2.0.0", "stable-hash": "^0.0.5", "tinyglobby": "^0.2.13", "unrs-resolver": "^1.6.2" }, "peerDependencies": { "eslint": "*", "eslint-plugin-import": "*", "eslint-plugin-import-x": "*" }, "optionalPeers": ["eslint-plugin-import", "eslint-plugin-import-x"] }, "sha512-A1rHYb06zjMGAxdLSkN2fXPBwuSaQ0iO5M/hdyS0Ajj1VBaRp0sPD3dn1FhME3c/JluGFbwSxyCfqdSbtQLAHQ=="], - - "eslint-json-compat-utils": ["eslint-json-compat-utils@0.2.1", "", { "dependencies": { "esquery": "^1.6.0" }, "peerDependencies": { "eslint": "*", "jsonc-eslint-parser": "^2.4.0" } }, "sha512-YzEodbDyW8DX8bImKhAcCeu/L31Dd/70Bidx2Qex9OFUtgzXLqtfWL4Hr5fM/aCCB8QUZLuJur0S9k6UfgFkfg=="], - - "eslint-merge-processors": ["eslint-merge-processors@2.0.0", "", { "peerDependencies": { "eslint": "*" } }, 
"sha512-sUuhSf3IrJdGooquEUB5TNpGNpBoQccbnaLHsb1XkBLUPPqCNivCpY05ZcpCOiV9uHwO2yxXEWVczVclzMxYlA=="], - - "eslint-module-utils": ["eslint-module-utils@2.12.1", "", { "dependencies": { "debug": "^3.2.7" } }, "sha512-L8jSWTze7K2mTg0vos/RuLRS5soomksDPoJLXIslC7c8Wmut3bx7CPpJijDcBZtxQ5lrbUdM+s0OlNbz0DCDNw=="], - - "eslint-parser-plain": ["eslint-parser-plain@0.1.1", "", {}, "sha512-KRgd6wuxH4U8kczqPp+Oyk4irThIhHWxgFgLDtpgjUGVIS3wGrJntvZW/p6hHq1T4FOwnOtCNkvAI4Kr+mQ/Hw=="], - - "eslint-plugin-antfu": ["eslint-plugin-antfu@3.1.1", "", { "peerDependencies": { "eslint": "*" } }, "sha512-7Q+NhwLfHJFvopI2HBZbSxWXngTwBLKxW1AGXLr2lEGxcEIK/AsDs8pn8fvIizl5aZjBbVbVK5ujmMpBe4Tvdg=="], - - "eslint-plugin-command": ["eslint-plugin-command@3.4.0", "", { "dependencies": { "@es-joy/jsdoccomment": "^0.78.0" }, "peerDependencies": { "eslint": "*" } }, "sha512-EW4eg/a7TKEhG0s5IEti72kh3YOTlnhfFNuctq5WnB1fst37/IHTd5OkD+vnlRf3opTvUcSRihAateP6bT5ZcA=="], - - "eslint-plugin-es-x": ["eslint-plugin-es-x@7.8.0", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.1.2", "@eslint-community/regexpp": "^4.11.0", "eslint-compat-utils": "^0.5.1" }, "peerDependencies": { "eslint": ">=8" } }, "sha512-7Ds8+wAAoV3T+LAKeu39Y5BzXCrGKrcISfgKEqTS4BDN8SFEDQd0S43jiQ8vIa3wUKD07qitZdfzlenSi8/0qQ=="], - - "eslint-plugin-format": ["eslint-plugin-format@1.1.0", "", { "dependencies": { "@dprint/formatter": "^0.3.0", "@dprint/markdown": "^0.17.8", "@dprint/toml": "^0.6.4", "eslint-formatting-reporter": "^0.0.0", "eslint-parser-plain": "^0.1.1", "prettier": "^3.7.4", "synckit": "^0.11.11" }, "peerDependencies": { "eslint": "^8.40.0 || ^9.0.0" } }, "sha512-zjGPZcftddkO9GydBwvTKBV4ICN6a++XK0zIPi3HZHlU8W9EaftTA3XAanJvGAXQUYEqAADtgQi08SX+afbPrg=="], - - "eslint-plugin-import": ["eslint-plugin-import@2.32.0", "", { "dependencies": { "@rtsao/scc": "^1.1.0", "array-includes": "^3.1.9", "array.prototype.findlastindex": "^1.2.6", "array.prototype.flat": "^1.3.3", "array.prototype.flatmap": "^1.3.3", "debug": "^3.2.7", 
"doctrine": "^2.1.0", "eslint-import-resolver-node": "^0.3.9", "eslint-module-utils": "^2.12.1", "hasown": "^2.0.2", "is-core-module": "^2.16.1", "is-glob": "^4.0.3", "minimatch": "^3.1.2", "object.fromentries": "^2.0.8", "object.groupby": "^1.0.3", "object.values": "^1.2.1", "semver": "^6.3.1", "string.prototype.trimend": "^1.0.9", "tsconfig-paths": "^3.15.0" }, "peerDependencies": { "eslint": "^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8 || ^9" } }, "sha512-whOE1HFo/qJDyX4SnXzP4N6zOWn79WhnCUY/iDR0mPfQZO8wcYE4JClzI2oZrhBnnMUCBCHZhO6VQyoBU95mZA=="], - - "eslint-plugin-import-lite": ["eslint-plugin-import-lite@0.3.0", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.7.0", "@typescript-eslint/types": "^8.34.0" }, "peerDependencies": { "eslint": ">=9.0.0", "typescript": ">=4.5" }, "optionalPeers": ["typescript"] }, "sha512-dkNBAL6jcoCsXZsQ/Tt2yXmMDoNt5NaBh/U7yvccjiK8cai6Ay+MK77bMykmqQA2bTF6lngaLCDij6MTO3KkvA=="], - - "eslint-plugin-jsdoc": ["eslint-plugin-jsdoc@61.5.0", "", { "dependencies": { "@es-joy/jsdoccomment": "~0.76.0", "@es-joy/resolve.exports": "1.2.0", "are-docs-informative": "^0.0.2", "comment-parser": "1.4.1", "debug": "^4.4.3", "escape-string-regexp": "^4.0.0", "espree": "^10.4.0", "esquery": "^1.6.0", "html-entities": "^2.6.0", "object-deep-merge": "^2.0.0", "parse-imports-exports": "^0.2.4", "semver": "^7.7.3", "spdx-expression-parse": "^4.0.0", "to-valid-identifier": "^1.0.0" }, "peerDependencies": { "eslint": "^7.0.0 || ^8.0.0 || ^9.0.0" } }, "sha512-PR81eOGq4S7diVnV9xzFSBE4CDENRQGP0Lckkek8AdHtbj+6Bm0cItwlFnxsLFriJHspiE3mpu8U20eODyToIg=="], - - "eslint-plugin-jsonc": ["eslint-plugin-jsonc@2.21.0", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.5.1", "diff-sequences": "^27.5.1", "eslint-compat-utils": "^0.6.4", "eslint-json-compat-utils": "^0.2.1", "espree": "^9.6.1 || ^10.3.0", "graphemer": "^1.4.0", "jsonc-eslint-parser": "^2.4.0", "natural-compare": "^1.4.0", "synckit": "^0.6.2 || ^0.7.3 || ^0.11.5" }, "peerDependencies": 
{ "eslint": ">=6.0.0" } }, "sha512-HttlxdNG5ly3YjP1cFMP62R4qKLxJURfBZo2gnMY+yQojZxkLyOpY1H1KRTKBmvQeSG9pIpSGEhDjE17vvYosg=="], - - "eslint-plugin-jsx-a11y": ["eslint-plugin-jsx-a11y@6.10.2", "", { "dependencies": { "aria-query": "^5.3.2", "array-includes": "^3.1.8", "array.prototype.flatmap": "^1.3.2", "ast-types-flow": "^0.0.8", "axe-core": "^4.10.0", "axobject-query": "^4.1.0", "damerau-levenshtein": "^1.0.8", "emoji-regex": "^9.2.2", "hasown": "^2.0.2", "jsx-ast-utils": "^3.3.5", "language-tags": "^1.0.9", "minimatch": "^3.1.2", "object.fromentries": "^2.0.8", "safe-regex-test": "^1.0.3", "string.prototype.includes": "^2.0.1" }, "peerDependencies": { "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9" } }, "sha512-scB3nz4WmG75pV8+3eRUQOHZlNSUhFNq37xnpgRkCCELU3XMvXAxLk1eqWWyE22Ki4Q01Fnsw9BA3cJHDPgn2Q=="], - - "eslint-plugin-n": ["eslint-plugin-n@17.23.1", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.5.0", "enhanced-resolve": "^5.17.1", "eslint-plugin-es-x": "^7.8.0", "get-tsconfig": "^4.8.1", "globals": "^15.11.0", "globrex": "^0.1.2", "ignore": "^5.3.2", "semver": "^7.6.3", "ts-declaration-location": "^1.0.6" }, "peerDependencies": { "eslint": ">=8.23.0" } }, "sha512-68PealUpYoHOBh332JLLD9Sj7OQUDkFpmcfqt8R9sySfFSeuGJjMTJQvCRRB96zO3A/PELRLkPrzsHmzEFQQ5A=="], - - "eslint-plugin-no-only-tests": ["eslint-plugin-no-only-tests@3.3.0", "", {}, "sha512-brcKcxGnISN2CcVhXJ/kEQlNa0MEfGRtwKtWA16SkqXHKitaKIMrfemJKLKX1YqDU5C/5JY3PvZXd5jEW04e0Q=="], - - "eslint-plugin-perfectionist": ["eslint-plugin-perfectionist@4.15.1", "", { "dependencies": { "@typescript-eslint/types": "^8.38.0", "@typescript-eslint/utils": "^8.38.0", "natural-orderby": "^5.0.0" }, "peerDependencies": { "eslint": ">=8.45.0" } }, "sha512-MHF0cBoOG0XyBf7G0EAFCuJJu4I18wy0zAoT1OHfx2o6EOx1EFTIzr2HGeuZa1kDcusoX0xJ9V7oZmaeFd773Q=="], - - "eslint-plugin-pnpm": ["eslint-plugin-pnpm@1.4.3", "", { "dependencies": { "empathic": "^2.0.0", "jsonc-eslint-parser": "^2.4.2", "pathe": "^2.0.3", 
"pnpm-workspace-yaml": "1.4.3", "tinyglobby": "^0.2.15", "yaml": "^2.8.2", "yaml-eslint-parser": "^1.3.2" }, "peerDependencies": { "eslint": "^9.0.0" } }, "sha512-wdWrkWN5mxRgEADkQvxwv0xA+0++/hYDD5OyXTL6UqPLUPdcCFQJO61NO7IKhEqb3GclWs02OoFs1METN+a3zQ=="], - - "eslint-plugin-react": ["eslint-plugin-react@7.37.5", "", { "dependencies": { "array-includes": "^3.1.8", "array.prototype.findlast": "^1.2.5", "array.prototype.flatmap": "^1.3.3", "array.prototype.tosorted": "^1.1.4", "doctrine": "^2.1.0", "es-iterator-helpers": "^1.2.1", "estraverse": "^5.3.0", "hasown": "^2.0.2", "jsx-ast-utils": "^2.4.1 || ^3.0.0", "minimatch": "^3.1.2", "object.entries": "^1.1.9", "object.fromentries": "^2.0.8", "object.values": "^1.2.1", "prop-types": "^15.8.1", "resolve": "^2.0.0-next.5", "semver": "^6.3.1", "string.prototype.matchall": "^4.0.12", "string.prototype.repeat": "^1.0.0" }, "peerDependencies": { "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9.7" } }, "sha512-Qteup0SqU15kdocexFNAJMvCJEfa2xUKNV4CC1xsVMrIIqEy3SQ/rqyxCWNzfrd3/ldy6HMlD2e0JDVpDg2qIA=="], - - "eslint-plugin-react-dom": ["eslint-plugin-react-dom@2.3.13", "", { "dependencies": { "@eslint-react/ast": "2.3.13", "@eslint-react/core": "2.3.13", "@eslint-react/eff": "2.3.13", "@eslint-react/shared": "2.3.13", "@eslint-react/var": "2.3.13", "@typescript-eslint/scope-manager": "^8.49.0", "@typescript-eslint/types": "^8.49.0", "@typescript-eslint/utils": "^8.49.0", "compare-versions": "^6.1.1", "string-ts": "^2.3.1", "ts-pattern": "^5.9.0" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-O9jglTOnnuyfJcSxjeVc8lqIp5kuS9/0MLLCHlOTH8ZjIifHHxUr6GZ2fd4la9y0FsoEYXEO7DBIMjWx2vCwjg=="], - - "eslint-plugin-react-hooks": ["eslint-plugin-react-hooks@7.0.1", "", { "dependencies": { "@babel/core": "^7.24.4", "@babel/parser": "^7.24.4", "hermes-parser": "^0.25.1", "zod": "^3.25.0 || ^4.0.0", "zod-validation-error": "^3.5.0 || ^4.0.0" }, "peerDependencies": { "eslint": "^3.0.0 || ^4.0.0 || 
^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0" } }, "sha512-O0d0m04evaNzEPoSW+59Mezf8Qt0InfgGIBJnpC0h3NH/WjUAR7BIKUfysC6todmtiZ/A0oUVS8Gce0WhBrHsA=="], - - "eslint-plugin-react-hooks-extra": ["eslint-plugin-react-hooks-extra@2.3.13", "", { "dependencies": { "@eslint-react/ast": "2.3.13", "@eslint-react/core": "2.3.13", "@eslint-react/eff": "2.3.13", "@eslint-react/shared": "2.3.13", "@eslint-react/var": "2.3.13", "@typescript-eslint/scope-manager": "^8.49.0", "@typescript-eslint/type-utils": "^8.49.0", "@typescript-eslint/types": "^8.49.0", "@typescript-eslint/utils": "^8.49.0", "string-ts": "^2.3.1", "ts-pattern": "^5.9.0" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-NSnY8yvtrvu2FAALLuvc2xesIAkMqGyJgilpy8wEi1w/Nw6v0IwBEffoNKLq9OHW4v3nikud3aBTqWfWKOx67Q=="], - - "eslint-plugin-react-naming-convention": ["eslint-plugin-react-naming-convention@2.3.13", "", { "dependencies": { "@eslint-react/ast": "2.3.13", "@eslint-react/core": "2.3.13", "@eslint-react/eff": "2.3.13", "@eslint-react/shared": "2.3.13", "@eslint-react/var": "2.3.13", "@typescript-eslint/scope-manager": "^8.49.0", "@typescript-eslint/type-utils": "^8.49.0", "@typescript-eslint/types": "^8.49.0", "@typescript-eslint/utils": "^8.49.0", "string-ts": "^2.3.1", "ts-pattern": "^5.9.0" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-2iler1ldFpB/PaNpN8WAVk6dKYKwKcoGm1j0JAAjdCrsfOTJ007ol2xTAyoHKAbMOvkZSi7qq90q+Q//RuhWwA=="], - - "eslint-plugin-react-refresh": ["eslint-plugin-react-refresh@0.4.25", "", { "peerDependencies": { "eslint": ">=8.40" } }, "sha512-dRUD2LOdEqI4zXHqbQ442blQAzdSuShAaiSq5Vtyy6LT08YUf0oOjBDo4VPx0dCPgiPWh1WB4dtbLOd0kOlDPQ=="], - - "eslint-plugin-react-web-api": ["eslint-plugin-react-web-api@2.3.13", "", { "dependencies": { "@eslint-react/ast": "2.3.13", "@eslint-react/core": "2.3.13", "@eslint-react/eff": "2.3.13", "@eslint-react/shared": "2.3.13", "@eslint-react/var": "2.3.13", 
"@typescript-eslint/scope-manager": "^8.49.0", "@typescript-eslint/types": "^8.49.0", "@typescript-eslint/utils": "^8.49.0", "string-ts": "^2.3.1", "ts-pattern": "^5.9.0" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-+UypRPHP9GFMulIENpsC/J+TygWywiyz2mb4qyUP6y/IwdcSilk1MyF9WquNYKB/4/FN4Rl1oRm6WMbfkbpMnQ=="], - - "eslint-plugin-react-x": ["eslint-plugin-react-x@2.3.13", "", { "dependencies": { "@eslint-react/ast": "2.3.13", "@eslint-react/core": "2.3.13", "@eslint-react/eff": "2.3.13", "@eslint-react/shared": "2.3.13", "@eslint-react/var": "2.3.13", "@typescript-eslint/scope-manager": "^8.49.0", "@typescript-eslint/type-utils": "^8.49.0", "@typescript-eslint/types": "^8.49.0", "@typescript-eslint/utils": "^8.49.0", "compare-versions": "^6.1.1", "is-immutable-type": "^5.0.1", "string-ts": "^2.3.1", "ts-api-utils": "^2.1.0", "ts-pattern": "^5.9.0" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-+m+V/5VLMxgx0VsFUUyflMNLQG0WFYspsfv0XJFqx7me3A2b3P20QatNDHQCYswz0PRbRFqinTPukPRhZh68ag=="], - - "eslint-plugin-regexp": ["eslint-plugin-regexp@2.10.0", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.11.0", "comment-parser": "^1.4.0", "jsdoc-type-pratt-parser": "^4.0.0", "refa": "^0.12.1", "regexp-ast-analysis": "^0.7.1", "scslre": "^0.3.0" }, "peerDependencies": { "eslint": ">=8.44.0" } }, "sha512-ovzQT8ESVn5oOe5a7gIDPD5v9bCSjIFJu57sVPDqgPRXicQzOnYfFN21WoQBQF18vrhT5o7UMKFwJQVVjyJ0ng=="], - - "eslint-plugin-toml": ["eslint-plugin-toml@0.12.0", "", { "dependencies": { "debug": "^4.1.1", "eslint-compat-utils": "^0.6.0", "lodash": "^4.17.19", "toml-eslint-parser": "^0.10.0" }, "peerDependencies": { "eslint": ">=6.0.0" } }, "sha512-+/wVObA9DVhwZB1nG83D2OAQRrcQZXy+drqUnFJKymqnmbnbfg/UPmEMCKrJNcEboUGxUjYrJlgy+/Y930mURQ=="], - - "eslint-plugin-unicorn": ["eslint-plugin-unicorn@62.0.0", "", { "dependencies": { 
"@babel/helper-validator-identifier": "^7.28.5", "@eslint-community/eslint-utils": "^4.9.0", "@eslint/plugin-kit": "^0.4.0", "change-case": "^5.4.4", "ci-info": "^4.3.1", "clean-regexp": "^1.0.0", "core-js-compat": "^3.46.0", "esquery": "^1.6.0", "find-up-simple": "^1.0.1", "globals": "^16.4.0", "indent-string": "^5.0.0", "is-builtin-module": "^5.0.0", "jsesc": "^3.1.0", "pluralize": "^8.0.0", "regexp-tree": "^0.1.27", "regjsparser": "^0.13.0", "semver": "^7.7.3", "strip-indent": "^4.1.1" }, "peerDependencies": { "eslint": ">=9.38.0" } }, "sha512-HIlIkGLkvf29YEiS/ImuDZQbP12gWyx5i3C6XrRxMvVdqMroCI9qoVYCoIl17ChN+U89pn9sVwLxhIWj5nEc7g=="], - - "eslint-plugin-unused-imports": ["eslint-plugin-unused-imports@4.3.0", "", { "peerDependencies": { "@typescript-eslint/eslint-plugin": "^8.0.0-0 || ^7.0.0 || ^6.0.0 || ^5.0.0", "eslint": "^9.0.0 || ^8.0.0" }, "optionalPeers": ["@typescript-eslint/eslint-plugin"] }, "sha512-ZFBmXMGBYfHttdRtOG9nFFpmUvMtbHSjsKrS20vdWdbfiVYsO3yA2SGYy9i9XmZJDfMGBflZGBCm70SEnFQtOA=="], - - "eslint-plugin-vue": ["eslint-plugin-vue@10.6.2", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.4.0", "natural-compare": "^1.4.0", "nth-check": "^2.1.1", "postcss-selector-parser": "^7.1.0", "semver": "^7.6.3", "xml-name-validator": "^4.0.0" }, "peerDependencies": { "@stylistic/eslint-plugin": "^2.0.0 || ^3.0.0 || ^4.0.0 || ^5.0.0", "@typescript-eslint/parser": "^7.0.0 || ^8.0.0", "eslint": "^8.57.0 || ^9.0.0", "vue-eslint-parser": "^10.0.0" }, "optionalPeers": ["@stylistic/eslint-plugin", "@typescript-eslint/parser"] }, "sha512-nA5yUs/B1KmKzvC42fyD0+l9Yd+LtEpVhWRbXuDj0e+ZURcTtyRbMDWUeJmTAh2wC6jC83raS63anNM2YT3NPw=="], - - "eslint-plugin-yml": ["eslint-plugin-yml@1.19.0", "", { "dependencies": { "debug": "^4.3.2", "diff-sequences": "^27.5.1", "escape-string-regexp": "4.0.0", "eslint-compat-utils": "^0.6.0", "natural-compare": "^1.4.0", "yaml-eslint-parser": "^1.2.1" }, "peerDependencies": { "eslint": ">=6.0.0" } }, 
"sha512-S+4GbcCWksFKAvFJtf0vpdiCkZZvDJCV4Zsi9ahmYkYOYcf+LRqqzvzkb/ST7vTYV6sFwXOvawzYyL/jFT2nQA=="], - - "eslint-processor-vue-blocks": ["eslint-processor-vue-blocks@2.0.0", "", { "peerDependencies": { "@vue/compiler-sfc": "^3.3.0", "eslint": ">=9.0.0" } }, "sha512-u4W0CJwGoWY3bjXAuFpc/b6eK3NQEI8MoeW7ritKj3G3z/WtHrKjkqf+wk8mPEy5rlMGS+k6AZYOw2XBoN/02Q=="], - - "eslint-scope": ["eslint-scope@8.4.0", "", { "dependencies": { "esrecurse": "^4.3.0", "estraverse": "^5.2.0" } }, "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg=="], - - "eslint-visitor-keys": ["eslint-visitor-keys@4.2.1", "", {}, "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ=="], - - "espree": ["espree@10.4.0", "", { "dependencies": { "acorn": "^8.15.0", "acorn-jsx": "^5.3.2", "eslint-visitor-keys": "^4.2.1" } }, "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ=="], - - "esquery": ["esquery@1.6.0", "", { "dependencies": { "estraverse": "^5.1.0" } }, "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg=="], - - "esrecurse": ["esrecurse@4.3.0", "", { "dependencies": { "estraverse": "^5.2.0" } }, "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag=="], - - "estraverse": ["estraverse@5.3.0", "", {}, "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA=="], - - "estree-walker": ["estree-walker@2.0.2", "", {}, "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w=="], - - "esutils": ["esutils@2.0.3", "", {}, "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g=="], - - "eventemitter3": ["eventemitter3@5.0.1", "", {}, "sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA=="], - - "exsolve": ["exsolve@1.0.8", "", {}, 
"sha512-LmDxfWXwcTArk8fUEnOfSZpHOJ6zOMUJKOtFLFqJLoKJetuQG874Uc7/Kki7zFLzYybmZhp1M7+98pfMqeX8yA=="], - - "fast-deep-equal": ["fast-deep-equal@3.1.3", "", {}, "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="], - - "fast-diff": ["fast-diff@1.3.0", "", {}, "sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw=="], - - "fast-glob": ["fast-glob@3.3.1", "", { "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", "glob-parent": "^5.1.2", "merge2": "^1.3.0", "micromatch": "^4.0.4" } }, "sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg=="], - - "fast-json-stable-stringify": ["fast-json-stable-stringify@2.1.0", "", {}, "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw=="], - - "fast-levenshtein": ["fast-levenshtein@2.0.6", "", {}, "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw=="], - - "fastq": ["fastq@1.19.1", "", { "dependencies": { "reusify": "^1.0.4" } }, "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ=="], - - "fault": ["fault@1.0.4", "", { "dependencies": { "format": "^0.2.0" } }, "sha512-CJ0HCB5tL5fYTEA7ToAq5+kTwd++Borf1/bifxd9iT70QcXr4MRrO3Llf8Ifs70q+SJcGHFtnIE/Nw6giCtECA=="], - - "fdir": ["fdir@6.5.0", "", { "peerDependencies": { "picomatch": "^3 || ^4" }, "optionalPeers": ["picomatch"] }, "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg=="], - - "file-entry-cache": ["file-entry-cache@8.0.0", "", { "dependencies": { "flat-cache": "^4.0.0" } }, "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ=="], - - "fill-range": ["fill-range@7.1.1", "", { "dependencies": { "to-regex-range": "^5.0.1" } }, 
"sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg=="], - - "find-up": ["find-up@5.0.0", "", { "dependencies": { "locate-path": "^6.0.0", "path-exists": "^4.0.0" } }, "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng=="], - - "find-up-simple": ["find-up-simple@1.0.1", "", {}, "sha512-afd4O7zpqHeRyg4PfDQsXmlDe2PfdHtJt6Akt8jOWaApLOZk5JXs6VMR29lz03pRe9mpykrRCYIYxaJYcfpncQ=="], - - "flat-cache": ["flat-cache@4.0.1", "", { "dependencies": { "flatted": "^3.2.9", "keyv": "^4.5.4" } }, "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw=="], - - "flatted": ["flatted@3.3.3", "", {}, "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg=="], - - "for-each": ["for-each@0.3.5", "", { "dependencies": { "is-callable": "^1.2.7" } }, "sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg=="], - - "format": ["format@0.2.2", "", {}, "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww=="], - - "framer-motion": ["framer-motion@12.23.26", "", { "dependencies": { "motion-dom": "^12.23.23", "motion-utils": "^12.23.6", "tslib": "^2.4.0" }, "peerDependencies": { "@emotion/is-prop-valid": "*", "react": "^18.0.0 || ^19.0.0", "react-dom": "^18.0.0 || ^19.0.0" }, "optionalPeers": ["@emotion/is-prop-valid", "react", "react-dom"] }, "sha512-cPcIhgR42xBn1Uj+PzOyheMtZ73H927+uWPDVhUMqxy8UHt6Okavb6xIz9J/phFUHUj0OncR6UvMfJTXoc/LKA=="], - - "fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="], - - "function-bind": ["function-bind@1.1.2", "", {}, "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="], - - "function.prototype.name": ["function.prototype.name@1.1.8", "", { "dependencies": { "call-bind": 
"^1.0.8", "call-bound": "^1.0.3", "define-properties": "^1.2.1", "functions-have-names": "^1.2.3", "hasown": "^2.0.2", "is-callable": "^1.2.7" } }, "sha512-e5iwyodOHhbMr/yNrc7fDYG4qlbIvI5gajyzPnb5TCwyhjApznQh1BMFou9b30SevY43gCJKXycoCBjMbsuW0Q=="], - - "functions-have-names": ["functions-have-names@1.2.3", "", {}, "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ=="], - - "fuse.js": ["fuse.js@7.1.0", "", {}, "sha512-trLf4SzuuUxfusZADLINj+dE8clK1frKdmqiJNb1Es75fmI5oY6X2mxLVUciLLjxqw/xr72Dhy+lER6dGd02FQ=="], - - "generator-function": ["generator-function@2.0.1", "", {}, "sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g=="], - - "gensync": ["gensync@1.0.0-beta.2", "", {}, "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg=="], - - "get-intrinsic": ["get-intrinsic@1.3.0", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.2", "es-define-property": "^1.0.1", "es-errors": "^1.3.0", "es-object-atoms": "^1.1.1", "function-bind": "^1.1.2", "get-proto": "^1.0.1", "gopd": "^1.2.0", "has-symbols": "^1.1.0", "hasown": "^2.0.2", "math-intrinsics": "^1.1.0" } }, "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ=="], - - "get-nonce": ["get-nonce@1.0.1", "", {}, "sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q=="], - - "get-proto": ["get-proto@1.0.1", "", { "dependencies": { "dunder-proto": "^1.0.1", "es-object-atoms": "^1.0.0" } }, "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g=="], - - "get-symbol-description": ["get-symbol-description@1.1.0", "", { "dependencies": { "call-bound": "^1.0.3", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.6" } }, "sha512-w9UMqWwJxHNOvoNzSJ2oPF5wvYcvP7jUvYzhp67yEhTi17ZDBBC1z9pTdGuzjD+EFIqLSYRweZjqfiPzQ06Ebg=="], - - "get-tsconfig": ["get-tsconfig@4.13.0", "", { "dependencies": 
{ "resolve-pkg-maps": "^1.0.0" } }, "sha512-1VKTZJCwBrvbd+Wn3AOgQP/2Av+TfTCOlE4AcRJE72W1ksZXbAx8PPBR9RzgTeSPzlPMHrbANMH3LbltH73wxQ=="], - - "github-slugger": ["github-slugger@2.0.0", "", {}, "sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw=="], - - "glob-parent": ["glob-parent@6.0.2", "", { "dependencies": { "is-glob": "^4.0.3" } }, "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A=="], - - "globals": ["globals@16.5.0", "", {}, "sha512-c/c15i26VrJ4IRt5Z89DnIzCGDn9EcebibhAOjw5ibqEHsE1wLUgkPn9RDmNcUKyU87GeaL633nyJ+pplFR2ZQ=="], - - "globalthis": ["globalthis@1.0.4", "", { "dependencies": { "define-properties": "^1.2.1", "gopd": "^1.0.1" } }, "sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ=="], - - "globrex": ["globrex@0.1.2", "", {}, "sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg=="], - - "gopd": ["gopd@1.2.0", "", {}, "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg=="], - - "graceful-fs": ["graceful-fs@4.2.11", "", {}, "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="], - - "graphemer": ["graphemer@1.4.0", "", {}, "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag=="], - - "has-bigints": ["has-bigints@1.1.0", "", {}, "sha512-R3pbpkcIqv2Pm3dUwgjclDRVmWpTJW2DcMzcIhEXEx1oh/CEMObMm3KLmRJOdvhM7o4uQBnwr8pzRK2sJWIqfg=="], - - "has-flag": ["has-flag@4.0.0", "", {}, "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ=="], - - "has-property-descriptors": ["has-property-descriptors@1.0.2", "", { "dependencies": { "es-define-property": "^1.0.0" } }, "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg=="], - - "has-proto": ["has-proto@1.2.0", "", { "dependencies": { 
"dunder-proto": "^1.0.0" } }, "sha512-KIL7eQPfHQRC8+XluaIw7BHUwwqL19bQn4hzNgdr+1wXoU0KKj6rufu47lhY7KbJR2C6T6+PfyN0Ea7wkSS+qQ=="], - - "has-symbols": ["has-symbols@1.1.0", "", {}, "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ=="], - - "has-tostringtag": ["has-tostringtag@1.0.2", "", { "dependencies": { "has-symbols": "^1.0.3" } }, "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw=="], - - "hasown": ["hasown@2.0.2", "", { "dependencies": { "function-bind": "^1.1.2" } }, "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ=="], - - "hast-util-parse-selector": ["hast-util-parse-selector@4.0.0", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A=="], - - "hastscript": ["hastscript@9.0.1", "", { "dependencies": { "@types/hast": "^3.0.0", "comma-separated-tokens": "^2.0.0", "hast-util-parse-selector": "^4.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0" } }, "sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w=="], - - "hermes-estree": ["hermes-estree@0.25.1", "", {}, "sha512-0wUoCcLp+5Ev5pDW2OriHC2MJCbwLwuRx+gAqMTOkGKJJiBCLjtrvy4PWUGn6MIVefecRpzoOZ/UV6iGdOr+Cw=="], - - "hermes-parser": ["hermes-parser@0.25.1", "", { "dependencies": { "hermes-estree": "0.25.1" } }, "sha512-6pEjquH3rqaI6cYAXYPcz9MS4rY6R4ngRgrgfDshRptUZIc3lw0MCIJIGDj9++mfySOuPTHB4nrSW99BCvOPIA=="], - - "highlight.js": ["highlight.js@10.7.3", "", {}, "sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A=="], - - "highlightjs-vue": ["highlightjs-vue@1.0.0", "", {}, "sha512-PDEfEF102G23vHmPhLyPboFCD+BkMGu+GuJe2d9/eH4FsCwvgBpnc9n0pGE+ffKdph38s6foEZiEjdgHdzp+IA=="], - - "html-encoding-sniffer": ["html-encoding-sniffer@4.0.0", "", { "dependencies": { "whatwg-encoding": "^3.1.1" 
} }, "sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ=="], - - "html-entities": ["html-entities@2.6.0", "", {}, "sha512-kig+rMn/QOVRvr7c86gQ8lWXq+Hkv6CbAH1hLu+RG338StTpE8Z0b44SDVaqVu7HGKf27frdmUYEs9hTUX/cLQ=="], - - "http-proxy-agent": ["http-proxy-agent@7.0.2", "", { "dependencies": { "agent-base": "^7.1.0", "debug": "^4.3.4" } }, "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig=="], - - "https-proxy-agent": ["https-proxy-agent@7.0.6", "", { "dependencies": { "agent-base": "^7.1.2", "debug": "4" } }, "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw=="], - - "iconv-lite": ["iconv-lite@0.6.3", "", { "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" } }, "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw=="], - - "ignore": ["ignore@7.0.5", "", {}, "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg=="], - - "immer": ["immer@10.2.0", "", {}, "sha512-d/+XTN3zfODyjr89gM3mPq1WNX2B8pYsu7eORitdwyA2sBubnTl3laYlBk4sXY5FUa5qTZGBDPJICVbvqzjlbw=="], - - "import-fresh": ["import-fresh@3.3.1", "", { "dependencies": { "parent-module": "^1.0.0", "resolve-from": "^4.0.0" } }, "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ=="], - - "imurmurhash": ["imurmurhash@0.1.4", "", {}, "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA=="], - - "indent-string": ["indent-string@5.0.0", "", {}, "sha512-m6FAo/spmsW2Ab2fU35JTYwtOKa2yAwXSwgjSv1TJzh4Mh7mC3lzAOVLBprb72XsTrgkEIsl7YrFNAiDiRhIGg=="], - - "internal-slot": ["internal-slot@1.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "hasown": "^2.0.2", "side-channel": "^1.1.0" } }, "sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw=="], - - "internmap": ["internmap@2.0.3", "", {}, 
"sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg=="], - - "is-alphabetical": ["is-alphabetical@2.0.1", "", {}, "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ=="], - - "is-alphanumerical": ["is-alphanumerical@2.0.1", "", { "dependencies": { "is-alphabetical": "^2.0.0", "is-decimal": "^2.0.0" } }, "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw=="], - - "is-array-buffer": ["is-array-buffer@3.0.5", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.3", "get-intrinsic": "^1.2.6" } }, "sha512-DDfANUiiG2wC1qawP66qlTugJeL5HyzMpfr8lLK+jMQirGzNod0B12cFB/9q838Ru27sBwfw78/rdoU7RERz6A=="], - - "is-async-function": ["is-async-function@2.1.1", "", { "dependencies": { "async-function": "^1.0.0", "call-bound": "^1.0.3", "get-proto": "^1.0.1", "has-tostringtag": "^1.0.2", "safe-regex-test": "^1.1.0" } }, "sha512-9dgM/cZBnNvjzaMYHVoxxfPj2QXt22Ev7SuuPrs+xav0ukGB0S6d4ydZdEiM48kLx5kDV+QBPrpVnFyefL8kkQ=="], - - "is-bigint": ["is-bigint@1.1.0", "", { "dependencies": { "has-bigints": "^1.0.2" } }, "sha512-n4ZT37wG78iz03xPRKJrHTdZbe3IicyucEtdRsV5yglwc3GyUfbAfpSeD0FJ41NbUNSt5wbhqfp1fS+BgnvDFQ=="], - - "is-binary-path": ["is-binary-path@2.1.0", "", { "dependencies": { "binary-extensions": "^2.0.0" } }, "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw=="], - - "is-boolean-object": ["is-boolean-object@1.2.2", "", { "dependencies": { "call-bound": "^1.0.3", "has-tostringtag": "^1.0.2" } }, "sha512-wa56o2/ElJMYqjCjGkXri7it5FbebW5usLw/nPmCMs5DeZ7eziSYZhSmPRn0txqeW4LnAmQQU7FgqLpsEFKM4A=="], - - "is-builtin-module": ["is-builtin-module@5.0.0", "", { "dependencies": { "builtin-modules": "^5.0.0" } }, "sha512-f4RqJKBUe5rQkJ2eJEJBXSticB3hGbN9j0yxxMQFqIW89Jp9WYFtzfTcRlstDKVUTRzSOTLKRfO9vIztenwtxA=="], - - "is-bun-module": ["is-bun-module@2.0.0", "", { "dependencies": { "semver": "^7.7.1" } }, 
"sha512-gNCGbnnnnFAUGKeZ9PdbyeGYJqewpmc2aKHUEMO5nQPWU9lOmv7jcmQIv+qHD8fXW6W7qfuCwX4rY9LNRjXrkQ=="], - - "is-callable": ["is-callable@1.2.7", "", {}, "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA=="], - - "is-core-module": ["is-core-module@2.16.1", "", { "dependencies": { "hasown": "^2.0.2" } }, "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w=="], - - "is-data-view": ["is-data-view@1.0.2", "", { "dependencies": { "call-bound": "^1.0.2", "get-intrinsic": "^1.2.6", "is-typed-array": "^1.1.13" } }, "sha512-RKtWF8pGmS87i2D6gqQu/l7EYRlVdfzemCJN/P3UOs//x1QE7mfhvzHIApBTRf7axvT6DMGwSwBXYCT0nfB9xw=="], - - "is-date-object": ["is-date-object@1.1.0", "", { "dependencies": { "call-bound": "^1.0.2", "has-tostringtag": "^1.0.2" } }, "sha512-PwwhEakHVKTdRNVOw+/Gyh0+MzlCl4R6qKvkhuvLtPMggI1WAHt9sOwZxQLSGpUaDnrdyDsomoRgNnCfKNSXXg=="], - - "is-decimal": ["is-decimal@2.0.1", "", {}, "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A=="], - - "is-extglob": ["is-extglob@2.1.1", "", {}, "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ=="], - - "is-finalizationregistry": ["is-finalizationregistry@1.1.1", "", { "dependencies": { "call-bound": "^1.0.3" } }, "sha512-1pC6N8qWJbWoPtEjgcL2xyhQOP491EQjeUo3qTKcmV8YSDDJrOepfG8pcC7h/QgnQHYSv0mJ3Z/ZWxmatVrysg=="], - - "is-generator-function": ["is-generator-function@1.1.2", "", { "dependencies": { "call-bound": "^1.0.4", "generator-function": "^2.0.0", "get-proto": "^1.0.1", "has-tostringtag": "^1.0.2", "safe-regex-test": "^1.1.0" } }, "sha512-upqt1SkGkODW9tsGNG5mtXTXtECizwtS2kA161M+gJPc1xdb/Ax629af6YrTwcOeQHbewrPNlE5Dx7kzvXTizA=="], - - "is-glob": ["is-glob@4.0.3", "", { "dependencies": { "is-extglob": "^2.1.1" } }, "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg=="], - - "is-hexadecimal": ["is-hexadecimal@2.0.1", "", 
{}, "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg=="], - - "is-immutable-type": ["is-immutable-type@5.0.1", "", { "dependencies": { "@typescript-eslint/type-utils": "^8.0.0", "ts-api-utils": "^2.0.0", "ts-declaration-location": "^1.0.4" }, "peerDependencies": { "eslint": "*", "typescript": ">=4.7.4" } }, "sha512-LkHEOGVZZXxGl8vDs+10k3DvP++SEoYEAJLRk6buTFi6kD7QekThV7xHS0j6gpnUCQ0zpud/gMDGiV4dQneLTg=="], - - "is-map": ["is-map@2.0.3", "", {}, "sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw=="], - - "is-negative-zero": ["is-negative-zero@2.0.3", "", {}, "sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw=="], - - "is-number": ["is-number@7.0.0", "", {}, "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng=="], - - "is-number-object": ["is-number-object@1.1.1", "", { "dependencies": { "call-bound": "^1.0.3", "has-tostringtag": "^1.0.2" } }, "sha512-lZhclumE1G6VYD8VHe35wFaIif+CTy5SJIi5+3y4psDgWu4wPDoBhF8NxUOinEc7pHgiTsT6MaBb92rKhhD+Xw=="], - - "is-potential-custom-element-name": ["is-potential-custom-element-name@1.0.1", "", {}, "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ=="], - - "is-regex": ["is-regex@1.2.1", "", { "dependencies": { "call-bound": "^1.0.2", "gopd": "^1.2.0", "has-tostringtag": "^1.0.2", "hasown": "^2.0.2" } }, "sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g=="], - - "is-set": ["is-set@2.0.3", "", {}, "sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg=="], - - "is-shared-array-buffer": ["is-shared-array-buffer@1.0.4", "", { "dependencies": { "call-bound": "^1.0.3" } }, "sha512-ISWac8drv4ZGfwKl5slpHG9OwPNty4jOWPRIhBpxOoD+hqITiwuipOQ2bNthAzwA3B4fIjO4Nln74N0S9byq8A=="], - - "is-string": ["is-string@1.1.1", "", { "dependencies": { 
"call-bound": "^1.0.3", "has-tostringtag": "^1.0.2" } }, "sha512-BtEeSsoaQjlSPBemMQIrY1MY0uM6vnS1g5fmufYOtnxLGUZM2178PKbhsk7Ffv58IX+ZtcvoGwccYsh0PglkAA=="], - - "is-symbol": ["is-symbol@1.1.1", "", { "dependencies": { "call-bound": "^1.0.2", "has-symbols": "^1.1.0", "safe-regex-test": "^1.1.0" } }, "sha512-9gGx6GTtCQM73BgmHQXfDmLtfjjTUDSyoxTCbp5WtoixAhfgsDirWIcVQ/IHpvI5Vgd5i/J5F7B9cN/WlVbC/w=="], - - "is-typed-array": ["is-typed-array@1.1.15", "", { "dependencies": { "which-typed-array": "^1.1.16" } }, "sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ=="], - - "is-weakmap": ["is-weakmap@2.0.2", "", {}, "sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w=="], - - "is-weakref": ["is-weakref@1.1.1", "", { "dependencies": { "call-bound": "^1.0.3" } }, "sha512-6i9mGWSlqzNMEqpCp93KwRS1uUOodk2OJ6b+sq7ZPDSy2WuI5NFIxp/254TytR8ftefexkWn5xNiHUNpPOfSew=="], - - "is-weakset": ["is-weakset@2.0.4", "", { "dependencies": { "call-bound": "^1.0.3", "get-intrinsic": "^1.2.6" } }, "sha512-mfcwb6IzQyOKTs84CQMrOwW4gQcaTOAWJ0zzJCl2WSPDrWk/OzDaImWFH3djXhb24g4eudZfLRozAvPGw4d9hQ=="], - - "isarray": ["isarray@2.0.5", "", {}, "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw=="], - - "isexe": ["isexe@2.0.0", "", {}, "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="], - - "iterator.prototype": ["iterator.prototype@1.1.5", "", { "dependencies": { "define-data-property": "^1.1.4", "es-object-atoms": "^1.0.0", "get-intrinsic": "^1.2.6", "get-proto": "^1.0.0", "has-symbols": "^1.1.0", "set-function-name": "^2.0.2" } }, "sha512-H0dkQoCa3b2VEeKQBOxFph+JAbcrQdE7KC0UkqwpLmv2EC4P41QXP+rqo9wYodACiG5/WM5s9oDApTU8utwj9g=="], - - "jiti": ["jiti@1.21.7", "", { "bin": { "jiti": "bin/jiti.js" } }, "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A=="], - - "js-tokens": 
["js-tokens@4.0.0", "", {}, "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="], - - "js-yaml": ["js-yaml@4.1.1", "", { "dependencies": { "argparse": "^2.0.1" }, "bin": { "js-yaml": "bin/js-yaml.js" } }, "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA=="], - - "jsdoc-type-pratt-parser": ["jsdoc-type-pratt-parser@4.8.0", "", {}, "sha512-iZ8Bdb84lWRuGHamRXFyML07r21pcwBrLkHEuHgEY5UbCouBwv7ECknDRKzsQIXMiqpPymqtIf8TC/shYKB5rw=="], - - "jsdom": ["jsdom@27.3.0", "", { "dependencies": { "@acemir/cssom": "^0.9.28", "@asamuzakjp/dom-selector": "^6.7.6", "cssstyle": "^5.3.4", "data-urls": "^6.0.0", "decimal.js": "^10.6.0", "html-encoding-sniffer": "^4.0.0", "http-proxy-agent": "^7.0.2", "https-proxy-agent": "^7.0.6", "is-potential-custom-element-name": "^1.0.1", "parse5": "^8.0.0", "saxes": "^6.0.0", "symbol-tree": "^3.2.4", "tough-cookie": "^6.0.0", "w3c-xmlserializer": "^5.0.0", "webidl-conversions": "^8.0.0", "whatwg-encoding": "^3.1.1", "whatwg-mimetype": "^4.0.0", "whatwg-url": "^15.1.0", "ws": "^8.18.3", "xml-name-validator": "^5.0.0" }, "peerDependencies": { "canvas": "^3.0.0" }, "optionalPeers": ["canvas"] }, "sha512-GtldT42B8+jefDUC4yUKAvsaOrH7PDHmZxZXNgF2xMmymjUbRYJvpAybZAKEmXDGTM0mCsz8duOa4vTm5AY2Kg=="], - - "jsesc": ["jsesc@3.1.0", "", { "bin": { "jsesc": "bin/jsesc" } }, "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA=="], - - "json-buffer": ["json-buffer@3.0.1", "", {}, "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ=="], - - "json-schema-traverse": ["json-schema-traverse@0.4.1", "", {}, "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="], - - "json-stable-stringify-without-jsonify": ["json-stable-stringify-without-jsonify@1.0.1", "", {}, 
"sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw=="], - - "json5": ["json5@2.2.3", "", { "bin": { "json5": "lib/cli.js" } }, "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg=="], - - "jsonc-eslint-parser": ["jsonc-eslint-parser@2.4.2", "", { "dependencies": { "acorn": "^8.5.0", "eslint-visitor-keys": "^3.0.0", "espree": "^9.0.0", "semver": "^7.3.5" } }, "sha512-1e4qoRgnn448pRuMvKGsFFymUCquZV0mpGgOyIKNgD3JVDTsVJyRBGH/Fm0tBb8WsWGgmB1mDe6/yJMQM37DUA=="], - - "jsx-ast-utils": ["jsx-ast-utils@3.3.5", "", { "dependencies": { "array-includes": "^3.1.6", "array.prototype.flat": "^1.3.1", "object.assign": "^4.1.4", "object.values": "^1.1.6" } }, "sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ=="], - - "keyv": ["keyv@4.5.4", "", { "dependencies": { "json-buffer": "3.0.1" } }, "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw=="], - - "language-subtag-registry": ["language-subtag-registry@0.3.23", "", {}, "sha512-0K65Lea881pHotoGEa5gDlMxt3pctLi2RplBb7Ezh4rRdLEOtgi7n4EwK9lamnUCkKBqaeKRVebTq6BAxSkpXQ=="], - - "language-tags": ["language-tags@1.0.9", "", { "dependencies": { "language-subtag-registry": "^0.3.20" } }, "sha512-MbjN408fEndfiQXbFQ1vnd+1NoLDsnQW41410oQBXiyXDMYH5z505juWa4KUE1LqxRC7DgOgZDbKLxHIwm27hA=="], - - "levn": ["levn@0.4.1", "", { "dependencies": { "prelude-ls": "^1.2.1", "type-check": "~0.4.0" } }, "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ=="], - - "lilconfig": ["lilconfig@3.1.3", "", {}, "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw=="], - - "lines-and-columns": ["lines-and-columns@1.2.4", "", {}, "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg=="], - - "local-pkg": ["local-pkg@1.1.2", "", { "dependencies": { "mlly": "^1.7.4", 
"pkg-types": "^2.3.0", "quansync": "^0.2.11" } }, "sha512-arhlxbFRmoQHl33a0Zkle/YWlmNwoyt6QNZEIJcqNbdrsix5Lvc4HyyI3EnwxTYlZYc32EbYrQ8SzEZ7dqgg9A=="], - - "locate-path": ["locate-path@6.0.0", "", { "dependencies": { "p-locate": "^5.0.0" } }, "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw=="], - - "lodash": ["lodash@4.17.21", "", {}, "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="], - - "lodash.merge": ["lodash.merge@4.6.2", "", {}, "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ=="], - - "longest-streak": ["longest-streak@3.1.0", "", {}, "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g=="], - - "loose-envify": ["loose-envify@1.4.0", "", { "dependencies": { "js-tokens": "^3.0.0 || ^4.0.0" }, "bin": { "loose-envify": "cli.js" } }, "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q=="], - - "lowlight": ["lowlight@1.20.0", "", { "dependencies": { "fault": "^1.0.0", "highlight.js": "~10.7.0" } }, "sha512-8Ktj+prEb1RoCPkEOrPMYUN/nCggB7qAWe3a7OpMjWQkh3l2RD5wKRQ+o8Q8YuI9RG/xs95waaI/E6ym/7NsTw=="], - - "lru-cache": ["lru-cache@11.2.4", "", {}, "sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg=="], - - "lucide-react": ["lucide-react@0.561.0", "", { "peerDependencies": { "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, "sha512-Y59gMY38tl4/i0qewcqohPdEbieBy7SovpBL9IFebhc2mDd8x4PZSOsiFRkpPcOq6bj1r/mjH/Rk73gSlIJP2A=="], - - "magic-string": ["magic-string@0.30.21", "", { "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.5" } }, "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ=="], - - "markdown-table": ["markdown-table@3.0.4", "", {}, "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw=="], - - 
"math-intrinsics": ["math-intrinsics@1.1.0", "", {}, "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g=="], - - "mdast-util-find-and-replace": ["mdast-util-find-and-replace@3.0.2", "", { "dependencies": { "@types/mdast": "^4.0.0", "escape-string-regexp": "^5.0.0", "unist-util-is": "^6.0.0", "unist-util-visit-parents": "^6.0.0" } }, "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg=="], - - "mdast-util-from-markdown": ["mdast-util-from-markdown@2.0.2", "", { "dependencies": { "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "mdast-util-to-string": "^4.0.0", "micromark": "^4.0.0", "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-decode-string": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", "unist-util-stringify-position": "^4.0.0" } }, "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA=="], - - "mdast-util-frontmatter": ["mdast-util-frontmatter@2.0.1", "", { "dependencies": { "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "escape-string-regexp": "^5.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0", "micromark-extension-frontmatter": "^2.0.0" } }, "sha512-LRqI9+wdgC25P0URIJY9vwocIzCcksduHQ9OF2joxQoyTNVduwLAFUzjoopuRJbJAReaKrNQKAZKL3uCMugWJA=="], - - "mdast-util-gfm": ["mdast-util-gfm@3.1.0", "", { "dependencies": { "mdast-util-from-markdown": "^2.0.0", "mdast-util-gfm-autolink-literal": "^2.0.0", "mdast-util-gfm-footnote": "^2.0.0", "mdast-util-gfm-strikethrough": "^2.0.0", "mdast-util-gfm-table": "^2.0.0", "mdast-util-gfm-task-list-item": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ=="], - - "mdast-util-gfm-autolink-literal": 
["mdast-util-gfm-autolink-literal@2.0.1", "", { "dependencies": { "@types/mdast": "^4.0.0", "ccount": "^2.0.0", "devlop": "^1.0.0", "mdast-util-find-and-replace": "^3.0.0", "micromark-util-character": "^2.0.0" } }, "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ=="], - - "mdast-util-gfm-footnote": ["mdast-util-gfm-footnote@2.1.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "devlop": "^1.1.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0" } }, "sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ=="], - - "mdast-util-gfm-strikethrough": ["mdast-util-gfm-strikethrough@2.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg=="], - - "mdast-util-gfm-table": ["mdast-util-gfm-table@2.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "markdown-table": "^3.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg=="], - - "mdast-util-gfm-task-list-item": ["mdast-util-gfm-task-list-item@2.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ=="], - - "mdast-util-phrasing": ["mdast-util-phrasing@4.1.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "unist-util-is": "^6.0.0" } }, "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w=="], - - "mdast-util-to-markdown": ["mdast-util-to-markdown@2.1.2", "", { "dependencies": { "@types/mdast": "^4.0.0", 
"@types/unist": "^3.0.0", "longest-streak": "^3.0.0", "mdast-util-phrasing": "^4.0.0", "mdast-util-to-string": "^4.0.0", "micromark-util-classify-character": "^2.0.0", "micromark-util-decode-string": "^2.0.0", "unist-util-visit": "^5.0.0", "zwitch": "^2.0.0" } }, "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA=="], - - "mdast-util-to-string": ["mdast-util-to-string@4.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0" } }, "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg=="], - - "mdn-data": ["mdn-data@2.12.2", "", {}, "sha512-IEn+pegP1aManZuckezWCO+XZQDplx1366JoVhTpMpBB1sPey/SbveZQUosKiKiGYjg1wH4pMlNgXbCiYgihQA=="], - - "merge2": ["merge2@1.4.1", "", {}, "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg=="], - - "micromark": ["micromark@4.0.2", "", { "dependencies": { "@types/debug": "^4.0.0", "debug": "^4.0.0", "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "micromark-core-commonmark": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-combine-extensions": "^2.0.0", "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-encode": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-resolve-all": "^2.0.0", "micromark-util-sanitize-uri": "^2.0.0", "micromark-util-subtokenize": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA=="], - - "micromark-core-commonmark": ["micromark-core-commonmark@2.0.3", "", { "dependencies": { "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "micromark-factory-destination": "^2.0.0", "micromark-factory-label": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-factory-title": "^2.0.0", 
"micromark-factory-whitespace": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-classify-character": "^2.0.0", "micromark-util-html-tag-name": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-resolve-all": "^2.0.0", "micromark-util-subtokenize": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg=="], - - "micromark-extension-frontmatter": ["micromark-extension-frontmatter@2.0.0", "", { "dependencies": { "fault": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-C4AkuM3dA58cgZha7zVnuVxBhDsbttIMiytjgsM2XbHAB2faRVaHRle40558FBN+DJcrLNCoqG5mlrpdU4cRtg=="], - - "micromark-extension-gfm": ["micromark-extension-gfm@3.0.0", "", { "dependencies": { "micromark-extension-gfm-autolink-literal": "^2.0.0", "micromark-extension-gfm-footnote": "^2.0.0", "micromark-extension-gfm-strikethrough": "^2.0.0", "micromark-extension-gfm-table": "^2.0.0", "micromark-extension-gfm-tagfilter": "^2.0.0", "micromark-extension-gfm-task-list-item": "^2.0.0", "micromark-util-combine-extensions": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w=="], - - "micromark-extension-gfm-autolink-literal": ["micromark-extension-gfm-autolink-literal@2.1.0", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-sanitize-uri": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw=="], - - "micromark-extension-gfm-footnote": ["micromark-extension-gfm-footnote@2.1.0", "", { "dependencies": { "devlop": "^1.0.0", "micromark-core-commonmark": "^2.0.0", "micromark-factory-space": "^2.0.0", 
"micromark-util-character": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-sanitize-uri": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw=="], - - "micromark-extension-gfm-strikethrough": ["micromark-extension-gfm-strikethrough@2.1.0", "", { "dependencies": { "devlop": "^1.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-classify-character": "^2.0.0", "micromark-util-resolve-all": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw=="], - - "micromark-extension-gfm-table": ["micromark-extension-gfm-table@2.1.1", "", { "dependencies": { "devlop": "^1.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg=="], - - "micromark-extension-gfm-tagfilter": ["micromark-extension-gfm-tagfilter@2.0.0", "", { "dependencies": { "micromark-util-types": "^2.0.0" } }, "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg=="], - - "micromark-extension-gfm-task-list-item": ["micromark-extension-gfm-task-list-item@2.1.0", "", { "dependencies": { "devlop": "^1.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw=="], - - "micromark-factory-destination": ["micromark-factory-destination@2.0.1", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, 
"sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA=="], - - "micromark-factory-label": ["micromark-factory-label@2.0.1", "", { "dependencies": { "devlop": "^1.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg=="], - - "micromark-factory-space": ["micromark-factory-space@2.0.1", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg=="], - - "micromark-factory-title": ["micromark-factory-title@2.0.1", "", { "dependencies": { "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw=="], - - "micromark-factory-whitespace": ["micromark-factory-whitespace@2.0.1", "", { "dependencies": { "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ=="], - - "micromark-util-character": ["micromark-util-character@2.1.1", "", { "dependencies": { "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q=="], - - "micromark-util-chunked": ["micromark-util-chunked@2.0.1", "", { "dependencies": { "micromark-util-symbol": "^2.0.0" } }, "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA=="], - - "micromark-util-classify-character": ["micromark-util-classify-character@2.0.1", "", { "dependencies": { "micromark-util-character": 
"^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q=="], - - "micromark-util-combine-extensions": ["micromark-util-combine-extensions@2.0.1", "", { "dependencies": { "micromark-util-chunked": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg=="], - - "micromark-util-decode-numeric-character-reference": ["micromark-util-decode-numeric-character-reference@2.0.2", "", { "dependencies": { "micromark-util-symbol": "^2.0.0" } }, "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw=="], - - "micromark-util-decode-string": ["micromark-util-decode-string@2.0.1", "", { "dependencies": { "decode-named-character-reference": "^1.0.0", "micromark-util-character": "^2.0.0", "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-symbol": "^2.0.0" } }, "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ=="], - - "micromark-util-encode": ["micromark-util-encode@2.0.1", "", {}, "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw=="], - - "micromark-util-html-tag-name": ["micromark-util-html-tag-name@2.0.1", "", {}, "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA=="], - - "micromark-util-normalize-identifier": ["micromark-util-normalize-identifier@2.0.1", "", { "dependencies": { "micromark-util-symbol": "^2.0.0" } }, "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q=="], - - "micromark-util-resolve-all": ["micromark-util-resolve-all@2.0.1", "", { "dependencies": { "micromark-util-types": "^2.0.0" } }, "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg=="], - - 
"micromark-util-sanitize-uri": ["micromark-util-sanitize-uri@2.0.1", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-encode": "^2.0.0", "micromark-util-symbol": "^2.0.0" } }, "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ=="], - - "micromark-util-subtokenize": ["micromark-util-subtokenize@2.1.0", "", { "dependencies": { "devlop": "^1.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA=="], - - "micromark-util-symbol": ["micromark-util-symbol@2.0.1", "", {}, "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q=="], - - "micromark-util-types": ["micromark-util-types@2.0.2", "", {}, "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA=="], - - "micromatch": ["micromatch@4.0.8", "", { "dependencies": { "braces": "^3.0.3", "picomatch": "^2.3.1" } }, "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA=="], - - "mini-svg-data-uri": ["mini-svg-data-uri@1.4.4", "", { "bin": { "mini-svg-data-uri": "cli.js" } }, "sha512-r9deDe9p5FJUPZAk3A59wGH7Ii9YrjjWw0jmw/liSbHl2CHiyXj6FcDXDu2K3TjVAXqiJdaw3xxwlZZr9E6nHg=="], - - "minimatch": ["minimatch@3.1.2", "", { "dependencies": { "brace-expansion": "^1.1.7" } }, "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw=="], - - "minimist": ["minimist@1.2.8", "", {}, "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA=="], - - "mlly": ["mlly@1.8.0", "", { "dependencies": { "acorn": "^8.15.0", "pathe": "^2.0.3", "pkg-types": "^1.3.1", "ufo": "^1.6.1" } }, "sha512-l8D9ODSRWLe2KHJSifWGwBqpTZXIXTeo8mlKjY+E2HAakaTeNpqAyBZ8GSqLzHgw4XmHmC8whvpjJNMbFZN7/g=="], - - "motion": ["motion@12.23.26", "", { 
"dependencies": { "framer-motion": "^12.23.26", "tslib": "^2.4.0" }, "peerDependencies": { "@emotion/is-prop-valid": "*", "react": "^18.0.0 || ^19.0.0", "react-dom": "^18.0.0 || ^19.0.0" }, "optionalPeers": ["@emotion/is-prop-valid", "react", "react-dom"] }, "sha512-Ll8XhVxY8LXMVYTCfme27WH2GjBrCIzY4+ndr5QKxsK+YwCtOi2B/oBi5jcIbik5doXuWT/4KKDOVAZJkeY5VQ=="], - - "motion-dom": ["motion-dom@12.23.23", "", { "dependencies": { "motion-utils": "^12.23.6" } }, "sha512-n5yolOs0TQQBRUFImrRfs/+6X4p3Q4n1dUEqt/H58Vx7OW6RF+foWEgmTVDhIWJIMXOuNNL0apKH2S16en9eiA=="], - - "motion-utils": ["motion-utils@12.23.6", "", {}, "sha512-eAWoPgr4eFEOFfg2WjIsMoqJTW6Z8MTUCgn/GZ3VRpClWBdnbjryiA3ZSNLyxCTmCQx4RmYX6jX1iWHbenUPNQ=="], - - "ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], - - "mz": ["mz@2.7.0", "", { "dependencies": { "any-promise": "^1.0.0", "object-assign": "^4.0.1", "thenify-all": "^1.0.0" } }, "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q=="], - - "nanoid": ["nanoid@3.3.11", "", { "bin": { "nanoid": "bin/nanoid.cjs" } }, "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w=="], - - "napi-postinstall": ["napi-postinstall@0.3.4", "", { "bin": { "napi-postinstall": "lib/cli.js" } }, "sha512-PHI5f1O0EP5xJ9gQmFGMS6IZcrVvTjpXjz7Na41gTE7eE2hK11lg04CECCYEEjdc17EV4DO+fkGEtt7TpTaTiQ=="], - - "natural-compare": ["natural-compare@1.4.0", "", {}, "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw=="], - - "natural-orderby": ["natural-orderby@5.0.0", "", {}, "sha512-kKHJhxwpR/Okycz4HhQKKlhWe4ASEfPgkSWNmKFHd7+ezuQlxkA5cM3+XkBPvm1gmHen3w53qsYAv+8GwRrBlg=="], - - "next": ["next@15.5.8", "", { "dependencies": { "@next/env": "15.5.8", "@swc/helpers": "0.5.15", "caniuse-lite": "^1.0.30001579", "postcss": "8.4.31", "styled-jsx": "5.1.6" }, "optionalDependencies": { "@next/swc-darwin-arm64": 
"15.5.7", "@next/swc-darwin-x64": "15.5.7", "@next/swc-linux-arm64-gnu": "15.5.7", "@next/swc-linux-arm64-musl": "15.5.7", "@next/swc-linux-x64-gnu": "15.5.7", "@next/swc-linux-x64-musl": "15.5.7", "@next/swc-win32-arm64-msvc": "15.5.7", "@next/swc-win32-x64-msvc": "15.5.7", "sharp": "^0.34.3" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", "@playwright/test": "^1.51.1", "babel-plugin-react-compiler": "*", "react": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", "react-dom": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", "sass": "^1.3.0" }, "optionalPeers": ["@opentelemetry/api", "@playwright/test", "babel-plugin-react-compiler", "sass"], "bin": { "next": "dist/bin/next" } }, "sha512-Tma2R50eiM7Fx6fbDeHiThq7sPgl06mBr76j6Ga0lMFGrmaLitFsy31kykgb8Z++DR2uIEKi2RZ0iyjIwFd15Q=="], - - "next-themes": ["next-themes@0.4.6", "", { "peerDependencies": { "react": "^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc", "react-dom": "^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc" } }, "sha512-pZvgD5L0IEvX5/9GWyHMf3m8BKiVQwsCMHfoFosXtXBMnaS0ZnIJ9ST4b4NqLVKDEm8QBxoNNGNaBv2JNF6XNA=="], - - "node-releases": ["node-releases@2.0.27", "", {}, "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA=="], - - "normalize-path": ["normalize-path@3.0.0", "", {}, "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA=="], - - "nth-check": ["nth-check@2.1.1", "", { "dependencies": { "boolbase": "^1.0.0" } }, "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w=="], - - "nuqs": ["nuqs@2.8.5", "", { "dependencies": { "@standard-schema/spec": "1.0.0" }, "peerDependencies": { "@remix-run/react": ">=2", "@tanstack/react-router": "^1", "next": ">=14.2.0", "react": ">=18.2.0 || ^19.0.0-0", "react-router": "^5 || ^6 || ^7", "react-router-dom": "^5 || ^6 || ^7" }, "optionalPeers": ["@remix-run/react", "@tanstack/react-router", "next", "react-router", "react-router-dom"] }, 
"sha512-ndhnNB9eLX/bsiGFkBNsrfOWf3BCbzBMD+b5GkD5o2Q96Q+llHnoUlZsrO3tgJKZZV7LLlVCvFKdj+sjBITRzg=="], - - "object-assign": ["object-assign@4.1.1", "", {}, "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg=="], - - "object-deep-merge": ["object-deep-merge@2.0.0", "", {}, "sha512-3DC3UMpeffLTHiuXSy/UG4NOIYTLlY9u3V82+djSCLYClWobZiS4ivYzpIUWrRY/nfsJ8cWsKyG3QfyLePmhvg=="], - - "object-hash": ["object-hash@3.0.0", "", {}, "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw=="], - - "object-inspect": ["object-inspect@1.13.4", "", {}, "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew=="], - - "object-keys": ["object-keys@1.1.1", "", {}, "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA=="], - - "object.assign": ["object.assign@4.1.7", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.3", "define-properties": "^1.2.1", "es-object-atoms": "^1.0.0", "has-symbols": "^1.1.0", "object-keys": "^1.1.1" } }, "sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw=="], - - "object.entries": ["object.entries@1.1.9", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.4", "define-properties": "^1.2.1", "es-object-atoms": "^1.1.1" } }, "sha512-8u/hfXFRBD1O0hPUjioLhoWFHRmt6tKA4/vZPyckBr18l1KE9uHrFaFaUi8MDRTpi4uak2goyPTSNJLXX2k2Hw=="], - - "object.fromentries": ["object.fromentries@2.0.8", "", { "dependencies": { "call-bind": "^1.0.7", "define-properties": "^1.2.1", "es-abstract": "^1.23.2", "es-object-atoms": "^1.0.0" } }, "sha512-k6E21FzySsSK5a21KRADBd/NGneRegFO5pLHfdQLpRDETUNJueLXs3WCzyQ3tFRDYgbq3KHGXfTbi2bs8WQ6rQ=="], - - "object.groupby": ["object.groupby@1.0.3", "", { "dependencies": { "call-bind": "^1.0.7", "define-properties": "^1.2.1", "es-abstract": "^1.23.2" } }, 
"sha512-+Lhy3TQTuzXI5hevh8sBGqbmurHbbIjAi0Z4S63nthVLmLxfbj4T54a4CfZrXIrt9iP4mVAPYMo/v99taj3wjQ=="], - - "object.values": ["object.values@1.2.1", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.3", "define-properties": "^1.2.1", "es-object-atoms": "^1.0.0" } }, "sha512-gXah6aZrcUxjWg2zR2MwouP2eHlCBzdV4pygudehaKXSGW4v2AsRQUK+lwwXhii6KFZcunEnmSUoYp5CXibxtA=="], - - "optionator": ["optionator@0.9.4", "", { "dependencies": { "deep-is": "^0.1.3", "fast-levenshtein": "^2.0.6", "levn": "^0.4.1", "prelude-ls": "^1.2.1", "type-check": "^0.4.0", "word-wrap": "^1.2.5" } }, "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g=="], - - "own-keys": ["own-keys@1.0.1", "", { "dependencies": { "get-intrinsic": "^1.2.6", "object-keys": "^1.1.1", "safe-push-apply": "^1.0.0" } }, "sha512-qFOyK5PjiWZd+QQIh+1jhdb9LpxTF0qs7Pm8o5QHYZ0M3vKqSqzsZaEB6oWlxZ+q2sJBMI/Ktgd2N5ZwQoRHfg=="], - - "p-limit": ["p-limit@3.1.0", "", { "dependencies": { "yocto-queue": "^0.1.0" } }, "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ=="], - - "p-locate": ["p-locate@5.0.0", "", { "dependencies": { "p-limit": "^3.0.2" } }, "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw=="], - - "package-manager-detector": ["package-manager-detector@1.6.0", "", {}, "sha512-61A5ThoTiDG/C8s8UMZwSorAGwMJ0ERVGj2OjoW5pAalsNOg15+iQiPzrLJ4jhZ1HJzmC2PIHT2oEiH3R5fzNA=="], - - "parent-module": ["parent-module@1.0.1", "", { "dependencies": { "callsites": "^3.0.0" } }, "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g=="], - - "parse-entities": ["parse-entities@4.0.2", "", { "dependencies": { "@types/unist": "^2.0.0", "character-entities-legacy": "^3.0.0", "character-reference-invalid": "^2.0.0", "decode-named-character-reference": "^1.0.0", "is-alphanumerical": "^2.0.0", "is-decimal": "^2.0.0", "is-hexadecimal": "^2.0.0" } }, 
"sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw=="], - - "parse-gitignore": ["parse-gitignore@2.0.0", "", {}, "sha512-RmVuCHWsfu0QPNW+mraxh/xjQVw/lhUCUru8Zni3Ctq3AoMhpDTq0OVdKS6iesd6Kqb7viCV3isAL43dciOSog=="], - - "parse-imports-exports": ["parse-imports-exports@0.2.4", "", { "dependencies": { "parse-statements": "1.0.11" } }, "sha512-4s6vd6dx1AotCx/RCI2m7t7GCh5bDRUtGNvRfHSP2wbBQdMi67pPe7mtzmgwcaQ8VKK/6IB7Glfyu3qdZJPybQ=="], - - "parse-statements": ["parse-statements@1.0.11", "", {}, "sha512-HlsyYdMBnbPQ9Jr/VgJ1YF4scnldvJpJxCVx6KgqPL4dxppsWrJHCIIxQXMJrqGnsRkNPATbeMJ8Yxu7JMsYcA=="], - - "parse5": ["parse5@8.0.0", "", { "dependencies": { "entities": "^6.0.0" } }, "sha512-9m4m5GSgXjL4AjumKzq1Fgfp3Z8rsvjRNbnkVwfu2ImRqE5D0LnY2QfDen18FSY9C573YU5XxSapdHZTZ2WolA=="], - - "path-exists": ["path-exists@4.0.0", "", {}, "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w=="], - - "path-key": ["path-key@3.1.1", "", {}, "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q=="], - - "path-parse": ["path-parse@1.0.7", "", {}, "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw=="], - - "pathe": ["pathe@2.0.3", "", {}, "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w=="], - - "picocolors": ["picocolors@1.1.1", "", {}, "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA=="], - - "picomatch": ["picomatch@4.0.3", "", {}, "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q=="], - - "pify": ["pify@2.3.0", "", {}, "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog=="], - - "pirates": ["pirates@4.0.7", "", {}, "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA=="], - - "pkg-types": ["pkg-types@2.3.0", 
"", { "dependencies": { "confbox": "^0.2.2", "exsolve": "^1.0.7", "pathe": "^2.0.3" } }, "sha512-SIqCzDRg0s9npO5XQ3tNZioRY1uK06lA41ynBC1YmFTmnY6FjUjVt6s4LoADmwoig1qqD0oK8h1p/8mlMx8Oig=="], - - "pluralize": ["pluralize@8.0.0", "", {}, "sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA=="], - - "pnpm-workspace-yaml": ["pnpm-workspace-yaml@1.4.3", "", { "dependencies": { "yaml": "^2.8.2" } }, "sha512-Q8B3SWuuISy/Ciag4DFP7MCrJX07wfaekcqD2o/msdIj4x8Ql3bZ/NEKOXV7mTVh7m1YdiFWiMi9xH+0zuEGHw=="], - - "possible-typed-array-names": ["possible-typed-array-names@1.1.0", "", {}, "sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg=="], - - "postcss": ["postcss@8.5.6", "", { "dependencies": { "nanoid": "^3.3.11", "picocolors": "^1.1.1", "source-map-js": "^1.2.1" } }, "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg=="], - - "postcss-import": ["postcss-import@15.1.0", "", { "dependencies": { "postcss-value-parser": "^4.0.0", "read-cache": "^1.0.0", "resolve": "^1.1.7" }, "peerDependencies": { "postcss": "^8.0.0" } }, "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew=="], - - "postcss-js": ["postcss-js@4.1.0", "", { "dependencies": { "camelcase-css": "^2.0.1" }, "peerDependencies": { "postcss": "^8.4.21" } }, "sha512-oIAOTqgIo7q2EOwbhb8UalYePMvYoIeRY2YKntdpFQXNosSu3vLrniGgmH9OKs/qAkfoj5oB3le/7mINW1LCfw=="], - - "postcss-load-config": ["postcss-load-config@6.0.1", "", { "dependencies": { "lilconfig": "^3.1.1" }, "peerDependencies": { "jiti": ">=1.21.0", "postcss": ">=8.0.9", "tsx": "^4.8.1", "yaml": "^2.4.2" }, "optionalPeers": ["jiti", "postcss", "tsx", "yaml"] }, "sha512-oPtTM4oerL+UXmx+93ytZVN82RrlY/wPUV8IeDxFrzIjXOLF1pN+EmKPLbubvKHT2HC20xXsCAH2Z+CKV6Oz/g=="], - - "postcss-nested": ["postcss-nested@6.2.0", "", { "dependencies": { "postcss-selector-parser": "^6.1.1" }, "peerDependencies": { "postcss": 
"^8.2.14" } }, "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ=="], - - "postcss-selector-parser": ["postcss-selector-parser@6.1.2", "", { "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" } }, "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg=="], - - "postcss-value-parser": ["postcss-value-parser@4.2.0", "", {}, "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ=="], - - "prelude-ls": ["prelude-ls@1.2.1", "", {}, "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g=="], - - "prettier": ["prettier@3.7.4", "", { "bin": { "prettier": "bin/prettier.cjs" } }, "sha512-v6UNi1+3hSlVvv8fSaoUbggEM5VErKmmpGA7Pl3HF8V6uKY7rvClBOJlH6yNwQtfTueNkGVpOv/mtWL9L4bgRA=="], - - "prettier-linter-helpers": ["prettier-linter-helpers@1.0.0", "", { "dependencies": { "fast-diff": "^1.1.2" } }, "sha512-GbK2cP9nraSSUF9N2XwUwqfzlAFlMNYYl+ShE/V+H8a9uNl/oUqB1w2EL54Jh0OlyRSd8RfWYJ3coVS4TROP2w=="], - - "prismjs": ["prismjs@1.30.0", "", {}, "sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw=="], - - "prop-types": ["prop-types@15.8.1", "", { "dependencies": { "loose-envify": "^1.4.0", "object-assign": "^4.1.1", "react-is": "^16.13.1" } }, "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg=="], - - "property-information": ["property-information@7.1.0", "", {}, "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ=="], - - "punycode": ["punycode@2.3.1", "", {}, "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg=="], - - "quansync": ["quansync@0.2.11", "", {}, "sha512-AifT7QEbW9Nri4tAwR5M/uzpBuqfZf+zwaEM/QkzEjj7NBuFD2rBuy0K3dE+8wltbezDV7JMA0WfnCPYRSYbXA=="], - - "queue-microtask": ["queue-microtask@1.2.3", "", {}, 
"sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A=="], - - "react": ["react@19.2.3", "", {}, "sha512-Ku/hhYbVjOQnXDZFv2+RibmLFGwFdeeKHFcOTlrt7xplBnya5OGn/hIRDsqDiSUcfORsDC7MPxwork8jBwsIWA=="], - - "react-chartjs-2": ["react-chartjs-2@5.3.1", "", { "peerDependencies": { "chart.js": "^4.1.1", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, "sha512-h5IPXKg9EXpjoBzUfyWJvllMjG2mQ4EiuHQFhms/AjUm0XSZHhyRy2xVmLXHKrtcdrPO4mnGqRtYoD0vp95A0A=="], - - "react-code-blocks": ["react-code-blocks@0.1.6", "", { "dependencies": { "@babel/runtime": "^7.10.4", "react-syntax-highlighter": "^15.5.0", "styled-components": "^6.1.0", "tslib": "^2.6.0" }, "peerDependencies": { "react": ">=16" } }, "sha512-ENNuxG07yO+OuX1ChRje3ieefPRz6yrIpHmebQlaFQgzcAHbUfVeTINpOpoI9bSRSObeYo/OdHsporeToZ7fcg=="], - - "react-datepicker": ["react-datepicker@9.0.0", "", { "dependencies": { "@floating-ui/react": "^0.27.15", "clsx": "^2.1.1", "date-fns": "^4.1.0" }, "peerDependencies": { "date-fns-tz": "^3.0.0", "react": "^16.9.0 || ^17 || ^18 || ^19 || ^19.0.0-rc", "react-dom": "^16.9.0 || ^17 || ^18 || ^19 || ^19.0.0-rc" }, "optionalPeers": ["date-fns-tz"] }, "sha512-LGzKgBk5NUEcXUeSEJY2ICCCmPusm9UGRkNKRXCPgpUzoMx2CCsa0mOHTyv+itQeW7tR/jgGEpq5q/afZjGEFg=="], - - "react-day-picker": ["react-day-picker@9.12.0", "", { "dependencies": { "@date-fns/tz": "^1.4.1", "date-fns": "^4.1.0", "date-fns-jalali": "^4.1.0-0" }, "peerDependencies": { "react": ">=16.8.0" } }, "sha512-t8OvG/Zrciso5CQJu5b1A7yzEmebvST+S3pOVQJWxwjjVngyG/CA2htN/D15dLI4uTEuLLkbZyS4YYt480FAtA=="], - - "react-dom": ["react-dom@19.2.3", "", { "dependencies": { "scheduler": "^0.27.0" }, "peerDependencies": { "react": "^19.2.3" } }, "sha512-yELu4WmLPw5Mr/lmeEpox5rw3RETacE++JgHqQzd2dg+YbJuat3jH4ingc+WPZhxaoFzdv9y33G+F7Nl5O0GBg=="], - - "react-icons": ["react-icons@5.5.0", "", { "peerDependencies": { "react": "*" } }, 
"sha512-MEFcXdkP3dLo8uumGI5xN3lDFNsRtrjbOEKDLD7yv76v4wpnEq2Lt2qeHaQOr34I/wPN3s3+N08WkQ+CW37Xiw=="], - - "react-is": ["react-is@18.3.1", "", {}, "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg=="], - - "react-redux": ["react-redux@9.2.0", "", { "dependencies": { "@types/use-sync-external-store": "^0.0.6", "use-sync-external-store": "^1.4.0" }, "peerDependencies": { "@types/react": "^18.2.25 || ^19", "react": "^18.0 || ^19", "redux": "^5.0.0" }, "optionalPeers": ["@types/react", "redux"] }, "sha512-ROY9fvHhwOD9ySfrF0wmvu//bKCQ6AeZZq1nJNtbDC+kk5DuSuNX/n6YWYF/SYy7bSba4D4FSz8DJeKY/S/r+g=="], - - "react-refresh": ["react-refresh@0.18.0", "", {}, "sha512-QgT5//D3jfjJb6Gsjxv0Slpj23ip+HtOpnNgnb2S5zU3CB26G/IDPGoy4RJB42wzFE46DRsstbW6tKHoKbhAxw=="], - - "react-remove-scroll": ["react-remove-scroll@2.7.2", "", { "dependencies": { "react-remove-scroll-bar": "^2.3.7", "react-style-singleton": "^2.2.3", "tslib": "^2.1.0", "use-callback-ref": "^1.3.3", "use-sidecar": "^1.1.3" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-Iqb9NjCCTt6Hf+vOdNIZGdTiH1QSqr27H/Ek9sv/a97gfueI/5h1s3yRi1nngzMUaOOToin5dI1dXKdXiF+u0Q=="], - - "react-remove-scroll-bar": ["react-remove-scroll-bar@2.3.8", "", { "dependencies": { "react-style-singleton": "^2.2.2", "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" }, "optionalPeers": ["@types/react"] }, "sha512-9r+yi9+mgU33AKcj6IbT9oRCO78WriSj6t/cF8DWBZJ9aOGPOTEDvdUDz1FwKim7QXWwmHqtdHnRJfhAxEG46Q=="], - - "react-style-singleton": ["react-style-singleton@2.2.3", "", { "dependencies": { "get-nonce": "^1.0.0", "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, 
"sha512-b6jSvxvVnyptAiLjbkWLE/lOnR4lfTtDAl+eUC7RZy+QQWc6wRzIV2CE6xBuMmDxc2qIihtDCZD5NPOFl7fRBQ=="], - - "react-syntax-highlighter": ["react-syntax-highlighter@16.1.0", "", { "dependencies": { "@babel/runtime": "^7.28.4", "highlight.js": "^10.4.1", "highlightjs-vue": "^1.0.0", "lowlight": "^1.17.0", "prismjs": "^1.30.0", "refractor": "^5.0.0" }, "peerDependencies": { "react": ">= 0.14.0" } }, "sha512-E40/hBiP5rCNwkeBN1vRP+xow1X0pndinO+z3h7HLsHyjztbyjfzNWNKuAsJj+7DLam9iT4AaaOZnueCU+Nplg=="], - - "react-use-measure": ["react-use-measure@2.1.7", "", { "peerDependencies": { "react": ">=16.13", "react-dom": ">=16.13" }, "optionalPeers": ["react-dom"] }, "sha512-KrvcAo13I/60HpwGO5jpW7E9DfusKyLPLvuHlUyP5zqnmAPhNc6qTRjUQrdTADl0lpPpDVU2/Gg51UlOGHXbdg=="], - - "read-cache": ["read-cache@1.0.0", "", { "dependencies": { "pify": "^2.3.0" } }, "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA=="], - - "readdirp": ["readdirp@3.6.0", "", { "dependencies": { "picomatch": "^2.2.1" } }, "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA=="], - - "recharts": ["recharts@3.6.0", "", { "dependencies": { "@reduxjs/toolkit": "1.x.x || 2.x.x", "clsx": "^2.1.1", "decimal.js-light": "^2.5.1", "es-toolkit": "^1.39.3", "eventemitter3": "^5.0.1", "immer": "^10.1.1", "react-redux": "8.x.x || 9.x.x", "reselect": "5.1.1", "tiny-invariant": "^1.3.3", "use-sync-external-store": "^1.2.2", "victory-vendor": "^37.0.2" }, "peerDependencies": { "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", "react-dom": "^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", "react-is": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, "sha512-L5bjxvQRAe26RlToBAziKUB7whaGKEwD3znoM6fz3DrTowCIC/FnJYnuq1GEzB8Zv2kdTfaxQfi5GoH0tBinyg=="], - - "redux": ["redux@5.0.1", "", {}, "sha512-M9/ELqF6fy8FwmkpnF0S3YKOqMyoWJ4+CS5Efg2ct3oY9daQvd/Pc71FpGZsVsbl3Cpb+IIcjBDUnnyBdQbq4w=="], - - "redux-thunk": ["redux-thunk@3.1.0", "", { "peerDependencies": { 
"redux": "^5.0.0" } }, "sha512-NW2r5T6ksUKXCabzhL9z+h206HQw/NJkcLm1GPImRQ8IzfXwRGqjVhKJGauHirT0DAuyy6hjdnMZaRoAcy0Klw=="], - - "refa": ["refa@0.12.1", "", { "dependencies": { "@eslint-community/regexpp": "^4.8.0" } }, "sha512-J8rn6v4DBb2nnFqkqwy6/NnTYMcgLA+sLr0iIO41qpv0n+ngb7ksag2tMRl0inb1bbO/esUwzW1vbJi7K0sI0g=="], - - "reflect.getprototypeof": ["reflect.getprototypeof@1.0.10", "", { "dependencies": { "call-bind": "^1.0.8", "define-properties": "^1.2.1", "es-abstract": "^1.23.9", "es-errors": "^1.3.0", "es-object-atoms": "^1.0.0", "get-intrinsic": "^1.2.7", "get-proto": "^1.0.1", "which-builtin-type": "^1.2.1" } }, "sha512-00o4I+DVrefhv+nX0ulyi3biSHCPDe+yLv5o/p6d/UVlirijB8E16FtfwSAi4g3tcqrQ4lRAqQSoFEZJehYEcw=="], - - "refractor": ["refractor@5.0.0", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/prismjs": "^1.0.0", "hastscript": "^9.0.0", "parse-entities": "^4.0.0" } }, "sha512-QXOrHQF5jOpjjLfiNk5GFnWhRXvxjUVnlFxkeDmewR5sXkr3iM46Zo+CnRR8B+MDVqkULW4EcLVcRBNOPXHosw=="], - - "regexp-ast-analysis": ["regexp-ast-analysis@0.7.1", "", { "dependencies": { "@eslint-community/regexpp": "^4.8.0", "refa": "^0.12.1" } }, "sha512-sZuz1dYW/ZsfG17WSAG7eS85r5a0dDsvg+7BiiYR5o6lKCAtUrEwdmRmaGF6rwVj3LcmAeYkOWKEPlbPzN3Y3A=="], - - "regexp-tree": ["regexp-tree@0.1.27", "", { "bin": { "regexp-tree": "bin/regexp-tree" } }, "sha512-iETxpjK6YoRWJG5o6hXLwvjYAoW+FEZn9os0PD/b6AP6xQwsa/Y7lCVgIixBbUPMfhu+i2LtdeAqVTgGlQarfA=="], - - "regexp.prototype.flags": ["regexp.prototype.flags@1.5.4", "", { "dependencies": { "call-bind": "^1.0.8", "define-properties": "^1.2.1", "es-errors": "^1.3.0", "get-proto": "^1.0.1", "gopd": "^1.2.0", "set-function-name": "^2.0.2" } }, "sha512-dYqgNSZbDwkaJ2ceRd9ojCGjBq+mOm9LmtXnAnEGyHhN/5R7iDW2TRw3h+o/jCFxus3P2LfWIIiwowAjANm7IA=="], - - "regjsparser": ["regjsparser@0.13.0", "", { "dependencies": { "jsesc": "~3.1.0" }, "bin": { "regjsparser": "bin/parser" } }, 
"sha512-NZQZdC5wOE/H3UT28fVGL+ikOZcEzfMGk/c3iN9UGxzWHMa1op7274oyiUVrAG4B2EuFhus8SvkaYnhvW92p9Q=="], - - "require-from-string": ["require-from-string@2.0.2", "", {}, "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw=="], - - "reselect": ["reselect@5.1.1", "", {}, "sha512-K/BG6eIky/SBpzfHZv/dd+9JBFiS4SWV7FIujVyJRux6e45+73RaUHXLmIR1f7WOMaQ0U1km6qwklRQxpJJY0w=="], - - "reserved-identifiers": ["reserved-identifiers@1.2.0", "", {}, "sha512-yE7KUfFvaBFzGPs5H3Ops1RevfUEsDc5Iz65rOwWg4lE8HJSYtle77uul3+573457oHvBKuHYDl/xqUkKpEEdw=="], - - "resolve": ["resolve@1.22.11", "", { "dependencies": { "is-core-module": "^2.16.1", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" }, "bin": { "resolve": "bin/resolve" } }, "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ=="], - - "resolve-from": ["resolve-from@4.0.0", "", {}, "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g=="], - - "resolve-pkg-maps": ["resolve-pkg-maps@1.0.0", "", {}, "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw=="], - - "reusify": ["reusify@1.1.0", "", {}, "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw=="], - - "rollup": ["rollup@4.53.3", "", { "dependencies": { "@types/estree": "1.0.8" }, "optionalDependencies": { "@rollup/rollup-android-arm-eabi": "4.53.3", "@rollup/rollup-android-arm64": "4.53.3", "@rollup/rollup-darwin-arm64": "4.53.3", "@rollup/rollup-darwin-x64": "4.53.3", "@rollup/rollup-freebsd-arm64": "4.53.3", "@rollup/rollup-freebsd-x64": "4.53.3", "@rollup/rollup-linux-arm-gnueabihf": "4.53.3", "@rollup/rollup-linux-arm-musleabihf": "4.53.3", "@rollup/rollup-linux-arm64-gnu": "4.53.3", "@rollup/rollup-linux-arm64-musl": "4.53.3", "@rollup/rollup-linux-loong64-gnu": "4.53.3", "@rollup/rollup-linux-ppc64-gnu": "4.53.3", "@rollup/rollup-linux-riscv64-gnu": 
"4.53.3", "@rollup/rollup-linux-riscv64-musl": "4.53.3", "@rollup/rollup-linux-s390x-gnu": "4.53.3", "@rollup/rollup-linux-x64-gnu": "4.53.3", "@rollup/rollup-linux-x64-musl": "4.53.3", "@rollup/rollup-openharmony-arm64": "4.53.3", "@rollup/rollup-win32-arm64-msvc": "4.53.3", "@rollup/rollup-win32-ia32-msvc": "4.53.3", "@rollup/rollup-win32-x64-gnu": "4.53.3", "@rollup/rollup-win32-x64-msvc": "4.53.3", "fsevents": "~2.3.2" }, "bin": { "rollup": "dist/bin/rollup" } }, "sha512-w8GmOxZfBmKknvdXU1sdM9NHcoQejwF/4mNgj2JuEEdRaHwwF12K7e9eXn1nLZ07ad+du76mkVsyeb2rKGllsA=="], - - "run-parallel": ["run-parallel@1.2.0", "", { "dependencies": { "queue-microtask": "^1.2.2" } }, "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA=="], - - "safe-array-concat": ["safe-array-concat@1.1.3", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.2", "get-intrinsic": "^1.2.6", "has-symbols": "^1.1.0", "isarray": "^2.0.5" } }, "sha512-AURm5f0jYEOydBj7VQlVvDrjeFgthDdEF5H1dP+6mNpoXOMo1quQqJ4wvJDyRZ9+pO3kGWoOdmV08cSv2aJV6Q=="], - - "safe-push-apply": ["safe-push-apply@1.0.0", "", { "dependencies": { "es-errors": "^1.3.0", "isarray": "^2.0.5" } }, "sha512-iKE9w/Z7xCzUMIZqdBsp6pEQvwuEebH4vdpjcDWnyzaI6yl6O9FHvVpmGelvEHNsoY6wGblkxR6Zty/h00WiSA=="], - - "safe-regex-test": ["safe-regex-test@1.1.0", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "is-regex": "^1.2.1" } }, "sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw=="], - - "safer-buffer": ["safer-buffer@2.1.2", "", {}, "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="], - - "saxes": ["saxes@6.0.0", "", { "dependencies": { "xmlchars": "^2.2.0" } }, "sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA=="], - - "scheduler": ["scheduler@0.27.0", "", {}, 
"sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q=="], - - "scslre": ["scslre@0.3.0", "", { "dependencies": { "@eslint-community/regexpp": "^4.8.0", "refa": "^0.12.0", "regexp-ast-analysis": "^0.7.0" } }, "sha512-3A6sD0WYP7+QrjbfNA2FN3FsOaGGFoekCVgTyypy53gPxhbkCIjtO6YWgdrfM+n/8sI8JeXZOIxsHjMTNxQ4nQ=="], - - "semver": ["semver@7.7.3", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q=="], - - "set-function-length": ["set-function-length@1.2.2", "", { "dependencies": { "define-data-property": "^1.1.4", "es-errors": "^1.3.0", "function-bind": "^1.1.2", "get-intrinsic": "^1.2.4", "gopd": "^1.0.1", "has-property-descriptors": "^1.0.2" } }, "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg=="], - - "set-function-name": ["set-function-name@2.0.2", "", { "dependencies": { "define-data-property": "^1.1.4", "es-errors": "^1.3.0", "functions-have-names": "^1.2.3", "has-property-descriptors": "^1.0.2" } }, "sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ=="], - - "set-proto": ["set-proto@1.0.0", "", { "dependencies": { "dunder-proto": "^1.0.1", "es-errors": "^1.3.0", "es-object-atoms": "^1.0.0" } }, "sha512-RJRdvCo6IAnPdsvP/7m6bsQqNnn1FCBX5ZNtFL98MmFF/4xAIJTIg1YbHW5DC2W5SKZanrC6i4HsJqlajw/dZw=="], - - "shallowequal": ["shallowequal@1.1.0", "", {}, "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ=="], - - "sharp": ["sharp@0.34.5", "", { "dependencies": { "@img/colour": "^1.0.0", "detect-libc": "^2.1.2", "semver": "^7.7.3" }, "optionalDependencies": { "@img/sharp-darwin-arm64": "0.34.5", "@img/sharp-darwin-x64": "0.34.5", "@img/sharp-libvips-darwin-arm64": "1.2.4", "@img/sharp-libvips-darwin-x64": "1.2.4", "@img/sharp-libvips-linux-arm": "1.2.4", "@img/sharp-libvips-linux-arm64": "1.2.4", 
"@img/sharp-libvips-linux-ppc64": "1.2.4", "@img/sharp-libvips-linux-riscv64": "1.2.4", "@img/sharp-libvips-linux-s390x": "1.2.4", "@img/sharp-libvips-linux-x64": "1.2.4", "@img/sharp-libvips-linuxmusl-arm64": "1.2.4", "@img/sharp-libvips-linuxmusl-x64": "1.2.4", "@img/sharp-linux-arm": "0.34.5", "@img/sharp-linux-arm64": "0.34.5", "@img/sharp-linux-ppc64": "0.34.5", "@img/sharp-linux-riscv64": "0.34.5", "@img/sharp-linux-s390x": "0.34.5", "@img/sharp-linux-x64": "0.34.5", "@img/sharp-linuxmusl-arm64": "0.34.5", "@img/sharp-linuxmusl-x64": "0.34.5", "@img/sharp-wasm32": "0.34.5", "@img/sharp-win32-arm64": "0.34.5", "@img/sharp-win32-ia32": "0.34.5", "@img/sharp-win32-x64": "0.34.5" } }, "sha512-Ou9I5Ft9WNcCbXrU9cMgPBcCK8LiwLqcbywW3t4oDV37n1pzpuNLsYiAV8eODnjbtQlSDwZ2cUEeQz4E54Hltg=="], - - "shebang-command": ["shebang-command@2.0.0", "", { "dependencies": { "shebang-regex": "^3.0.0" } }, "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA=="], - - "shebang-regex": ["shebang-regex@3.0.0", "", {}, "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A=="], - - "side-channel": ["side-channel@1.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "object-inspect": "^1.13.3", "side-channel-list": "^1.0.0", "side-channel-map": "^1.0.1", "side-channel-weakmap": "^1.0.2" } }, "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw=="], - - "side-channel-list": ["side-channel-list@1.0.0", "", { "dependencies": { "es-errors": "^1.3.0", "object-inspect": "^1.13.3" } }, "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA=="], - - "side-channel-map": ["side-channel-map@1.0.1", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.5", "object-inspect": "^1.13.3" } }, "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA=="], - - 
"side-channel-weakmap": ["side-channel-weakmap@1.0.2", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.5", "object-inspect": "^1.13.3", "side-channel-map": "^1.0.1" } }, "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A=="], - - "sisteransi": ["sisteransi@1.0.5", "", {}, "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg=="], - - "sonner": ["sonner@2.0.7", "", { "peerDependencies": { "react": "^18.0.0 || ^19.0.0 || ^19.0.0-rc", "react-dom": "^18.0.0 || ^19.0.0 || ^19.0.0-rc" } }, "sha512-W6ZN4p58k8aDKA4XPcx2hpIQXBRAgyiWVkYhT7CvK6D3iAu7xjvVyhQHg2/iaKJZ1XVJ4r7XuwGL+WGEK37i9w=="], - - "source-map-js": ["source-map-js@1.2.1", "", {}, "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA=="], - - "space-separated-tokens": ["space-separated-tokens@2.0.2", "", {}, "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q=="], - - "spdx-exceptions": ["spdx-exceptions@2.5.0", "", {}, "sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w=="], - - "spdx-expression-parse": ["spdx-expression-parse@4.0.0", "", { "dependencies": { "spdx-exceptions": "^2.1.0", "spdx-license-ids": "^3.0.0" } }, "sha512-Clya5JIij/7C6bRR22+tnGXbc4VKlibKSVj2iHvVeX5iMW7s1SIQlqu699JkODJJIhh/pUu8L0/VLh8xflD+LQ=="], - - "spdx-license-ids": ["spdx-license-ids@3.0.22", "", {}, "sha512-4PRT4nh1EImPbt2jASOKHX7PB7I+e4IWNLvkKFDxNhJlfjbYlleYQh285Z/3mPTHSAK/AvdMmw5BNNuYH8ShgQ=="], - - "stable-hash": ["stable-hash@0.0.5", "", {}, "sha512-+L3ccpzibovGXFK+Ap/f8LOS0ahMrHTf3xu7mMLSpEGU0EO9ucaysSylKo9eRDFNhWve/y275iPmIZ4z39a9iA=="], - - "stop-iteration-iterator": ["stop-iteration-iterator@1.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "internal-slot": "^1.1.0" } }, 
"sha512-eLoXW/DHyl62zxY4SCaIgnRhuMr6ri4juEYARS8E6sCEqzKpOiE521Ucofdx+KnDZl5xmvGYaaKCk5FEOxJCoQ=="], - - "string-ts": ["string-ts@2.3.1", "", {}, "sha512-xSJq+BS52SaFFAVxuStmx6n5aYZU571uYUnUrPXkPFCfdHyZMMlbP2v2Wx5sNBnAVzq/2+0+mcBLBa3Xa5ubYw=="], - - "string.prototype.includes": ["string.prototype.includes@2.0.1", "", { "dependencies": { "call-bind": "^1.0.7", "define-properties": "^1.2.1", "es-abstract": "^1.23.3" } }, "sha512-o7+c9bW6zpAdJHTtujeePODAhkuicdAryFsfVKwA+wGw89wJ4GTY484WTucM9hLtDEOpOvI+aHnzqnC5lHp4Rg=="], - - "string.prototype.matchall": ["string.prototype.matchall@4.0.12", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.3", "define-properties": "^1.2.1", "es-abstract": "^1.23.6", "es-errors": "^1.3.0", "es-object-atoms": "^1.0.0", "get-intrinsic": "^1.2.6", "gopd": "^1.2.0", "has-symbols": "^1.1.0", "internal-slot": "^1.1.0", "regexp.prototype.flags": "^1.5.3", "set-function-name": "^2.0.2", "side-channel": "^1.1.0" } }, "sha512-6CC9uyBL+/48dYizRf7H7VAYCMCNTBeM78x/VTUe9bFEaxBepPJDa1Ow99LqI/1yF7kuy7Q3cQsYMrcjGUcskA=="], - - "string.prototype.repeat": ["string.prototype.repeat@1.0.0", "", { "dependencies": { "define-properties": "^1.1.3", "es-abstract": "^1.17.5" } }, "sha512-0u/TldDbKD8bFCQ/4f5+mNRrXwZ8hg2w7ZR8wa16e8z9XpePWl3eGEcUD0OXpEH/VJH/2G3gjUtR3ZOiBe2S/w=="], - - "string.prototype.trim": ["string.prototype.trim@1.2.10", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.2", "define-data-property": "^1.1.4", "define-properties": "^1.2.1", "es-abstract": "^1.23.5", "es-object-atoms": "^1.0.0", "has-property-descriptors": "^1.0.2" } }, "sha512-Rs66F0P/1kedk5lyYyH9uBzuiI/kNRmwJAR9quK6VOtIpZ2G+hMZd+HQbbv25MgCA6gEffoMZYxlTod4WcdrKA=="], - - "string.prototype.trimend": ["string.prototype.trimend@1.0.9", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.2", "define-properties": "^1.2.1", "es-object-atoms": "^1.0.0" } }, 
"sha512-G7Ok5C6E/j4SGfyLCloXTrngQIQU3PWtXGst3yM7Bea9FRURf1S42ZHlZZtsNque2FN2PoUhfZXYLNWwEr4dLQ=="], - - "string.prototype.trimstart": ["string.prototype.trimstart@1.0.8", "", { "dependencies": { "call-bind": "^1.0.7", "define-properties": "^1.2.1", "es-object-atoms": "^1.0.0" } }, "sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg=="], - - "strip-bom": ["strip-bom@3.0.0", "", {}, "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA=="], - - "strip-indent": ["strip-indent@4.1.1", "", {}, "sha512-SlyRoSkdh1dYP0PzclLE7r0M9sgbFKKMFXpFRUMNuKhQSbC6VQIGzq3E0qsfvGJaUFJPGv6Ws1NZ/haTAjfbMA=="], - - "strip-json-comments": ["strip-json-comments@3.1.1", "", {}, "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig=="], - - "styled-components": ["styled-components@6.1.19", "", { "dependencies": { "@emotion/is-prop-valid": "1.2.2", "@emotion/unitless": "0.8.1", "@types/stylis": "4.2.5", "css-to-react-native": "3.2.0", "csstype": "3.1.3", "postcss": "8.4.49", "shallowequal": "1.1.0", "stylis": "4.3.2", "tslib": "2.6.2" }, "peerDependencies": { "react": ">= 16.8.0", "react-dom": ">= 16.8.0" } }, "sha512-1v/e3Dl1BknC37cXMhwGomhO8AkYmN41CqyX9xhUDxry1ns3BFQy2lLDRQXJRdVVWB9OHemv/53xaStimvWyuA=="], - - "styled-jsx": ["styled-jsx@5.1.6", "", { "dependencies": { "client-only": "0.0.1" }, "peerDependencies": { "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0 || ^19.0.0-0" } }, "sha512-qSVyDTeMotdvQYoHWLNGwRFJHC+i+ZvdBRYosOFgC+Wg1vx4frN2/RG/NA7SYqqvKNLf39P2LSRA2pu6n0XYZA=="], - - "stylis": ["stylis@4.3.2", "", {}, "sha512-bhtUjWd/z6ltJiQwg0dUfxEJ+W+jdqQd8TbWLWyeIJHlnsqmGLRFFd8e5mA0AZi/zx90smXRlN66YMTcaSFifg=="], - - "sucrase": ["sucrase@3.35.1", "", { "dependencies": { "@jridgewell/gen-mapping": "^0.3.2", "commander": "^4.0.0", "lines-and-columns": "^1.1.6", "mz": "^2.7.0", "pirates": "^4.0.1", "tinyglobby": "^0.2.11", "ts-interface-checker": "^0.1.9" }, "bin": { 
"sucrase": "bin/sucrase", "sucrase-node": "bin/sucrase-node" } }, "sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw=="], - - "supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - - "supports-preserve-symlinks-flag": ["supports-preserve-symlinks-flag@1.0.0", "", {}, "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w=="], - - "symbol-tree": ["symbol-tree@3.2.4", "", {}, "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw=="], - - "synckit": ["synckit@0.11.11", "", { "dependencies": { "@pkgr/core": "^0.2.9" } }, "sha512-MeQTA1r0litLUf0Rp/iisCaL8761lKAZHaimlbGK4j0HysC4PLfqygQj9srcs0m2RdtDYnF8UuYyKpbjHYp7Jw=="], - - "tabbable": ["tabbable@6.3.0", "", {}, "sha512-EIHvdY5bPLuWForiR/AN2Bxngzpuwn1is4asboytXtpTgsArc+WmSJKVLlhdh71u7jFcryDqB2A8lQvj78MkyQ=="], - - "tailwind-merge": ["tailwind-merge@3.4.0", "", {}, "sha512-uSaO4gnW+b3Y2aWoWfFpX62vn2sR3skfhbjsEnaBI81WD1wBLlHZe5sWf0AqjksNdYTbGBEd0UasQMT3SNV15g=="], - - "tailwindcss": ["tailwindcss@3.4.19", "", { "dependencies": { "@alloc/quick-lru": "^5.2.0", "arg": "^5.0.2", "chokidar": "^3.6.0", "didyoumean": "^1.2.2", "dlv": "^1.1.3", "fast-glob": "^3.3.2", "glob-parent": "^6.0.2", "is-glob": "^4.0.3", "jiti": "^1.21.7", "lilconfig": "^3.1.3", "micromatch": "^4.0.8", "normalize-path": "^3.0.0", "object-hash": "^3.0.0", "picocolors": "^1.1.1", "postcss": "^8.4.47", "postcss-import": "^15.1.0", "postcss-js": "^4.0.1", "postcss-load-config": "^4.0.2 || ^5.0 || ^6.0", "postcss-nested": "^6.2.0", "postcss-selector-parser": "^6.1.2", "resolve": "^1.22.8", "sucrase": "^3.35.0" }, "bin": { "tailwind": "lib/cli.js", "tailwindcss": "lib/cli.js" } }, "sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ=="], - - 
"tailwindcss-animate": ["tailwindcss-animate@1.0.7", "", { "peerDependencies": { "tailwindcss": ">=3.0.0 || insiders" } }, "sha512-bl6mpH3T7I3UFxuvDEXLxy/VuFxBk5bbzplh7tXI68mwMokNYd1t9qPBHlnyTwfa4JGC4zP516I1hYYtQ/vspA=="], - - "tailwindcss-animated": ["tailwindcss-animated@1.1.2", "", { "peerDependencies": { "tailwindcss": ">=3.1.0" } }, "sha512-SI4owS5ojserhgEYIZA/uFVdNjU2GMB2P3sjtjmFA52VxoUi+Hht6oR5+RdT+CxrX9cNNYEa+vbTWHvN9zbj3w=="], - - "tapable": ["tapable@2.3.0", "", {}, "sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg=="], - - "thenify": ["thenify@3.3.1", "", { "dependencies": { "any-promise": "^1.0.0" } }, "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw=="], - - "thenify-all": ["thenify-all@1.6.0", "", { "dependencies": { "thenify": ">= 3.1.0 < 4" } }, "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA=="], - - "tiny-invariant": ["tiny-invariant@1.3.3", "", {}, "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg=="], - - "tinyexec": ["tinyexec@1.0.2", "", {}, "sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg=="], - - "tinyglobby": ["tinyglobby@0.2.15", "", { "dependencies": { "fdir": "^6.5.0", "picomatch": "^4.0.3" } }, "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ=="], - - "tldts": ["tldts@7.0.19", "", { "dependencies": { "tldts-core": "^7.0.19" }, "bin": { "tldts": "bin/cli.js" } }, "sha512-8PWx8tvC4jDB39BQw1m4x8y5MH1BcQ5xHeL2n7UVFulMPH/3Q0uiamahFJ3lXA0zO2SUyRXuVVbWSDmstlt9YA=="], - - "tldts-core": ["tldts-core@7.0.19", "", {}, "sha512-lJX2dEWx0SGH4O6p+7FPwYmJ/bu1JbcGJ8RLaG9b7liIgZ85itUVEPbMtWRVrde/0fnDPEPHW10ZsKW3kVsE9A=="], - - "to-regex-range": ["to-regex-range@5.0.1", "", { "dependencies": { "is-number": "^7.0.0" } }, 
"sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ=="], - - "to-valid-identifier": ["to-valid-identifier@1.0.0", "", { "dependencies": { "@sindresorhus/base62": "^1.0.0", "reserved-identifiers": "^1.0.0" } }, "sha512-41wJyvKep3yT2tyPqX/4blcfybknGB4D+oETKLs7Q76UiPqRpUJK3hr1nxelyYO0PHKVzJwlu0aCeEAsGI6rpw=="], - - "toml-eslint-parser": ["toml-eslint-parser@0.10.1", "", { "dependencies": { "eslint-visitor-keys": "^3.0.0" } }, "sha512-9mjy3frhioGIVGcwamlVlUyJ9x+WHw/TXiz9R4YOlmsIuBN43r9Dp8HZ35SF9EKjHrn3BUZj04CF+YqZ2oJ+7w=="], - - "tough-cookie": ["tough-cookie@6.0.0", "", { "dependencies": { "tldts": "^7.0.5" } }, "sha512-kXuRi1mtaKMrsLUxz3sQYvVl37B0Ns6MzfrtV5DvJceE9bPyspOqk9xxv7XbZWcfLWbFmm997vl83qUWVJA64w=="], - - "tr46": ["tr46@6.0.0", "", { "dependencies": { "punycode": "^2.3.1" } }, "sha512-bLVMLPtstlZ4iMQHpFHTR7GAGj2jxi8Dg0s2h2MafAE4uSWF98FC/3MomU51iQAMf8/qDUbKWf5GxuvvVcXEhw=="], - - "ts-api-utils": ["ts-api-utils@2.1.0", "", { "peerDependencies": { "typescript": ">=4.8.4" } }, "sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ=="], - - "ts-declaration-location": ["ts-declaration-location@1.0.7", "", { "dependencies": { "picomatch": "^4.0.2" }, "peerDependencies": { "typescript": ">=4.0.0" } }, "sha512-EDyGAwH1gO0Ausm9gV6T2nUvBgXT5kGoCMJPllOaooZ+4VvJiKBdZE7wK18N1deEowhcUptS+5GXZK8U/fvpwA=="], - - "ts-interface-checker": ["ts-interface-checker@0.1.13", "", {}, "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA=="], - - "ts-pattern": ["ts-pattern@5.9.0", "", {}, "sha512-6s5V71mX8qBUmlgbrfL33xDUwO0fq48rxAu2LBE11WBeGdpCPOsXksQbZJHvHwhrd3QjUusd3mAOM5Gg0mFBLg=="], - - "tsconfig-paths": ["tsconfig-paths@3.15.0", "", { "dependencies": { "@types/json5": "^0.0.29", "json5": "^1.0.2", "minimist": "^1.2.6", "strip-bom": "^3.0.0" } }, "sha512-2Ac2RgzDe/cn48GvOe3M+o82pEFewD3UPbyoUHHdKasHwJKjds4fLXWf/Ux5kATBKN20oaFGu+jbElp1pos0mg=="], - - "tslib": 
["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="], - - "type-check": ["type-check@0.4.0", "", { "dependencies": { "prelude-ls": "^1.2.1" } }, "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew=="], - - "typed-array-buffer": ["typed-array-buffer@1.0.3", "", { "dependencies": { "call-bound": "^1.0.3", "es-errors": "^1.3.0", "is-typed-array": "^1.1.14" } }, "sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw=="], - - "typed-array-byte-length": ["typed-array-byte-length@1.0.3", "", { "dependencies": { "call-bind": "^1.0.8", "for-each": "^0.3.3", "gopd": "^1.2.0", "has-proto": "^1.2.0", "is-typed-array": "^1.1.14" } }, "sha512-BaXgOuIxz8n8pIq3e7Atg/7s+DpiYrxn4vdot3w9KbnBhcRQq6o3xemQdIfynqSeXeDrF32x+WvfzmOjPiY9lg=="], - - "typed-array-byte-offset": ["typed-array-byte-offset@1.0.4", "", { "dependencies": { "available-typed-arrays": "^1.0.7", "call-bind": "^1.0.8", "for-each": "^0.3.3", "gopd": "^1.2.0", "has-proto": "^1.2.0", "is-typed-array": "^1.1.15", "reflect.getprototypeof": "^1.0.9" } }, "sha512-bTlAFB/FBYMcuX81gbL4OcpH5PmlFHqlCCpAl8AlEzMz5k53oNDvN8p1PNOWLEmI2x4orp3raOFB51tv9X+MFQ=="], - - "typed-array-length": ["typed-array-length@1.0.7", "", { "dependencies": { "call-bind": "^1.0.7", "for-each": "^0.3.3", "gopd": "^1.0.1", "is-typed-array": "^1.1.13", "possible-typed-array-names": "^1.0.0", "reflect.getprototypeof": "^1.0.6" } }, "sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg=="], - - "typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="], - - "ufo": ["ufo@1.6.1", "", {}, "sha512-9a4/uxlTWJ4+a5i0ooc1rU7C7YOw3wT+UGqdeNNHWnOF9qcMBgLRS+4IYUqbczewFx4mLEig6gawh7X6mFlEkA=="], - - "unbox-primitive": ["unbox-primitive@1.1.0", 
"", { "dependencies": { "call-bound": "^1.0.3", "has-bigints": "^1.0.2", "has-symbols": "^1.1.0", "which-boxed-primitive": "^1.1.1" } }, "sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw=="], - - "undici-types": ["undici-types@7.16.0", "", {}, "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw=="], - - "unist-util-is": ["unist-util-is@6.0.1", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g=="], - - "unist-util-stringify-position": ["unist-util-stringify-position@4.0.0", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ=="], - - "unist-util-visit": ["unist-util-visit@5.0.0", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-is": "^6.0.0", "unist-util-visit-parents": "^6.0.0" } }, "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg=="], - - "unist-util-visit-parents": ["unist-util-visit-parents@6.0.2", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-is": "^6.0.0" } }, "sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ=="], - - "unrs-resolver": ["unrs-resolver@1.11.1", "", { "dependencies": { "napi-postinstall": "^0.3.0" }, "optionalDependencies": { "@unrs/resolver-binding-android-arm-eabi": "1.11.1", "@unrs/resolver-binding-android-arm64": "1.11.1", "@unrs/resolver-binding-darwin-arm64": "1.11.1", "@unrs/resolver-binding-darwin-x64": "1.11.1", "@unrs/resolver-binding-freebsd-x64": "1.11.1", "@unrs/resolver-binding-linux-arm-gnueabihf": "1.11.1", "@unrs/resolver-binding-linux-arm-musleabihf": "1.11.1", "@unrs/resolver-binding-linux-arm64-gnu": "1.11.1", "@unrs/resolver-binding-linux-arm64-musl": "1.11.1", "@unrs/resolver-binding-linux-ppc64-gnu": "1.11.1", 
"@unrs/resolver-binding-linux-riscv64-gnu": "1.11.1", "@unrs/resolver-binding-linux-riscv64-musl": "1.11.1", "@unrs/resolver-binding-linux-s390x-gnu": "1.11.1", "@unrs/resolver-binding-linux-x64-gnu": "1.11.1", "@unrs/resolver-binding-linux-x64-musl": "1.11.1", "@unrs/resolver-binding-wasm32-wasi": "1.11.1", "@unrs/resolver-binding-win32-arm64-msvc": "1.11.1", "@unrs/resolver-binding-win32-ia32-msvc": "1.11.1", "@unrs/resolver-binding-win32-x64-msvc": "1.11.1" } }, "sha512-bSjt9pjaEBnNiGgc9rUiHGKv5l4/TGzDmYw3RhnkJGtLhbnnA/5qJj7x3dNDCRx/PJxu774LlH8lCOlB4hEfKg=="], - - "update-browserslist-db": ["update-browserslist-db@1.2.2", "", { "dependencies": { "escalade": "^3.2.0", "picocolors": "^1.1.1" }, "peerDependencies": { "browserslist": ">= 4.21.0" }, "bin": { "update-browserslist-db": "cli.js" } }, "sha512-E85pfNzMQ9jpKkA7+TJAi4TJN+tBCuWh5rUcS/sv6cFi+1q9LYDwDI5dpUL0u/73EElyQ8d3TEaeW4sPedBqYA=="], - - "uri-js": ["uri-js@4.4.1", "", { "dependencies": { "punycode": "^2.1.0" } }, "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg=="], - - "use-callback-ref": ["use-callback-ref@1.3.3", "", { "dependencies": { "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-jQL3lRnocaFtu3V00JToYz/4QkNWswxijDaCVNZRiRTO3HQDLsdu1ZtmIUvV4yPp+rvWm5j0y0TG/S61cuijTg=="], - - "use-sidecar": ["use-sidecar@1.1.3", "", { "dependencies": { "detect-node-es": "^1.1.0", "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-Fedw0aZvkhynoPYlA5WXrMCAMm+nSWdZt6lzJQ7Ok8S6Q+VsHmHpRWndVRJ8Be0ZbkfPc5LRYH+5XrzXcEeLRQ=="], - - "use-sync-external-store": ["use-sync-external-store@1.6.0", "", { "peerDependencies": { "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, 
"sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w=="], - - "util-deprecate": ["util-deprecate@1.0.2", "", {}, "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="], - - "victory-vendor": ["victory-vendor@37.3.6", "", { "dependencies": { "@types/d3-array": "^3.0.3", "@types/d3-ease": "^3.0.0", "@types/d3-interpolate": "^3.0.1", "@types/d3-scale": "^4.0.2", "@types/d3-shape": "^3.1.0", "@types/d3-time": "^3.0.0", "@types/d3-timer": "^3.0.0", "d3-array": "^3.1.6", "d3-ease": "^3.0.1", "d3-interpolate": "^3.0.1", "d3-scale": "^4.0.2", "d3-shape": "^3.1.0", "d3-time": "^3.0.0", "d3-timer": "^3.0.1" } }, "sha512-SbPDPdDBYp+5MJHhBCAyI7wKM3d5ivekigc2Dk2s7pgbZ9wIgIBYGVw4zGHBml/qTFbexrofXW6Gu4noGxrOwQ=="], - - "vite": ["vite@7.2.7", "", { "dependencies": { "esbuild": "^0.25.0", "fdir": "^6.5.0", "picomatch": "^4.0.3", "postcss": "^8.5.6", "rollup": "^4.43.0", "tinyglobby": "^0.2.15" }, "optionalDependencies": { "fsevents": "~2.3.3" }, "peerDependencies": { "@types/node": "^20.19.0 || >=22.12.0", "jiti": ">=1.21.0", "less": "^4.0.0", "lightningcss": "^1.21.0", "sass": "^1.70.0", "sass-embedded": "^1.70.0", "stylus": ">=0.54.8", "sugarss": "^5.0.0", "terser": "^5.16.0", "tsx": "^4.8.1", "yaml": "^2.4.2" }, "optionalPeers": ["@types/node", "jiti", "less", "lightningcss", "sass", "sass-embedded", "stylus", "sugarss", "terser", "tsx", "yaml"], "bin": { "vite": "bin/vite.js" } }, "sha512-ITcnkFeR3+fI8P1wMgItjGrR10170d8auB4EpMLPqmx6uxElH3a/hHGQabSHKdqd4FXWO1nFIp9rRn7JQ34ACQ=="], - - "vue-eslint-parser": ["vue-eslint-parser@10.2.0", "", { "dependencies": { "debug": "^4.4.0", "eslint-scope": "^8.2.0", "eslint-visitor-keys": "^4.2.0", "espree": "^10.3.0", "esquery": "^1.6.0", "semver": "^7.6.3" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0" } }, "sha512-CydUvFOQKD928UzZhTp4pr2vWz1L+H99t7Pkln2QSPdvmURT0MoC4wUccfCnuEaihNsu9aYYyk+bep8rlfkUXw=="], - - "w3c-xmlserializer": 
["w3c-xmlserializer@5.0.0", "", { "dependencies": { "xml-name-validator": "^5.0.0" } }, "sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA=="], - - "webidl-conversions": ["webidl-conversions@8.0.0", "", {}, "sha512-n4W4YFyz5JzOfQeA8oN7dUYpR+MBP3PIUsn2jLjWXwK5ASUzt0Jc/A5sAUZoCYFJRGF0FBKJ+1JjN43rNdsQzA=="], - - "whatwg-encoding": ["whatwg-encoding@3.1.1", "", { "dependencies": { "iconv-lite": "0.6.3" } }, "sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ=="], - - "whatwg-mimetype": ["whatwg-mimetype@4.0.0", "", {}, "sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg=="], - - "whatwg-url": ["whatwg-url@15.1.0", "", { "dependencies": { "tr46": "^6.0.0", "webidl-conversions": "^8.0.0" } }, "sha512-2ytDk0kiEj/yu90JOAp44PVPUkO9+jVhyf+SybKlRHSDlvOOZhdPIrr7xTH64l4WixO2cP+wQIcgujkGBPPz6g=="], - - "which": ["which@2.0.2", "", { "dependencies": { "isexe": "^2.0.0" }, "bin": { "node-which": "./bin/node-which" } }, "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA=="], - - "which-boxed-primitive": ["which-boxed-primitive@1.1.1", "", { "dependencies": { "is-bigint": "^1.1.0", "is-boolean-object": "^1.2.1", "is-number-object": "^1.1.1", "is-string": "^1.1.1", "is-symbol": "^1.1.1" } }, "sha512-TbX3mj8n0odCBFVlY8AxkqcHASw3L60jIuF8jFP78az3C2YhmGvqbHBpAjTRH2/xqYunrJ9g1jSyjCjpoWzIAA=="], - - "which-builtin-type": ["which-builtin-type@1.2.1", "", { "dependencies": { "call-bound": "^1.0.2", "function.prototype.name": "^1.1.6", "has-tostringtag": "^1.0.2", "is-async-function": "^2.0.0", "is-date-object": "^1.1.0", "is-finalizationregistry": "^1.1.0", "is-generator-function": "^1.0.10", "is-regex": "^1.2.1", "is-weakref": "^1.0.2", "isarray": "^2.0.5", "which-boxed-primitive": "^1.1.0", "which-collection": "^1.0.2", "which-typed-array": "^1.1.16" } }, 
"sha512-6iBczoX+kDQ7a3+YJBnh3T+KZRxM/iYNPXicqk66/Qfm1b93iu+yOImkg0zHbj5LNOcNv1TEADiZ0xa34B4q6Q=="], - - "which-collection": ["which-collection@1.0.2", "", { "dependencies": { "is-map": "^2.0.3", "is-set": "^2.0.3", "is-weakmap": "^2.0.2", "is-weakset": "^2.0.3" } }, "sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw=="], - - "which-typed-array": ["which-typed-array@1.1.19", "", { "dependencies": { "available-typed-arrays": "^1.0.7", "call-bind": "^1.0.8", "call-bound": "^1.0.4", "for-each": "^0.3.5", "get-proto": "^1.0.1", "gopd": "^1.2.0", "has-tostringtag": "^1.0.2" } }, "sha512-rEvr90Bck4WZt9HHFC4DJMsjvu7x+r6bImz0/BrbWb7A2djJ8hnZMrWnHo9F8ssv0OMErasDhftrfROTyqSDrw=="], - - "word-wrap": ["word-wrap@1.2.5", "", {}, "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA=="], - - "ws": ["ws@8.18.3", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg=="], - - "xml-name-validator": ["xml-name-validator@5.0.0", "", {}, "sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg=="], - - "xmlchars": ["xmlchars@2.2.0", "", {}, "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw=="], - - "xtend": ["xtend@4.0.2", "", {}, "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ=="], - - "yallist": ["yallist@3.1.1", "", {}, "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g=="], - - "yaml": ["yaml@2.8.2", "", { "bin": { "yaml": "bin.mjs" } }, "sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A=="], - - "yaml-eslint-parser": ["yaml-eslint-parser@1.3.2", "", { "dependencies": { "eslint-visitor-keys": "^3.0.0", "yaml": 
"^2.0.0" } }, "sha512-odxVsHAkZYYglR30aPYRY4nUGJnoJ2y1ww2HDvZALo0BDETv9kWbi16J52eHs+PWRNmF4ub6nZqfVOeesOvntg=="], - - "yocto-queue": ["yocto-queue@0.1.0", "", {}, "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q=="], - - "zod": ["zod@4.2.1", "", {}, "sha512-0wZ1IRqGGhMP76gLqz8EyfBXKk0J2qo2+H3fi4mcUP/KtTocoX08nmIAHl1Z2kJIZbZee8KOpBCSNPRgauucjw=="], - - "zod-validation-error": ["zod-validation-error@4.0.2", "", { "peerDependencies": { "zod": "^3.25.0 || ^4.0.0" } }, "sha512-Q6/nZLe6jxuU80qb/4uJ4t5v2VEZ44lzQjPDhYJNztRQ4wyWc6VF3D3Kb/fAuPetZQnhS3hnajCf9CsWesghLQ=="], - - "zwitch": ["zwitch@2.0.4", "", {}, "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A=="], - - "@babel/core/semver": ["semver@6.3.1", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="], - - "@babel/helper-compilation-targets/lru-cache": ["lru-cache@5.1.1", "", { "dependencies": { "yallist": "^3.0.2" } }, "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w=="], - - "@babel/helper-compilation-targets/semver": ["semver@6.3.1", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="], - - "@es-joy/jsdoccomment/jsdoc-type-pratt-parser": ["jsdoc-type-pratt-parser@7.0.0", "", {}, "sha512-c7YbokssPOSHmqTbSAmTtnVgAVa/7lumWNYqomgd5KOMyPrRve2anx6lonfOsXEQacqF9FKVUj7bLg4vRSvdYA=="], - - "@eslint-community/eslint-plugin-eslint-comments/ignore": ["ignore@5.3.2", "", {}, "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g=="], - - "@eslint-community/eslint-utils/eslint-visitor-keys": ["eslint-visitor-keys@3.4.3", "", {}, "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag=="], - - 
"@eslint-react/ast/@typescript-eslint/types": ["@typescript-eslint/types@8.50.0", "", {}, "sha512-iX1mgmGrXdANhhITbpp2QQM2fGehBse9LbTf0sidWK6yg/NE+uhV5dfU1g6EYPlcReYmkE9QLPq/2irKAmtS9w=="], - - "@eslint-react/ast/@typescript-eslint/utils": ["@typescript-eslint/utils@8.50.0", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.7.0", "@typescript-eslint/scope-manager": "8.50.0", "@typescript-eslint/types": "8.50.0", "@typescript-eslint/typescript-estree": "8.50.0" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-87KgUXET09CRjGCi2Ejxy3PULXna63/bMYv72tCAlDJC3Yqwln0HiFJ3VJMst2+mEtNtZu5oFvX4qJGjKsnAgg=="], - - "@eslint-react/core/@typescript-eslint/scope-manager": ["@typescript-eslint/scope-manager@8.50.0", "", { "dependencies": { "@typescript-eslint/types": "8.50.0", "@typescript-eslint/visitor-keys": "8.50.0" } }, "sha512-xCwfuCZjhIqy7+HKxBLrDVT5q/iq7XBVBXLn57RTIIpelLtEIZHXAF/Upa3+gaCpeV1NNS5Z9A+ID6jn50VD4A=="], - - "@eslint-react/core/@typescript-eslint/types": ["@typescript-eslint/types@8.50.0", "", {}, "sha512-iX1mgmGrXdANhhITbpp2QQM2fGehBse9LbTf0sidWK6yg/NE+uhV5dfU1g6EYPlcReYmkE9QLPq/2irKAmtS9w=="], - - "@eslint-react/core/@typescript-eslint/utils": ["@typescript-eslint/utils@8.50.0", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.7.0", "@typescript-eslint/scope-manager": "8.50.0", "@typescript-eslint/types": "8.50.0", "@typescript-eslint/typescript-estree": "8.50.0" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-87KgUXET09CRjGCi2Ejxy3PULXna63/bMYv72tCAlDJC3Yqwln0HiFJ3VJMst2+mEtNtZu5oFvX4qJGjKsnAgg=="], - - "@eslint-react/var/@typescript-eslint/scope-manager": ["@typescript-eslint/scope-manager@8.50.0", "", { "dependencies": { "@typescript-eslint/types": "8.50.0", "@typescript-eslint/visitor-keys": "8.50.0" } }, "sha512-xCwfuCZjhIqy7+HKxBLrDVT5q/iq7XBVBXLn57RTIIpelLtEIZHXAF/Upa3+gaCpeV1NNS5Z9A+ID6jn50VD4A=="], - - 
"@eslint-react/var/@typescript-eslint/types": ["@typescript-eslint/types@8.50.0", "", {}, "sha512-iX1mgmGrXdANhhITbpp2QQM2fGehBse9LbTf0sidWK6yg/NE+uhV5dfU1g6EYPlcReYmkE9QLPq/2irKAmtS9w=="], - - "@eslint-react/var/@typescript-eslint/utils": ["@typescript-eslint/utils@8.50.0", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.7.0", "@typescript-eslint/scope-manager": "8.50.0", "@typescript-eslint/types": "8.50.0", "@typescript-eslint/typescript-estree": "8.50.0" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-87KgUXET09CRjGCi2Ejxy3PULXna63/bMYv72tCAlDJC3Yqwln0HiFJ3VJMst2+mEtNtZu5oFvX4qJGjKsnAgg=="], - - "@eslint/eslintrc/globals": ["globals@14.0.0", "", {}, "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ=="], - - "@eslint/eslintrc/ignore": ["ignore@5.3.2", "", {}, "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g=="], - - "@radix-ui/react-collection/@radix-ui/react-slot": ["@radix-ui/react-slot@1.2.3", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A=="], - - "@radix-ui/react-dialog/@radix-ui/react-slot": ["@radix-ui/react-slot@1.2.3", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A=="], - - "@radix-ui/react-label/@radix-ui/react-primitive": ["@radix-ui/react-primitive@2.1.4", "", { "dependencies": { "@radix-ui/react-slot": "1.2.4" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": 
"^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-9hQc4+GNVtJAIEPEqlYqW5RiYdrr8ea5XQ0ZOnD6fgru+83kqT15mq2OCcbe8KnjRZl5vF3ks69AKz3kh1jrhg=="], - - "@radix-ui/react-menu/@radix-ui/react-slot": ["@radix-ui/react-slot@1.2.3", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A=="], - - "@radix-ui/react-popover/@radix-ui/react-slot": ["@radix-ui/react-slot@1.2.3", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A=="], - - "@radix-ui/react-primitive/@radix-ui/react-slot": ["@radix-ui/react-slot@1.2.3", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A=="], - - "@radix-ui/react-select/@radix-ui/react-slot": ["@radix-ui/react-slot@1.2.3", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A=="], - - "@radix-ui/react-separator/@radix-ui/react-primitive": ["@radix-ui/react-primitive@2.1.4", "", { "dependencies": { "@radix-ui/react-slot": "1.2.4" }, 
"peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-9hQc4+GNVtJAIEPEqlYqW5RiYdrr8ea5XQ0ZOnD6fgru+83kqT15mq2OCcbe8KnjRZl5vF3ks69AKz3kh1jrhg=="], - - "@radix-ui/react-tooltip/@radix-ui/react-slot": ["@radix-ui/react-slot@1.2.3", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A=="], - - "@reduxjs/toolkit/immer": ["immer@11.0.1", "", {}, "sha512-naDCyggtcBWANtIrjQEajhhBEuL9b0Zg4zmlWK2CzS6xCWSE39/vvf4LqnMjUAWHBhot4m9MHCM/Z+mfWhUkiA=="], - - "@typescript-eslint/eslint-plugin/@typescript-eslint/scope-manager": ["@typescript-eslint/scope-manager@8.50.0", "", { "dependencies": { "@typescript-eslint/types": "8.50.0", "@typescript-eslint/visitor-keys": "8.50.0" } }, "sha512-xCwfuCZjhIqy7+HKxBLrDVT5q/iq7XBVBXLn57RTIIpelLtEIZHXAF/Upa3+gaCpeV1NNS5Z9A+ID6jn50VD4A=="], - - "@typescript-eslint/eslint-plugin/@typescript-eslint/type-utils": ["@typescript-eslint/type-utils@8.50.0", "", { "dependencies": { "@typescript-eslint/types": "8.50.0", "@typescript-eslint/typescript-estree": "8.50.0", "@typescript-eslint/utils": "8.50.0", "debug": "^4.3.4", "ts-api-utils": "^2.1.0" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-7OciHT2lKCewR0mFoBrvZJ4AXTMe/sYOe87289WAViOocEmDjjv8MvIOT2XESuKj9jp8u3SZYUSh89QA4S1kQw=="], - - "@typescript-eslint/eslint-plugin/@typescript-eslint/utils": ["@typescript-eslint/utils@8.50.0", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.7.0", "@typescript-eslint/scope-manager": "8.50.0", "@typescript-eslint/types": "8.50.0", 
"@typescript-eslint/typescript-estree": "8.50.0" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-87KgUXET09CRjGCi2Ejxy3PULXna63/bMYv72tCAlDJC3Yqwln0HiFJ3VJMst2+mEtNtZu5oFvX4qJGjKsnAgg=="], - - "@typescript-eslint/parser/@typescript-eslint/scope-manager": ["@typescript-eslint/scope-manager@8.50.0", "", { "dependencies": { "@typescript-eslint/types": "8.50.0", "@typescript-eslint/visitor-keys": "8.50.0" } }, "sha512-xCwfuCZjhIqy7+HKxBLrDVT5q/iq7XBVBXLn57RTIIpelLtEIZHXAF/Upa3+gaCpeV1NNS5Z9A+ID6jn50VD4A=="], - - "@typescript-eslint/parser/@typescript-eslint/types": ["@typescript-eslint/types@8.50.0", "", {}, "sha512-iX1mgmGrXdANhhITbpp2QQM2fGehBse9LbTf0sidWK6yg/NE+uhV5dfU1g6EYPlcReYmkE9QLPq/2irKAmtS9w=="], - - "@typescript-eslint/project-service/@typescript-eslint/types": ["@typescript-eslint/types@8.50.0", "", {}, "sha512-iX1mgmGrXdANhhITbpp2QQM2fGehBse9LbTf0sidWK6yg/NE+uhV5dfU1g6EYPlcReYmkE9QLPq/2irKAmtS9w=="], - - "@typescript-eslint/scope-manager/@typescript-eslint/visitor-keys": ["@typescript-eslint/visitor-keys@8.49.0", "", { "dependencies": { "@typescript-eslint/types": "8.49.0", "eslint-visitor-keys": "^4.2.1" } }, "sha512-LlKaciDe3GmZFphXIc79THF/YYBugZ7FS1pO581E/edlVVNbZKDy93evqmrfQ9/Y4uN0vVhX4iuchq26mK/iiA=="], - - "@typescript-eslint/type-utils/@typescript-eslint/typescript-estree": ["@typescript-eslint/typescript-estree@8.49.0", "", { "dependencies": { "@typescript-eslint/project-service": "8.49.0", "@typescript-eslint/tsconfig-utils": "8.49.0", "@typescript-eslint/types": "8.49.0", "@typescript-eslint/visitor-keys": "8.49.0", "debug": "^4.3.4", "minimatch": "^9.0.4", "semver": "^7.6.0", "tinyglobby": "^0.2.15", "ts-api-utils": "^2.1.0" }, "peerDependencies": { "typescript": ">=4.8.4 <6.0.0" } }, "sha512-jrLdRuAbPfPIdYNppHJ/D0wN+wwNfJ32YTAm10eJVsFmrVpXQnDWBn8niCSMlWjvml8jsce5E/O+86IQtTbJWA=="], - - "@typescript-eslint/typescript-estree/@typescript-eslint/types": ["@typescript-eslint/types@8.50.0", "", {}, 
"sha512-iX1mgmGrXdANhhITbpp2QQM2fGehBse9LbTf0sidWK6yg/NE+uhV5dfU1g6EYPlcReYmkE9QLPq/2irKAmtS9w=="], - - "@typescript-eslint/typescript-estree/minimatch": ["minimatch@9.0.5", "", { "dependencies": { "brace-expansion": "^2.0.1" } }, "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow=="], - - "@typescript-eslint/utils/@typescript-eslint/typescript-estree": ["@typescript-eslint/typescript-estree@8.49.0", "", { "dependencies": { "@typescript-eslint/project-service": "8.49.0", "@typescript-eslint/tsconfig-utils": "8.49.0", "@typescript-eslint/types": "8.49.0", "@typescript-eslint/visitor-keys": "8.49.0", "debug": "^4.3.4", "minimatch": "^9.0.4", "semver": "^7.6.0", "tinyglobby": "^0.2.15", "ts-api-utils": "^2.1.0" }, "peerDependencies": { "typescript": ">=4.8.4 <6.0.0" } }, "sha512-jrLdRuAbPfPIdYNppHJ/D0wN+wwNfJ32YTAm10eJVsFmrVpXQnDWBn8niCSMlWjvml8jsce5E/O+86IQtTbJWA=="], - - "@typescript-eslint/visitor-keys/@typescript-eslint/types": ["@typescript-eslint/types@8.50.0", "", {}, "sha512-iX1mgmGrXdANhhITbpp2QQM2fGehBse9LbTf0sidWK6yg/NE+uhV5dfU1g6EYPlcReYmkE9QLPq/2irKAmtS9w=="], - - "@vue/compiler-core/entities": ["entities@4.5.0", "", {}, "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw=="], - - "anymatch/picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="], - - "chokidar/glob-parent": ["glob-parent@5.1.2", "", { "dependencies": { "is-glob": "^4.0.1" } }, "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow=="], - - "clean-regexp/escape-string-regexp": ["escape-string-regexp@1.0.5", "", {}, "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg=="], - - "cmdk/@radix-ui/react-primitive": ["@radix-ui/react-primitive@2.1.4", "", { "dependencies": { "@radix-ui/react-slot": "1.2.4" }, "peerDependencies": { "@types/react": 
"*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-9hQc4+GNVtJAIEPEqlYqW5RiYdrr8ea5XQ0ZOnD6fgru+83kqT15mq2OCcbe8KnjRZl5vF3ks69AKz3kh1jrhg=="], - - "eslint/ignore": ["ignore@5.3.2", "", {}, "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g=="], - - "eslint-config-next/@next/eslint-plugin-next": ["@next/eslint-plugin-next@15.5.8", "", { "dependencies": { "fast-glob": "3.3.1" } }, "sha512-PBv6j6YxyC9cFgZKSGFlFydQ+lzzR3Fs1GBr9Z2YzoZK7dH/K8ebRtZiN4pV+b8MbSJiHjZYTKVPKF/UzNgrOA=="], - - "eslint-config-next/eslint-plugin-react-hooks": ["eslint-plugin-react-hooks@5.2.0", "", { "peerDependencies": { "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0" } }, "sha512-+f15FfK64YQwZdJNELETdn5ibXEUQmW1DZL6KXhNnc2heoy/sg9VJJeT7n8TlMWouzWqSWavFkIhHyIbIAEapg=="], - - "eslint-import-resolver-node/debug": ["debug@3.2.7", "", { "dependencies": { "ms": "^2.1.1" } }, "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ=="], - - "eslint-module-utils/debug": ["debug@3.2.7", "", { "dependencies": { "ms": "^2.1.1" } }, "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ=="], - - "eslint-plugin-es-x/eslint-compat-utils": ["eslint-compat-utils@0.5.1", "", { "dependencies": { "semver": "^7.5.4" }, "peerDependencies": { "eslint": ">=6.0.0" } }, "sha512-3z3vFexKIEnjHE3zCMRo6fn/e44U7T1khUjg+Hp0ZQMCigh28rALD0nPFBcGZuiLC5rLZa2ubQHDRln09JfU2Q=="], - - "eslint-plugin-import/debug": ["debug@3.2.7", "", { "dependencies": { "ms": "^2.1.1" } }, "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ=="], - - "eslint-plugin-import/semver": ["semver@6.3.1", "", { "bin": { "semver": "bin/semver.js" } }, 
"sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="], - - "eslint-plugin-jsdoc/@es-joy/jsdoccomment": ["@es-joy/jsdoccomment@0.76.0", "", { "dependencies": { "@types/estree": "^1.0.8", "@typescript-eslint/types": "^8.46.0", "comment-parser": "1.4.1", "esquery": "^1.6.0", "jsdoc-type-pratt-parser": "~6.10.0" } }, "sha512-g+RihtzFgGTx2WYCuTHbdOXJeAlGnROws0TeALx9ow/ZmOROOZkVg5wp/B44n0WJgI4SQFP1eWM2iRPlU2Y14w=="], - - "eslint-plugin-n/globals": ["globals@15.15.0", "", {}, "sha512-7ACyT3wmyp3I61S4fG682L0VA2RGD9otkqGJIwNUMF1SWUombIIk+af1unuDYgMm082aHYwD+mzJvv9Iu8dsgg=="], - - "eslint-plugin-n/ignore": ["ignore@5.3.2", "", {}, "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g=="], - - "eslint-plugin-react/resolve": ["resolve@2.0.0-next.5", "", { "dependencies": { "is-core-module": "^2.13.0", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" }, "bin": { "resolve": "bin/resolve" } }, "sha512-U7WjGVG9sH8tvjW5SmGbQuui75FiyjAX72HX15DwBBwF9dNiQZRQAg9nnPhYy+TUnE0+VcrttuvNI8oSxZcocA=="], - - "eslint-plugin-react/semver": ["semver@6.3.1", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="], - - "eslint-plugin-react-naming-convention/@typescript-eslint/utils": ["@typescript-eslint/utils@8.50.0", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.7.0", "@typescript-eslint/scope-manager": "8.50.0", "@typescript-eslint/types": "8.50.0", "@typescript-eslint/typescript-estree": "8.50.0" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-87KgUXET09CRjGCi2Ejxy3PULXna63/bMYv72tCAlDJC3Yqwln0HiFJ3VJMst2+mEtNtZu5oFvX4qJGjKsnAgg=="], - - "eslint-plugin-react-web-api/@typescript-eslint/utils": ["@typescript-eslint/utils@8.50.0", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.7.0", "@typescript-eslint/scope-manager": 
"8.50.0", "@typescript-eslint/types": "8.50.0", "@typescript-eslint/typescript-estree": "8.50.0" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-87KgUXET09CRjGCi2Ejxy3PULXna63/bMYv72tCAlDJC3Yqwln0HiFJ3VJMst2+mEtNtZu5oFvX4qJGjKsnAgg=="], - - "eslint-plugin-vue/postcss-selector-parser": ["postcss-selector-parser@7.1.1", "", { "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" } }, "sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg=="], - - "eslint-plugin-vue/xml-name-validator": ["xml-name-validator@4.0.0", "", {}, "sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw=="], - - "fast-glob/glob-parent": ["glob-parent@5.1.2", "", { "dependencies": { "is-glob": "^4.0.1" } }, "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow=="], - - "jsonc-eslint-parser/eslint-visitor-keys": ["eslint-visitor-keys@3.4.3", "", {}, "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag=="], - - "jsonc-eslint-parser/espree": ["espree@9.6.1", "", { "dependencies": { "acorn": "^8.9.0", "acorn-jsx": "^5.3.2", "eslint-visitor-keys": "^3.4.1" } }, "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ=="], - - "mdast-util-find-and-replace/escape-string-regexp": ["escape-string-regexp@5.0.0", "", {}, "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw=="], - - "mdast-util-frontmatter/escape-string-regexp": ["escape-string-regexp@5.0.0", "", {}, "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw=="], - - "micromark-extension-frontmatter/fault": ["fault@2.0.1", "", { "dependencies": { "format": "^0.2.0" } }, "sha512-WtySTkS4OKev5JtpHXnib4Gxiurzh5NCGvWrFaZ34m6JehfTUhKZvn9njTfw48t6JumVQOmrKqpmGcdwxnhqBQ=="], - - "micromatch/picomatch": 
["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="], - - "mlly/pkg-types": ["pkg-types@1.3.1", "", { "dependencies": { "confbox": "^0.1.8", "mlly": "^1.7.4", "pathe": "^2.0.1" } }, "sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ=="], - - "next/postcss": ["postcss@8.4.31", "", { "dependencies": { "nanoid": "^3.3.6", "picocolors": "^1.0.0", "source-map-js": "^1.0.2" } }, "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ=="], - - "parse-entities/@types/unist": ["@types/unist@2.0.11", "", {}, "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA=="], - - "prop-types/react-is": ["react-is@16.13.1", "", {}, "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ=="], - - "react-code-blocks/react-syntax-highlighter": ["react-syntax-highlighter@15.6.6", "", { "dependencies": { "@babel/runtime": "^7.3.1", "highlight.js": "^10.4.1", "highlightjs-vue": "^1.0.0", "lowlight": "^1.17.0", "prismjs": "^1.30.0", "refractor": "^3.6.0" }, "peerDependencies": { "react": ">= 0.14.0" } }, "sha512-DgXrc+AZF47+HvAPEmn7Ua/1p10jNoVZVI/LoPiYdtY+OM+/nG5yefLHKJwdKqY1adMuHFbeyBaG9j64ML7vTw=="], - - "readdirp/picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="], - - "styled-components/csstype": ["csstype@3.1.3", "", {}, "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw=="], - - "styled-components/postcss": ["postcss@8.4.49", "", { "dependencies": { "nanoid": "^3.3.7", "picocolors": "^1.1.1", "source-map-js": "^1.2.1" } }, "sha512-OCVPnIObs4N29kxTjzLfUryOkvZEq+pf8jTF0lg8E7uETuWHA+v7j3c/xJmiqpX450191LlmZfUKkXxkTry7nA=="], - - "styled-components/tslib": ["tslib@2.6.2", "", {}, 
"sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q=="], - - "tailwindcss/fast-glob": ["fast-glob@3.3.3", "", { "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", "glob-parent": "^5.1.2", "merge2": "^1.3.0", "micromatch": "^4.0.8" } }, "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg=="], - - "toml-eslint-parser/eslint-visitor-keys": ["eslint-visitor-keys@3.4.3", "", {}, "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag=="], - - "tsconfig-paths/json5": ["json5@1.0.2", "", { "dependencies": { "minimist": "^1.2.0" }, "bin": { "json5": "lib/cli.js" } }, "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA=="], - - "yaml-eslint-parser/eslint-visitor-keys": ["eslint-visitor-keys@3.4.3", "", {}, "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag=="], - - "@eslint-react/ast/@typescript-eslint/utils/@typescript-eslint/scope-manager": ["@typescript-eslint/scope-manager@8.50.0", "", { "dependencies": { "@typescript-eslint/types": "8.50.0", "@typescript-eslint/visitor-keys": "8.50.0" } }, "sha512-xCwfuCZjhIqy7+HKxBLrDVT5q/iq7XBVBXLn57RTIIpelLtEIZHXAF/Upa3+gaCpeV1NNS5Z9A+ID6jn50VD4A=="], - - "@typescript-eslint/eslint-plugin/@typescript-eslint/scope-manager/@typescript-eslint/types": ["@typescript-eslint/types@8.50.0", "", {}, "sha512-iX1mgmGrXdANhhITbpp2QQM2fGehBse9LbTf0sidWK6yg/NE+uhV5dfU1g6EYPlcReYmkE9QLPq/2irKAmtS9w=="], - - "@typescript-eslint/eslint-plugin/@typescript-eslint/type-utils/@typescript-eslint/types": ["@typescript-eslint/types@8.50.0", "", {}, "sha512-iX1mgmGrXdANhhITbpp2QQM2fGehBse9LbTf0sidWK6yg/NE+uhV5dfU1g6EYPlcReYmkE9QLPq/2irKAmtS9w=="], - - "@typescript-eslint/eslint-plugin/@typescript-eslint/utils/@typescript-eslint/types": ["@typescript-eslint/types@8.50.0", "", {}, 
"sha512-iX1mgmGrXdANhhITbpp2QQM2fGehBse9LbTf0sidWK6yg/NE+uhV5dfU1g6EYPlcReYmkE9QLPq/2irKAmtS9w=="], - - "@typescript-eslint/type-utils/@typescript-eslint/typescript-estree/@typescript-eslint/project-service": ["@typescript-eslint/project-service@8.49.0", "", { "dependencies": { "@typescript-eslint/tsconfig-utils": "^8.49.0", "@typescript-eslint/types": "^8.49.0", "debug": "^4.3.4" }, "peerDependencies": { "typescript": ">=4.8.4 <6.0.0" } }, "sha512-/wJN0/DKkmRUMXjZUXYZpD1NEQzQAAn9QWfGwo+Ai8gnzqH7tvqS7oNVdTjKqOcPyVIdZdyCMoqN66Ia789e7g=="], - - "@typescript-eslint/type-utils/@typescript-eslint/typescript-estree/@typescript-eslint/tsconfig-utils": ["@typescript-eslint/tsconfig-utils@8.49.0", "", { "peerDependencies": { "typescript": ">=4.8.4 <6.0.0" } }, "sha512-8prixNi1/6nawsRYxet4YOhnbW+W9FK/bQPxsGB1D3ZrDzbJ5FXw5XmzxZv82X3B+ZccuSxo/X8q9nQ+mFecWA=="], - - "@typescript-eslint/type-utils/@typescript-eslint/typescript-estree/@typescript-eslint/visitor-keys": ["@typescript-eslint/visitor-keys@8.49.0", "", { "dependencies": { "@typescript-eslint/types": "8.49.0", "eslint-visitor-keys": "^4.2.1" } }, "sha512-LlKaciDe3GmZFphXIc79THF/YYBugZ7FS1pO581E/edlVVNbZKDy93evqmrfQ9/Y4uN0vVhX4iuchq26mK/iiA=="], - - "@typescript-eslint/type-utils/@typescript-eslint/typescript-estree/minimatch": ["minimatch@9.0.5", "", { "dependencies": { "brace-expansion": "^2.0.1" } }, "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow=="], - - "@typescript-eslint/typescript-estree/minimatch/brace-expansion": ["brace-expansion@2.0.2", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ=="], - - "@typescript-eslint/utils/@typescript-eslint/typescript-estree/@typescript-eslint/project-service": ["@typescript-eslint/project-service@8.49.0", "", { "dependencies": { "@typescript-eslint/tsconfig-utils": "^8.49.0", "@typescript-eslint/types": "^8.49.0", "debug": 
"^4.3.4" }, "peerDependencies": { "typescript": ">=4.8.4 <6.0.0" } }, "sha512-/wJN0/DKkmRUMXjZUXYZpD1NEQzQAAn9QWfGwo+Ai8gnzqH7tvqS7oNVdTjKqOcPyVIdZdyCMoqN66Ia789e7g=="], - - "@typescript-eslint/utils/@typescript-eslint/typescript-estree/@typescript-eslint/tsconfig-utils": ["@typescript-eslint/tsconfig-utils@8.49.0", "", { "peerDependencies": { "typescript": ">=4.8.4 <6.0.0" } }, "sha512-8prixNi1/6nawsRYxet4YOhnbW+W9FK/bQPxsGB1D3ZrDzbJ5FXw5XmzxZv82X3B+ZccuSxo/X8q9nQ+mFecWA=="], - - "@typescript-eslint/utils/@typescript-eslint/typescript-estree/@typescript-eslint/visitor-keys": ["@typescript-eslint/visitor-keys@8.49.0", "", { "dependencies": { "@typescript-eslint/types": "8.49.0", "eslint-visitor-keys": "^4.2.1" } }, "sha512-LlKaciDe3GmZFphXIc79THF/YYBugZ7FS1pO581E/edlVVNbZKDy93evqmrfQ9/Y4uN0vVhX4iuchq26mK/iiA=="], - - "@typescript-eslint/utils/@typescript-eslint/typescript-estree/minimatch": ["minimatch@9.0.5", "", { "dependencies": { "brace-expansion": "^2.0.1" } }, "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow=="], - - "eslint-plugin-jsdoc/@es-joy/jsdoccomment/jsdoc-type-pratt-parser": ["jsdoc-type-pratt-parser@6.10.0", "", {}, "sha512-+LexoTRyYui5iOhJGn13N9ZazL23nAHGkXsa1p/C8yeq79WRfLBag6ZZ0FQG2aRoc9yfo59JT9EYCQonOkHKkQ=="], - - "eslint-plugin-react-naming-convention/@typescript-eslint/utils/@typescript-eslint/scope-manager": ["@typescript-eslint/scope-manager@8.50.0", "", { "dependencies": { "@typescript-eslint/types": "8.50.0", "@typescript-eslint/visitor-keys": "8.50.0" } }, "sha512-xCwfuCZjhIqy7+HKxBLrDVT5q/iq7XBVBXLn57RTIIpelLtEIZHXAF/Upa3+gaCpeV1NNS5Z9A+ID6jn50VD4A=="], - - "eslint-plugin-react-naming-convention/@typescript-eslint/utils/@typescript-eslint/types": ["@typescript-eslint/types@8.50.0", "", {}, "sha512-iX1mgmGrXdANhhITbpp2QQM2fGehBse9LbTf0sidWK6yg/NE+uhV5dfU1g6EYPlcReYmkE9QLPq/2irKAmtS9w=="], - - "eslint-plugin-react-web-api/@typescript-eslint/utils/@typescript-eslint/scope-manager": 
["@typescript-eslint/scope-manager@8.50.0", "", { "dependencies": { "@typescript-eslint/types": "8.50.0", "@typescript-eslint/visitor-keys": "8.50.0" } }, "sha512-xCwfuCZjhIqy7+HKxBLrDVT5q/iq7XBVBXLn57RTIIpelLtEIZHXAF/Upa3+gaCpeV1NNS5Z9A+ID6jn50VD4A=="], - - "eslint-plugin-react-web-api/@typescript-eslint/utils/@typescript-eslint/types": ["@typescript-eslint/types@8.50.0", "", {}, "sha512-iX1mgmGrXdANhhITbpp2QQM2fGehBse9LbTf0sidWK6yg/NE+uhV5dfU1g6EYPlcReYmkE9QLPq/2irKAmtS9w=="], - - "mlly/pkg-types/confbox": ["confbox@0.1.8", "", {}, "sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w=="], - - "react-code-blocks/react-syntax-highlighter/refractor": ["refractor@3.6.0", "", { "dependencies": { "hastscript": "^6.0.0", "parse-entities": "^2.0.0", "prismjs": "~1.27.0" } }, "sha512-MY9W41IOWxxk31o+YvFCNyNzdkc9M20NoZK5vq6jkv4I/uh2zkWcfudj0Q1fovjUQJrNewS9NMzeTtqPf+n5EA=="], - - "tailwindcss/fast-glob/glob-parent": ["glob-parent@5.1.2", "", { "dependencies": { "is-glob": "^4.0.1" } }, "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow=="], - - "@typescript-eslint/type-utils/@typescript-eslint/typescript-estree/minimatch/brace-expansion": ["brace-expansion@2.0.2", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ=="], - - "@typescript-eslint/utils/@typescript-eslint/typescript-estree/minimatch/brace-expansion": ["brace-expansion@2.0.2", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ=="], - - "react-code-blocks/react-syntax-highlighter/refractor/hastscript": ["hastscript@6.0.0", "", { "dependencies": { "@types/hast": "^2.0.0", "comma-separated-tokens": "^1.0.0", "hast-util-parse-selector": "^2.0.0", "property-information": "^5.0.0", "space-separated-tokens": "^1.0.0" } }, 
"sha512-nDM6bvd7lIqDUiYEiu5Sl/+6ReP0BMk/2f4U/Rooccxkj0P5nm+acM5PrGJ/t5I8qPGiqZSE6hVAwZEdZIvP4w=="], - - "react-code-blocks/react-syntax-highlighter/refractor/parse-entities": ["parse-entities@2.0.0", "", { "dependencies": { "character-entities": "^1.0.0", "character-entities-legacy": "^1.0.0", "character-reference-invalid": "^1.0.0", "is-alphanumerical": "^1.0.0", "is-decimal": "^1.0.0", "is-hexadecimal": "^1.0.0" } }, "sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ=="], - - "react-code-blocks/react-syntax-highlighter/refractor/prismjs": ["prismjs@1.27.0", "", {}, "sha512-t13BGPUlFDR7wRB5kQDG4jjl7XeuH6jbJGt11JHPL96qwsEHNX2+68tFXqc1/k+/jALsbSWJKUOT/hcYAZ5LkA=="], - - "react-code-blocks/react-syntax-highlighter/refractor/hastscript/@types/hast": ["@types/hast@2.3.10", "", { "dependencies": { "@types/unist": "^2" } }, "sha512-McWspRw8xx8J9HurkVBfYj0xKoE25tOFlHGdx4MJ5xORQrMGZNqJhVQWaIbm6Oyla5kYOXtDiopzKRJzEOkwJw=="], - - "react-code-blocks/react-syntax-highlighter/refractor/hastscript/comma-separated-tokens": ["comma-separated-tokens@1.0.8", "", {}, "sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw=="], - - "react-code-blocks/react-syntax-highlighter/refractor/hastscript/hast-util-parse-selector": ["hast-util-parse-selector@2.2.5", "", {}, "sha512-7j6mrk/qqkSehsM92wQjdIgWM2/BW61u/53G6xmC8i1OmEdKLHbk419QKQUjz6LglWsfqoiHmyMRkP1BGjecNQ=="], - - "react-code-blocks/react-syntax-highlighter/refractor/hastscript/property-information": ["property-information@5.6.0", "", { "dependencies": { "xtend": "^4.0.0" } }, "sha512-YUHSPk+A30YPv+0Qf8i9Mbfe/C0hdPXk1s1jPVToV8pk8BQtpw10ct89Eo7OWkutrwqvT0eicAxlOg3dOAu8JA=="], - - "react-code-blocks/react-syntax-highlighter/refractor/hastscript/space-separated-tokens": ["space-separated-tokens@1.1.5", "", {}, "sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA=="], - - 
"react-code-blocks/react-syntax-highlighter/refractor/parse-entities/character-entities": ["character-entities@1.2.4", "", {}, "sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw=="], - - "react-code-blocks/react-syntax-highlighter/refractor/parse-entities/character-entities-legacy": ["character-entities-legacy@1.1.4", "", {}, "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA=="], - - "react-code-blocks/react-syntax-highlighter/refractor/parse-entities/character-reference-invalid": ["character-reference-invalid@1.1.4", "", {}, "sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg=="], - - "react-code-blocks/react-syntax-highlighter/refractor/parse-entities/is-alphanumerical": ["is-alphanumerical@1.0.4", "", { "dependencies": { "is-alphabetical": "^1.0.0", "is-decimal": "^1.0.0" } }, "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A=="], - - "react-code-blocks/react-syntax-highlighter/refractor/parse-entities/is-decimal": ["is-decimal@1.0.4", "", {}, "sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw=="], - - "react-code-blocks/react-syntax-highlighter/refractor/parse-entities/is-hexadecimal": ["is-hexadecimal@1.0.4", "", {}, "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw=="], - - "react-code-blocks/react-syntax-highlighter/refractor/hastscript/@types/hast/@types/unist": ["@types/unist@2.0.11", "", {}, "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA=="], - - "react-code-blocks/react-syntax-highlighter/refractor/parse-entities/is-alphanumerical/is-alphabetical": ["is-alphabetical@1.0.4", "", {}, "sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg=="], - } -} diff --git a/frontend/components.json 
b/frontend/components.json deleted file mode 100644 index eb538c1c0..000000000 --- a/frontend/components.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "$schema": "https://ui.shadcn.com/schema.json", - "style": "default", - "rsc": true, - "tsx": true, - "tailwind": { - "config": "tailwind.config.ts", - "css": "@/styles/globals.css", - "baseColor": "slate", - "cssVariables": true, - "prefix": "" - }, - "aliases": { - "components": "@/components", - "utils": "@/lib/utils" - }, - "registries": { - "@animate-ui": "https://animate-ui.com/r/{name}.json" - } -} diff --git a/frontend/eslint.config.mjs b/frontend/eslint.config.mjs deleted file mode 100644 index 5fbe5be8a..000000000 --- a/frontend/eslint.config.mjs +++ /dev/null @@ -1,41 +0,0 @@ -import antfu from "@antfu/eslint-config"; - -export default antfu( - { - type: "app", - typescript: true, - formatters: true, - next: true, - stylistic: { - indent: 2, - semi: true, - quotes: "double", - }, - ignores: ["src/components/ui/**", "README.md", "public/json/**"], - }, - { - rules: { - "ts/no-redeclare": "off", - "ts/consistent-type-definitions": ["error", "type"], - "no-console": ["warn"], - "antfu/no-top-level-await": ["off"], - "node/prefer-global/process": ["off"], - "node/no-process-env": ["error"], - "perfectionist/sort-imports": [ - "error", - { - type: "line-length", - order: "desc", - }, - ], - - "unicorn/filename-case": [ - "error", - { - case: "kebabCase", - ignore: ["README.md"], - }, - ], - }, - }, -); diff --git a/frontend/next.config.mjs b/frontend/next.config.mjs deleted file mode 100644 index 48199ca6a..000000000 --- a/frontend/next.config.mjs +++ /dev/null @@ -1,29 +0,0 @@ -/** @type {import('next').NextConfig} */ -const nextConfig = { - webpack: (config) => { - config.resolve.alias.canvas = false; - - return config; - }, - images: { - remotePatterns: [ - { - protocol: "https", - hostname: "**", - }, - ], - }, - - env: { - BASE_PATH: "ProxmoxVE", - }, - - eslint: { - ignoreDuringBuilds: true, - }, - - output: 
"export", - basePath: `/ProxmoxVE`, -}; - -export default nextConfig; diff --git a/frontend/package.json b/frontend/package.json deleted file mode 100644 index 4f1119683..000000000 --- a/frontend/package.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "name": "proxmox-helper-scripts-website", - "type": "module", - "version": "1.0.0", - "private": true, - "author": { - "name": "Bram Suurd", - "url": "https://github.com/community-scripts" - }, - "license": "MIT", - "scripts": { - "dev": "next dev --turbopack", - "build": "next build", - "start": "next start", - "lint": "eslint . --fix", - "typecheck": "tsc --noEmit" - }, - "dependencies": { - "@radix-ui/react-accordion": "^1.2.12", - "@radix-ui/react-dialog": "^1.1.15", - "@radix-ui/react-dropdown-menu": "^2.1.16", - "@radix-ui/react-icons": "^1.3.2", - "@radix-ui/react-label": "^2.1.8", - "@radix-ui/react-navigation-menu": "^1.2.14", - "@radix-ui/react-popover": "^1.1.15", - "@radix-ui/react-scroll-area": "^1.2.10", - "@radix-ui/react-select": "^2.2.6", - "@radix-ui/react-separator": "^1.1.8", - "@radix-ui/react-slot": "^1.2.4", - "@radix-ui/react-switch": "^1.2.6", - "@radix-ui/react-tabs": "^1.1.13", - "@radix-ui/react-tooltip": "^1.2.8", - "@tanstack/react-query": "^5.90.12", - "@types/react-syntax-highlighter": "^15.5.13", - "chart.js": "^4.5.1", - "chartjs-plugin-datalabels": "^2.2.0", - "class-variance-authority": "^0.7.1", - "clsx": "^2.1.1", - "cmdk": "^1.1.1", - "date-fns": "^4.1.0", - "framer-motion": "^12.23.26", - "fuse.js": "^7.1.0", - "lucide-react": "^0.561.0", - "mini-svg-data-uri": "^1.4.4", - "motion": "^12.23.26", - "next": "15.5.8", - "next-themes": "^0.4.6", - "nuqs": "^2.8.5", - "react": "19.2.3", - "react-chartjs-2": "^5.3.1", - "react-code-blocks": "^0.1.6", - "react-datepicker": "^9.0.0", - "react-day-picker": "^9.12.0", - "react-dom": "19.2.3", - "react-icons": "^5.5.0", - "react-syntax-highlighter": "^16.1.0", - "react-use-measure": "^2.1.7", - "recharts": "3.6.0", - "sharp": "^0.34.5", - 
"sonner": "^2.0.7", - "tailwind-merge": "^3.4.0", - "zod": "^4.2.1" - }, - "devDependencies": { - "@antfu/eslint-config": "^6.7.1", - "@eslint-react/eslint-plugin": "^2.3.13", - "@next/eslint-plugin-next": "^15.5.8", - "@tanstack/eslint-plugin-query": "^5.91.2", - "@types/node": "^25.0.2", - "@types/react": "npm:types-react@19.0.0-rc.1", - "@types/react-dom": "npm:types-react-dom@19.0.0-rc.1", - "@typescript-eslint/eslint-plugin": "^8.50.0", - "@typescript-eslint/parser": "^8.50.0", - "@vitejs/plugin-react": "^5.1.2", - "eslint": "^9.39.2", - "eslint-config-next": "15.5.8", - "eslint-plugin-format": "^1.1.0", - "eslint-plugin-react-hooks": "^7.0.1", - "eslint-plugin-react-refresh": "^0.4.25", - "jsdom": "^27.3.0", - "postcss": "^8.5.6", - "tailwindcss": "^3.4.17", - "tailwindcss-animate": "^1.0.7", - "tailwindcss-animated": "^1.1.2", - "typescript": "^5.9.3" - } -} diff --git a/frontend/postcss.config.mjs b/frontend/postcss.config.mjs deleted file mode 100644 index 1a69fd2a4..000000000 --- a/frontend/postcss.config.mjs +++ /dev/null @@ -1,8 +0,0 @@ -/** @type {import('postcss-load-config').Config} */ -const config = { - plugins: { - tailwindcss: {}, - }, -}; - -export default config; diff --git a/frontend/public/defaultimg.png b/frontend/public/defaultimg.png deleted file mode 100644 index 14d13fdba..000000000 Binary files a/frontend/public/defaultimg.png and /dev/null differ diff --git a/frontend/public/json/2fauth.json b/frontend/public/json/2fauth.json deleted file mode 100644 index 2bdfd2da5..000000000 --- a/frontend/public/json/2fauth.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "2FAuth", - "slug": "2fauth", - "categories": [ - 6 - ], - "date_created": "2024-12-20", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://docs.2fauth.app/", - "website": "https://2fauth.app/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/2fauth.webp", - "config_path": "cat /opt/2fauth/.env", - 
"description": "2FAuth is a web based self-hosted alternative to One Time Passcode (OTP) generators like Google Authenticator, designed for both mobile and desktop. It aims to ease you perform your 2FA authentication steps whatever the device you handle, with a clean and suitable interface.", - "install_methods": [ - { - "type": "default", - "script": "ct/2fauth.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Database credentials: `cat ~/2FAuth.creds`", - "type": "info" - }, - { - "text": "The very first account created is automatically set up as an administrator account.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/actualbudget.json b/frontend/public/json/actualbudget.json deleted file mode 100644 index d64122463..000000000 --- a/frontend/public/json/actualbudget.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Actual Budget", - "slug": "actualbudget", - "categories": [ - 23 - ], - "date_created": "2025-05-06", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5006, - "documentation": "https://github.com/community-scripts/ProxmoxVE/discussions/807", - "website": "https://actualbudget.org/", - "config_path": "/opt/actualbudget-data/config.json", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/actual-budget.webp", - "description": "Actual Budget is a super fast and privacy-focused app for managing your finances. 
At its heart is the well proven and much loved Envelope Budgeting methodology.", - "install_methods": [ - { - "type": "default", - "script": "ct/actualbudget.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/add-iptag.json b/frontend/public/json/add-iptag.json deleted file mode 100644 index e97c46ca9..000000000 --- a/frontend/public/json/add-iptag.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "name": "PVE LXC Tag", - "slug": "add-iptag", - "categories": [ - 1 - ], - "date_created": "2025-06-16", - "type": "pve", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "/opt/iptag/iptag.conf", - "description": "This script automatically adds IP address as tags to LXC containers or VM's using a systemd service. The service also updates the tags if a LXC/VM IP address is changed.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/add-iptag.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within the Proxmox shell", - "type": "info" - }, - { - "text": "Configuration: `nano /opt/iptag/iptag.conf`. iptag Service must be restarted after change. See here for full documentation: `https://github.com/community-scripts/ProxmoxVE/discussions/5790`", - "type": "info" - }, - { - "text": "The Proxmox Node must contain ipcalc and net-tools. 
`apt-get install -y ipcalc net-tools`", - "type": "warning" - }, - { - "text": "You can execute the ip tool manually with `iptag-run`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/add-netbird-lxc.json b/frontend/public/json/add-netbird-lxc.json deleted file mode 100644 index 7675ff56a..000000000 --- a/frontend/public/json/add-netbird-lxc.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "NetBird", - "slug": "add-netbird-lxc", - "categories": [ - 1 - ], - "date_created": "2024-05-19", - "type": "addon", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://docs.netbird.io/", - "website": "https://netbird.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/netbird.webp", - "config_path": "", - "description": "NetBird combines a configuration-free peer-to-peer private network and a centralized access control system in a single platform, making it easy to create secure private networks for your organization or home.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/add-netbird-lxc.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "After the script finishes, reboot the LXC then run `netbird up` in the LXC console", - "type": "info" - }, - { - "text": "Execute within the Proxmox main shell", - "type": "info" - }, - { - "text": "The script only works in Debian/Ubuntu, not in Alpine!", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/add-tailscale-lxc.json b/frontend/public/json/add-tailscale-lxc.json deleted file mode 100644 index e374749b5..000000000 --- a/frontend/public/json/add-tailscale-lxc.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Tailscale", - "slug": "add-tailscale-lxc", - "categories": [ - 1 - ], - "date_created": "2024-05-02", - "type": "addon", - "updateable": true, - 
"privileged": false, - "interface_port": null, - "documentation": "https://tailscale.com/kb/1017/install", - "website": "https://tailscale.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/tailscale.webp", - "config_path": "", - "description": "Tailscale is a software-defined networking solution that enables secure communication between devices over the internet. It creates a virtual private network (VPN) that enables devices to communicate with each other as if they were on the same local network. Tailscale works even when the devices are separated by firewalls or subnets, and provides secure and encrypted communication between devices. With Tailscale, users can connect devices, servers, computers, and cloud instances to create a secure network, making it easier to manage and control access to resources. Tailscale is designed to be easy to set up and use, providing a streamlined solution for secure communication between devices over the internet.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/add-tailscale-lxc.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "After the script finishes, reboot the LXC then run `tailscale up` in the LXC console", - "type": "info" - }, - { - "text": "Execute within the Proxmox host shell", - "type": "info" - } - ] -} diff --git a/frontend/public/json/adguard.json b/frontend/public/json/adguard.json deleted file mode 100644 index 863f9abd7..000000000 --- a/frontend/public/json/adguard.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "name": "AdGuard Home", - "slug": "adguard", - "categories": [ - 5 - ], - "date_created": "2024-04-28", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://github.com/AdguardTeam/AdGuardHome/wiki/Getting-Started", - "website": 
"https://adguard.com/en/adguard-home/overview.html", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/adguard-home.webp", - "config_path": "/opt/AdGuardHome/AdGuardHome.yaml", - "description": "AdGuard Home is an open-source, self-hosted network-wide ad blocker. It blocks advertisements, trackers, phishing and malware websites, and provides protection against online threats. AdGuard Home is a DNS-based solution, which means it blocks ads and malicious content at the network level, before it even reaches your device. It runs on your home network and can be easily configured and managed through a web-based interface. It provides detailed statistics and logs, allowing you to see which websites are being blocked, and why. AdGuard Home is designed to be fast, lightweight, and easy to use, making it an ideal solution for home users who want to block ads, protect their privacy, and improve the speed and security of their online experience.", - "install_methods": [ - { - "type": "default", - "script": "ct/adguard.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-adguard.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 1, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "AdGuard Home can only be updated via the user interface.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/adguardhome-sync.json b/frontend/public/json/adguardhome-sync.json deleted file mode 100644 index b745d35f3..000000000 --- a/frontend/public/json/adguardhome-sync.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "name": "AdGuardHome-Sync", - "slug": "adguardhome-sync", - "categories": [ - 5 - ], - "date_created": "2025-12-13", - "type": "addon", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": 
"https://github.com/bakito/adguardhome-sync", - "website": "https://github.com/bakito/adguardhome-sync", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/adguardhome-sync.webp", - "config_path": "/opt/adguardhome-sync/adguardhome-sync.yaml", - "description": "Synchronize AdGuardHome config to one or multiple replica instances. Syncs General Settings, Filters, Rewrites, Services, Clients, DNS Config, DHCP Config and Theme.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/adguardhome-sync.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - }, - { - "type": "alpine", - "script": "tools/addon/adguardhome-sync.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Edit /opt/adguardhome-sync/adguardhome-sync.yaml to configure your AdGuardHome instances", - "type": "info" - }, - { - "text": "Origin = Primary instance, Replicas = Instances to sync to", - "type": "info" - }, - { - "text": "Update with: update_adguardhome-sync", - "type": "info" - } - ] -} diff --git a/frontend/public/json/adventurelog.json b/frontend/public/json/adventurelog.json deleted file mode 100644 index b28bb6d0e..000000000 --- a/frontend/public/json/adventurelog.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "AdventureLog", - "slug": "adventurelog", - "categories": [ - 24 - ], - "date_created": "2024-10-26", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://adventurelog.app/docs/intro/adventurelog_overview.html", - "website": "https://adventurelog.app/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/adventurelog.webp", - "config_path": "/opt/adventurelog/backend/server/.env", - "description": "Adventure Log is an app designed to track outdoor activities and personal 
achievements, allowing users to log their adventures with photos, notes, and location data. It focuses on enhancing outdoor experiences by preserving memories and sharing them with others.", - "install_methods": [ - { - "type": "default", - "script": "ct/adventurelog.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 7, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "AdventureLog uses an initial local IP, if you change your LXC-IP, you need to change the IP here: `/opt/adventurelog/backend/server/.env` and here: `/opt/adventurelog/frontend/.env`", - "type": "warning" - }, - { - "text": "Use `cat ~/adventurelog.creds` to see login credentials.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/agentdvr.json b/frontend/public/json/agentdvr.json deleted file mode 100644 index 3ff645f23..000000000 --- a/frontend/public/json/agentdvr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "AgentDVR", - "slug": "agentdvr", - "categories": [ - 15 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": true, - "interface_port": 8090, - "documentation": "https://www.ispyconnect.com/docs/agent/about", - "website": "https://www.ispyconnect.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/agent-dvr.webp", - "config_path": "/opt/agentdvr/agent/Media/XML/config.json", - "description": "AgentDVR a new video surveillance solution for the Internet Of Things.", - "install_methods": [ - { - "type": "default", - "script": "ct/agentdvr.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "ubuntu", - "version": "24.04" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/all-templates.json b/frontend/public/json/all-templates.json deleted file mode 100644 index 86f167681..000000000 --- 
a/frontend/public/json/all-templates.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "All Templates", - "slug": "all-templates", - "categories": [ - 1 - ], - "date_created": "2024-05-02", - "type": "addon", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "", - "description": "A script designed to allow for the creation of one of the many free LXC templates. Great for creating system LXCs.\r\nThe script creates a `*.creds` file in the Proxmox root directory with the password of the newly created LXC.\r\nPlease take note that if you plan to use this script for creating TurnKey LXCs, you'll need to modify the hostname after creation.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/all-templates.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Resource and network settings are adjustable post LXC creation.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/alpine-it-tools.json b/frontend/public/json/alpine-it-tools.json deleted file mode 100644 index 5c4e12736..000000000 --- a/frontend/public/json/alpine-it-tools.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "name": "Alpine-IT-Tools", - "slug": "alpine-it-tools", - "categories": [ - 20 - ], - "date_created": "2025-01-30", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": null, - "website": "https://sharevb-it-tools.vercel.app/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/it-tools.webp", - "config_path": "", - "description": "IT-Tools is a web-based suite of utilities designed to streamline and simplify various IT tasks, providing tools for developers and system administrators to manage their 
workflows efficiently.", - "install_methods": [ - { - "type": "default", - "script": "ct/alpine-it-tools.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 1, - "os": "alpine", - "version": "3.23" - } - }, - { - "type": "alpine", - "script": "ct/alpine-it-tools.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 1, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/alpine.json b/frontend/public/json/alpine.json deleted file mode 100644 index a8030e51d..000000000 --- a/frontend/public/json/alpine.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Alpine", - "slug": "alpine", - "categories": [ - 2 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": "https://www.alpinelinux.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/alpine-linux.webp", - "config_path": "", - "description": "A security-oriented, lightweight Linux distribution based on musl and BusyBox.\r\nBy default, the root password is set to alpine. 
If you choose to use advanced settings, you will need to define a password, autologin is currently unavailable.", - "install_methods": [ - { - "type": "default", - "script": "ct/alpine.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 1, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": "alpine" - }, - "notes": [ - { - "text": "To Update Alpine: `apk -U upgrade`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/ampache.json b/frontend/public/json/ampache.json deleted file mode 100644 index 435c7707e..000000000 --- a/frontend/public/json/ampache.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "Ampache", - "slug": "ampache", - "categories": [ - 13 - ], - "date_created": "2026-01-30", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://github.com/ampache/ampache/wiki", - "website": "https://ampache.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ampache.webp", - "config_path": "/opt/ampache/config/ampache.cfg.php", - "description": "Ampache is a web-based audio streaming application and file manager that allows you to access your music & videos from anywhere. 
It features a powerful music catalog, multiple user support, transcoding, streaming, and more.", - "install_methods": [ - { - "type": "default", - "script": "ct/ampache.sh", - "resources": { - "cpu": 4, - "ram": 2048, - "hdd": 5, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Complete the web-based setup at http://IP/install.php", - "type": "info" - }, - { - "text": "Database credentials are stored in `~/ampache.creds` - use only the MySQL username and password from this file", - "type": "info" - }, - { - "text": "During installation, only check 'Create Tables' - leave 'Create Database' and 'Create Database User' unchecked", - "type": "info" - } - ] -} diff --git a/frontend/public/json/apache-cassandra.json b/frontend/public/json/apache-cassandra.json deleted file mode 100644 index c5b9806eb..000000000 --- a/frontend/public/json/apache-cassandra.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Apache-Cassandra", - "slug": "apache-cassandra", - "categories": [ - 8 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://cassandra.apache.org/doc/latest/", - "website": "https://cassandra.apache.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/apache-cassandra.webp", - "config_path": "/etc/cassandra/cassandra.yaml", - "description": "Apache-Cassandra is an open source NoSQL distributed database trusted by thousands of companies for scalability and high availability without compromising performance.", - "install_methods": [ - { - "type": "default", - "script": "ct/apache-cassandra.sh", - "resources": { - "cpu": 1, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/apache-couchdb.json 
b/frontend/public/json/apache-couchdb.json deleted file mode 100644 index 21bb6620d..000000000 --- a/frontend/public/json/apache-couchdb.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Apache CouchDB", - "slug": "apache-couchdb", - "categories": [ - 8 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5984, - "documentation": "https://docs.couchdb.org/en/stable/", - "website": "https://couchdb.apache.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/couchdb.webp", - "config_path": "etc/default.ini", - "description": "Apache CouchDB Seamless multi-master sync, that scales from Big Data to Mobile, with an Intuitive HTTP/JSON API and designed for Reliability.", - "install_methods": [ - { - "type": "default", - "script": "ct/apache-couchdb.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 10, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Login Credentials: `cat ~/couchdb.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/apache-guacamole.json b/frontend/public/json/apache-guacamole.json deleted file mode 100644 index a591a54f3..000000000 --- a/frontend/public/json/apache-guacamole.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Apache Guacamole", - "slug": "apache-guacamole", - "categories": [ - 0 - ], - "date_created": "2024-12-19", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://guacamole.apache.org/doc/gug/", - "website": "https://guacamole.apache.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/apache-guacamole.webp", - "config_path": "/etc/guacamole/guacd.conf", - "description": "Apache Guacamole is a clientless remote desktop gateway. 
It supports standard protocols like VNC, RDP, and SSH.", - "install_methods": [ - { - "type": "default", - "script": "ct/apache-guacamole.sh", - "resources": { - "cpu": 1, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "guacadmin", - "password": "guacadmin" - }, - "notes": [] -} diff --git a/frontend/public/json/apache-tika.json b/frontend/public/json/apache-tika.json deleted file mode 100644 index d777f2599..000000000 --- a/frontend/public/json/apache-tika.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Apache Tika", - "slug": "apache-tika", - "categories": [ - 12 - ], - "date_created": "2025-02-05", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9998, - "documentation": "https://cwiki.apache.org/confluence/display/tika", - "website": "https://tika.apache.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/apache-tika.webp", - "config_path": "/opt/apache-tika/tika-config.xml", - "description": "The Apache Tika™ toolkit detects and extracts metadata and text from over a thousand different file types (such as PPT, XLS, and PDF). All of these file types can be parsed through a single interface, making Tika useful for search engine indexing, content analysis, translation, and much more.", - "install_methods": [ - { - "type": "default", - "script": "ct/apache-tika.sh", - "resources": { - "cpu": 1, - "ram": 2024, - "hdd": 10, - "os": "debian", - "version": "12" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Configuration file is not created at install time. 
Example is at: `https://cwiki.apache.org/confluence/display/TIKA/TikaServer+in+Tika+2.x`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/apache-tomcat.json b/frontend/public/json/apache-tomcat.json deleted file mode 100644 index fd05e1fd5..000000000 --- a/frontend/public/json/apache-tomcat.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Apache Tomcat", - "slug": "apache-tomcat", - "categories": [ - 10 - ], - "date_created": "2025-03-04", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://cwiki.apache.org/confluence/display/TOMCAT", - "website": "https://tomcat.apache.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/apache-tomcat.webp", - "config_path": "", - "description": "Apache Tomcat is an open-source application server that runs Java Servlets and JavaServer Pages (JSP). It allows developers to deploy and manage Java web applications by handling HTTP requests and serving dynamic content. Tomcat is widely used for lightweight web applications and supports various Java EE features like WebSockets and JNDI.", - "install_methods": [ - { - "type": "default", - "script": "ct/apache-tomcat.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 5, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "User can select which Adoptium JDK should be used for the selected Tomcat version (9, 10.1 or 11). 
", - "type": "info" - } - ] -} diff --git a/frontend/public/json/apt-cacher-ng.json b/frontend/public/json/apt-cacher-ng.json deleted file mode 100644 index 76b4a8f14..000000000 --- a/frontend/public/json/apt-cacher-ng.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Apt-Cacher-NG", - "slug": "apt-cacher-ng", - "categories": [ - 4 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3142, - "documentation": "https://www.unix-ag.uni-kl.de/~bloch/acng/html/index.html", - "website": "https://www.unix-ag.uni-kl.de/~bloch/acng/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/linux.webp", - "config_path": "/etc/apt-cacher-ng/acng.conf", - "description": "Apt-Cacher-NG is a caching proxy. Specialized for package files from Linux distributors, primarily for Debian (and Debian based) distributions.", - "install_methods": [ - { - "type": "default", - "script": "ct/apt-cacher-ng.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 10, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/arcane.json b/frontend/public/json/arcane.json deleted file mode 100644 index 5d4fcf9d0..000000000 --- a/frontend/public/json/arcane.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Arcane", - "slug": "arcane", - "categories": [ - 3 - ], - "date_created": "2026-02-24", - "type": "addon", - "updateable": true, - "privileged": false, - "interface_port": 3552, - "documentation": "https://getarcane.app/docs", - "website": "https://getarcane.app/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/arcane.webp", - "config_path": "/opt/arcane/.env", - "description": "Arcane is designed to be an easy and modern Docker management platform, built with everybody in mind. 
The goal of Arcane is to be built for and by the community to make sure nobody feels left out or behind with their specific features or processes. ", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/arcane.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": "arcane", - "password": "arcane-admin" - }, - "notes": [ - { - "text": "This is an addon script intended to be used on top of an existing Docker container.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/archivebox.json b/frontend/public/json/archivebox.json deleted file mode 100644 index 6628da7c3..000000000 --- a/frontend/public/json/archivebox.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "ArchiveBox", - "slug": "archivebox", - "categories": [ - 12 - ], - "date_created": "2024-10-19", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8000, - "documentation": "https://github.com/ArchiveBox/ArchiveBox/wiki", - "website": "https://archivebox.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/archivebox.webp", - "config_path": "/opt/archivebox/data/ArchiveBox.conf", - "description": "ArchiveBox is an open source tool that lets organizations & individuals archive both public & private web content while retaining control over their data. 
It can be used to save copies of bookmarks, preserve evidence for legal cases, backup photos from FB/Insta/Flickr or media from YT/Soundcloud/etc., save research papers, and more...", - "install_methods": [ - { - "type": "default", - "script": "ct/archivebox.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 8, - "os": "debian", - "version": "12" - } - } - ], - "default_credentials": { - "username": "archivebox", - "password": "helper-scripts.com" - }, - "notes": [] -} diff --git a/frontend/public/json/archlinux-vm.json b/frontend/public/json/archlinux-vm.json deleted file mode 100644 index ec1e50098..000000000 --- a/frontend/public/json/archlinux-vm.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Arch Linux", - "slug": "archlinux-vm", - "categories": [ - 2 - ], - "date_created": "2025-01-27", - "type": "vm", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://wiki.archlinux.org/title/Main_page", - "website": "https://archlinux.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/arch-linux.webp", - "config_path": "", - "description": "Arch Linux is a highly customizable, independent Linux distribution that gives users complete control over their system. Known for its rolling release model, Arch Linux is always up-to-date with the latest software. It's favored by experienced users who appreciate its minimalist approach, demanding a hands-on installation and configuration process. 
This level of control and flexibility makes it a popular choice for those who want to tailor their Linux system to their exact needs.", - "install_methods": [ - { - "type": "default", - "script": "vm/archlinux-vm.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "doesnt work with lvm and lvmthin disks!", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/argus.json b/frontend/public/json/argus.json deleted file mode 100644 index 060a78d7d..000000000 --- a/frontend/public/json/argus.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Argus", - "slug": "argus", - "categories": [ - 11 - ], - "date_created": "2025-05-26", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://release-argus.io/docs/overview/", - "website": "https://release-argus.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/argus.webp", - "config_path": "/opt/argus/config.yml", - "description": "Argus will query websites at a user defined interval for new software releases and then trigger Gotify/Slack/Other notification(s) and/or WebHook(s) when one has been found. For example, you could set it to monitor the Argus repo (release-argus/argus). This will query the GitHub API and track the tag_name variable. 
When this variable changes from what it was on a previous query, a GitHub-style WebHook could be sent that triggers something (like AWX) to update Argus on your server.", - "install_methods": [ - { - "type": "default", - "script": "ct/argus.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 3, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/aria2.json b/frontend/public/json/aria2.json deleted file mode 100644 index 74850e468..000000000 --- a/frontend/public/json/aria2.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Aria2", - "slug": "aria2", - "categories": [ - 11 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 6880, - "documentation": "https://aria2.github.io/manual/en/html/index.html", - "website": "https://aria2.github.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/aria2.webp", - "config_path": "/root/aria2.daemon", - "description": "Aria2 is a lightweight multi-protocol & multi-source, cross platform download utility operated in command-line. It supports HTTP/HTTPS, FTP, SFTP, BitTorrent and Metalink.", - "install_methods": [ - { - "type": "default", - "script": "ct/aria2.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Within the LXC console, run `cat rpc.secret` to display the rpc-secret. Copy this token and paste it into the Aria2 RPC Secret Token box within the AriaNG Settings. 
Then, click the reload AriaNG button.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/asterisk.json b/frontend/public/json/asterisk.json deleted file mode 100644 index 826379d01..000000000 --- a/frontend/public/json/asterisk.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Asterisk", - "slug": "asterisk", - "categories": [ - 0 - ], - "date_created": "2025-05-14", - "type": "ct", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": "https://docs.asterisk.org/", - "config_path": "/etc/asterisk", - "website": "https://asterisk.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/asterisk.webp", - "description": "Asterisk is an open-source framework for building communications applications, most commonly used as a phone system (PBX). Developed by Digium (now part of Sangoma), it turns a standard computer into a powerful telephony server.", - "install_methods": [ - { - "type": "default", - "script": "ct/asterisk.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/audiobookshelf.json b/frontend/public/json/audiobookshelf.json deleted file mode 100644 index c6667326a..000000000 --- a/frontend/public/json/audiobookshelf.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Audiobookshelf", - "slug": "audiobookshelf", - "categories": [ - 13 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 13378, - "documentation": "https://www.audiobookshelf.org/guides/", - "website": "https://www.audiobookshelf.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/audiobookshelf.webp", - "config_path": "/usr/share/audiobookshelf/config", - "description": "Audiobookshelf is a Self-hosted audiobook and podcast server.", - "install_methods": [ - { - "type": 
"default", - "script": "ct/audiobookshelf.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 5, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/authelia.json b/frontend/public/json/authelia.json deleted file mode 100644 index a6b92e1dc..000000000 --- a/frontend/public/json/authelia.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Authelia", - "slug": "authelia", - "categories": [ - 6 - ], - "date_created": "2025-02-24", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 443, - "documentation": "https://www.authelia.com/integration/deployment/bare-metal/", - "website": "https://www.authelia.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/authelia.webp", - "config_path": "/etc/authelia/configuration.yml", - "description": "Authelia is an open-source authentication and authorization server and portal fulfilling the identity and access management (IAM) role of information security in providing multi-factor authentication and single sign-on (SSO) for your applications via a web portal. It acts as a companion for common reverse proxies.", - "install_methods": [ - { - "type": "default", - "script": "ct/authelia.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "authelia", - "password": "authelia" - }, - "notes": [ - { - "text": "During installation, you will have to input your domain (ex. domain.com). 
Authelia will use auth.domain.com", - "type": "info" - } - ] -} diff --git a/frontend/public/json/autobrr.json b/frontend/public/json/autobrr.json deleted file mode 100644 index 948184e1f..000000000 --- a/frontend/public/json/autobrr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Autobrr", - "slug": "autobrr", - "categories": [ - 14 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 7474, - "documentation": "https://autobrr.com/configuration/autobrr", - "website": "https://autobrr.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/autobrr.webp", - "config_path": "/root/.config/autobrr/config.toml", - "description": "Autobrr is a torrent downloading tool that automates the process of downloading torrents. It is designed to be modern and user-friendly, providing users with a convenient and efficient way to download torrent files. With Autobrr, you can schedule and manage your torrent downloads, and have the ability to automatically download torrents based on certain conditions, such as time of day or availability of seeds. 
This can save you time and effort, allowing you to focus on other tasks while your torrents are being downloaded in the background.", - "install_methods": [ - { - "type": "default", - "script": "ct/autobrr.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/autocaliweb.json b/frontend/public/json/autocaliweb.json deleted file mode 100644 index f0dc3e45e..000000000 --- a/frontend/public/json/autocaliweb.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Autocaliweb", - "slug": "autocaliweb", - "categories": [ - 13 - ], - "date_created": "2025-09-10", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8083, - "documentation": "https://codeberg.org/gelbphoenix/autocaliweb/wiki", - "config_path": "/etc/autocaliweb", - "website": "https://codeberg.org/gelbphoenix/autocaliweb", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/autocaliweb.webp", - "description": "A modern web management system for eBooks, eComics and PDFs", - "install_methods": [ - { - "type": "default", - "script": "ct/autocaliweb.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 6, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "admin123" - }, - "notes": [] -} diff --git a/frontend/public/json/babybuddy.json b/frontend/public/json/babybuddy.json deleted file mode 100644 index efacaaf85..000000000 --- a/frontend/public/json/babybuddy.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Baby Buddy", - "slug": "babybuddy", - "categories": [ - 23 - ], - "date_created": "2025-05-21", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://docs.baby-buddy.net/", - "website": "https://github.com/babybuddy/babybuddy", - "logo": 
"https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/baby-buddy.webp", - "config_path": "/opt/babybuddy/babybuddy/settings/production.py", - "description": "Baby Buddy is an open-source web application designed to assist caregivers in tracking various aspects of a baby's daily routine, including sleep, feedings, diaper changes, tummy time, and more. By recording this data, caregivers can better understand and anticipate their baby's needs, reducing guesswork in daily care. The application offers a user-friendly dashboard for data entry and visualization, supports multiple users, and provides features like timers and reminders. Additionally, Baby Buddy can be integrated with platforms like Home Assistant and Grafana for enhanced functionality.", - "install_methods": [ - { - "type": "default", - "script": "ct/babybuddy.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 5, - "os": "debian", - "version": "12" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "admin" - }, - "notes": [ - { - "text": "for private SSL setup visit: `https://github.com/babybuddy/babybuddy/blob/master/docs/setup/ssl.md`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/backrest.json b/frontend/public/json/backrest.json deleted file mode 100644 index 40baf6608..000000000 --- a/frontend/public/json/backrest.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Backrest", - "slug": "backrest", - "categories": [ - 7 - ], - "date_created": "2025-05-27", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9898, - "documentation": "https://garethgeorge.github.io/backrest/introduction/getting-started", - "website": "https://garethgeorge.github.io/backrest", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/backrest.webp", - "config_path": "/opt/backrest/config/config.json | /opt/backrest/.env", - "description": "Backrest is a web-accessible backup solution built on top of restic and providing a WebUI which wraps the 
restic CLI and makes it easy to create repos, browse snapshots, and restore files. Additionally, Backrest can run in the background and take an opinionated approach to scheduling snapshots and orchestrating repo health operations.", - "install_methods": [ - { - "type": "default", - "script": "ct/backrest.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "type": "info", - "text": "`cat ~/.ssh/id_ed25519.pub` to view ssh public key. This key is used to authenticate with sftp targets. You can add this key on the sftp server." - } - ] -} diff --git a/frontend/public/json/baikal.json b/frontend/public/json/baikal.json deleted file mode 100644 index b2739b3dc..000000000 --- a/frontend/public/json/baikal.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Baïkal", - "slug": "baikal", - "categories": [ - 0 - ], - "date_created": "2025-01-31", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://sabre.io/dav/", - "website": "https://sabre.io/baikal/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/baikal.webp", - "config_path": "/opt/baikal/config/baikal.yaml", - "description": "Baïkal is a lightweight CalDAV+CardDAV server. 
It offers an extensive web interface with easy management of users, address books and calendars.", - "install_methods": [ - { - "type": "default", - "script": "ct/baikal.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "Admin", - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/bar-assistant.json b/frontend/public/json/bar-assistant.json deleted file mode 100644 index 78b7018fd..000000000 --- a/frontend/public/json/bar-assistant.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Bar-Assistant", - "slug": "bar-assistant", - "categories": [ - 24 - ], - "date_created": "2025-07-14", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://docs.barassistant.app/", - "website": "https://barassistant.app/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/bar-assistant.webp", - "config_path": "/opt/bar-assistant", - "description": "Bar Assistant is all-in-one solution for managing your home bar. Compared to other recipe management software that usually tries to be more for general use, Bar Assistant is made specifically for managing cocktail recipes. 
This means that there are a lot of cocktail-oriented features, like ingredient substitutes, first-class ingredients, ABV calculations, unit switching and more..", - "install_methods": [ - { - "type": "default", - "script": "ct/bar-assistant.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/bazarr.json b/frontend/public/json/bazarr.json deleted file mode 100644 index d42b13855..000000000 --- a/frontend/public/json/bazarr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Bazarr", - "slug": "bazarr", - "categories": [ - 14 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 6767, - "documentation": "https://wiki.bazarr.media/", - "website": "https://www.bazarr.media/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/bazarr.webp", - "config_path": "/opt/bazarr/data/config/config.yaml", - "description": "Bazarr is a companion application to Sonarr and Radarr that manages and downloads subtitles based on your requirements.", - "install_methods": [ - { - "type": "default", - "script": "ct/bazarr.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/bentopdf.json b/frontend/public/json/bentopdf.json deleted file mode 100644 index 1c71cbc69..000000000 --- a/frontend/public/json/bentopdf.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "BentoPDF", - "slug": "bentopdf", - "categories": [ - 12 - ], - "date_created": "2025-10-30", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://github.com/alam00000/bentopdf", - "website": "https://www.bentopdf.com", - "logo": 
"https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/bentopdf.webp", - "config_path": "", - "description": "A privacy-first, 100% client-side PDF Toolkit. No signups/accounts, works in the browser, online or offline.", - "install_methods": [ - { - "type": "default", - "script": "ct/bentopdf.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/beszel.json b/frontend/public/json/beszel.json deleted file mode 100644 index 1e7dd9c01..000000000 --- a/frontend/public/json/beszel.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Beszel", - "slug": "beszel", - "categories": [ - 9 - ], - "date_created": "2025-01-20", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8090, - "documentation": "https://beszel.dev/guide/what-is-beszel", - "website": "https://beszel.dev/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/beszel.webp", - "config_path": "", - "description": "A lightweight server monitoring platform that provides Docker statistics, historical data, and alert functions\n ", - "install_methods": [ - { - "type": "default", - "script": "ct/beszel.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 5, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/bichon.json b/frontend/public/json/bichon.json deleted file mode 100644 index ad49d0b1e..000000000 --- a/frontend/public/json/bichon.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Bichon", - "slug": "bichon", - "categories": [ - 7 - ], - "date_created": "2026-02-07", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 15630, - "documentation": "https://github.com/rustmailer/bichon/wiki", - "config_path": "/opt/bichon/bichon.env", - "website": 
"https://github.com/rustmailer/bichon", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/bichon.webp", - "description": "Bichon is an open-source email archiving system that synchronizes emails from IMAP servers, indexes them for full-text search, and provides a REST API for programmatic access. Unlike email clients, Bichon is designed for archiving and searching rather than sending/receiving emails. It runs as a standalone server application that continuously synchronizes configured email accounts and maintains a searchable local archive.", - "install_methods": [ - { - "type": "default", - "script": "ct/bichon.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "admin@bichon" - }, - "notes": [ - { - "text": "The Disk space initially allocated by the script is only a placeholder, as we can't know how much space you will ever need. You should increase it to match your workload.", - "type": "info" - }, - { - "text": "Please copy your `BICHON_ENCRYPT_PASSWORD` from `/opt/bichon/bichon.env` to a safe place.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/bitmagnet.json b/frontend/public/json/bitmagnet.json deleted file mode 100644 index c50577637..000000000 --- a/frontend/public/json/bitmagnet.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "name": "Bitmagnet", - "slug": "bitmagnet", - "categories": [ - 11 - ], - "date_created": "2025-05-15", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3333, - "documentation": "https://bitmagnet.io/setup.html", - "website": "https://bitmagnet.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/bitmagnet.webp", - "config_path": "`/opt/bitmagnet/config.yml` or `/opt/bitmagnet/.env`", - "description": "A self-hosted BitTorrent indexer, DHT crawler, content classifier and torrent search engine with web UI, GraphQL API and Servarr stack 
integration.", - "install_methods": [ - { - "type": "default", - "script": "ct/bitmagnet.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-bitmagnet.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 3, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "During installation you will be asked to enter your TMDB API key, if you wanna use it. Make sure you have it ready.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/blocky.json b/frontend/public/json/blocky.json deleted file mode 100644 index 8a5deb492..000000000 --- a/frontend/public/json/blocky.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Blocky", - "slug": "blocky", - "categories": [ - 5 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 4000, - "documentation": "https://0xerr0r.github.io/blocky/latest/configuration/", - "website": "https://0xerr0r.github.io/blocky/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/blocky.webp", - "config_path": "/opt/blocky/config.yml", - "description": "Blocky is a software tool designed for blocking unwanted ads and trackers on local networks. It functions as a DNS proxy and runs on the Go programming language. Blocky intercepts requests to advertisements and other unwanted content and blocks them before they reach the end user. This results in a cleaner, faster, and more secure online experience for users connected to the local network. 
Blocky is open-source, easy to configure and can be run on a variety of devices, making it a versatile solution for small to medium-sized local networks.", - "install_methods": [ - { - "type": "default", - "script": "ct/blocky.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/booklore.json b/frontend/public/json/booklore.json deleted file mode 100644 index b65c5e0e7..000000000 --- a/frontend/public/json/booklore.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "BookLore", - "slug": "booklore", - "categories": [ - 13 - ], - "date_created": "2025-06-27", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 6060, - "documentation": "https://booklore-app.github.io/booklore-docs/docs/getting-started", - "website": "https://github.com/booklore-app/booklore", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/booklore.webp", - "config_path": "/opt/booklore_storage/.env", - "description": "BookLore is a self-hosted digital library for managing and reading books, offering a beautiful interface and support for metadata management. 
Built with a modern tech stack, it provides support for importing, organizing, and reading EPUBs and PDFs, while also managing cover images and book metadata.", - "install_methods": [ - { - "type": "default", - "script": "ct/booklore.sh", - "resources": { - "cpu": 3, - "ram": 3072, - "hdd": 7, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Starting Booklore (Web UI) may take up to 2 minutes after a restart or fresh installation.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/bookstack.json b/frontend/public/json/bookstack.json deleted file mode 100644 index 63f7fdf7c..000000000 --- a/frontend/public/json/bookstack.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "BookStack", - "slug": "bookstack", - "categories": [ - 12 - ], - "date_created": "2024-11-05", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://www.bookstackapp.com/docs/", - "website": "https://www.bookstackapp.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/bookstack.webp", - "config_path": "/opt/bookstack/.env", - "description": "BookStack is a user-friendly documentation platform that offers a simple and intuitive experience. New users should be able to create content with basic word-processing skills. While the platform provides advanced features, they do not interfere with the core simplicity of the user experience.", - "install_methods": [ - { - "type": "default", - "script": "ct/bookstack.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin@admin.com", - "password": "password" - }, - "notes": [ - { - "text": "Bookstack works only with static IP. 
If you change the IP of your LXC, you need to edit the .env file", - "type": "warning" - }, - { - "text": "To see database credentials, type `cat ~/bookstack.creds` in LXC console", - "type": "info" - } - ] -} diff --git a/frontend/public/json/bunkerweb.json b/frontend/public/json/bunkerweb.json deleted file mode 100644 index 101b282be..000000000 --- a/frontend/public/json/bunkerweb.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "BunkerWeb", - "slug": "bunkerweb", - "categories": [ - 6 - ], - "date_created": "2024-06-12", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://docs.bunkerweb.io/latest/", - "website": "https://www.bunkerweb.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/bunkerweb.webp", - "config_path": "/etc/bunkerweb/variables.env", - "description": "BunkerWeb is a security-focused web server that enhances web application protection. It guards against common web vulnerabilities like SQL injection, XSS, and CSRF. It features simple setup and configuration using a YAML file, customizable security rules, and provides detailed logs for traffic monitoring and threat detection.", - "install_methods": [ - { - "type": "default", - "script": "ct/bunkerweb.sh", - "resources": { - "cpu": 2, - "ram": 8192, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "WARNING: Installation sources scripts outside of Community Scripts repo. 
Please check the source before installing.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/byparr.json b/frontend/public/json/byparr.json deleted file mode 100644 index 0c7a99cb4..000000000 --- a/frontend/public/json/byparr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Byparr", - "slug": "byparr", - "categories": [ - 14 - ], - "date_created": "2026-01-21", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8191, - "documentation": "https://github.com/ThePhaseless/Byparr/blob/master/README.md", - "website": "https://github.com/ThePhaseless/Byparr", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/byparr.webp", - "config_path": "/etc/systemd/system/byparr.service", - "description": "Byparr is a proxy server to bypass Cloudflare and DDoS-GUARD protection.", - "install_methods": [ - { - "type": "default", - "script": "ct/byparr.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/bytestash.json b/frontend/public/json/bytestash.json deleted file mode 100644 index 2ac9bebfb..000000000 --- a/frontend/public/json/bytestash.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "ByteStash", - "slug": "bytestash", - "categories": [ - 20 - ], - "date_created": "2025-02-27", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://github.com/jordan-dalby/ByteStash/wiki", - "website": "https://github.com/jordan-dalby/ByteStash", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/bytestash.webp", - "config_path": "", - "description": "ByteStash is a self-hosted web application designed to store, organise, and manage your code snippets efficiently. 
With support for creating, editing, and filtering snippets, ByteStash helps you keep track of your code in one secure place.", - "install_methods": [ - { - "type": "default", - "script": "ct/bytestash.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/caddy.json b/frontend/public/json/caddy.json deleted file mode 100644 index ecfbc7fb9..000000000 --- a/frontend/public/json/caddy.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "name": "Caddy", - "slug": "caddy", - "categories": [ - 21 - ], - "date_created": "2025-09-17", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://caddyserver.com/docs/", - "website": "https://caddyserver.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/caddy.webp", - "config_path": "/etc/caddy/Caddyfile", - "description": "Caddy is a powerful, extensible platform to serve your sites, services, and apps, written in Go.", - "install_methods": [ - { - "type": "default", - "script": "ct/caddy.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 6, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-caddy.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 3, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "xcaddy needs to be updated manually after a caddy update!", - "type": "warning" - }, - { - "text": "if you need an internal module run: `caddy add-package PACKAGENAME`", - "type": "info" - }, - { - "text": "if you need an external module run: `xcaddy build --with github.com/caddy-dns/cloudflare`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/calibre-web.json b/frontend/public/json/calibre-web.json deleted file mode 100644 index 
65650c21c..000000000 --- a/frontend/public/json/calibre-web.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Calibre-Web", - "slug": "calibre-web", - "categories": [ - 4 - ], - "date_created": "2026-02-20", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8083, - "documentation": "https://github.com/janeczku/calibre-web/wiki", - "website": "https://github.com/janeczku/calibre-web", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/calibre-web.webp", - "config_path": "/opt/calibre-web/app.db", - "description": "Web app for browsing, reading and downloading eBooks from a Calibre database. Provides an attractive interface with mobile support, user management, and eBook conversion capabilities.", - "install_methods": [ - { - "type": "default", - "script": "ct/calibre-web.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "admin123" - }, - "notes": [ - { - "text": "Upload your Calibre library metadata.db during first setup wizard.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/casaos.json b/frontend/public/json/casaos.json deleted file mode 100644 index cf340e7ed..000000000 --- a/frontend/public/json/casaos.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "CasaOS", - "slug": "casaos", - "categories": [ - 2 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://wiki.casaos.io/en/home", - "website": "https://www.casaos.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/casaos.webp", - "config_path": "", - "description": "CasaOS is a software that aims to make it easy for users to create a personal cloud system at home. 
It uses the Docker ecosystem to provide a simple, user-friendly experience for managing various applications and services.", - "install_methods": [ - { - "type": "default", - "script": "ct/casaos.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "12" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "If the LXC is created Privileged, the script will automatically set up USB passthrough.", - "type": "warning" - }, - { - "text": "WARNING: Installation sources scripts outside of Community Scripts repo. Please check the source before installing.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/changedetection.json b/frontend/public/json/changedetection.json deleted file mode 100644 index 0f7e4014e..000000000 --- a/frontend/public/json/changedetection.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Change Detection", - "slug": "changedetection", - "categories": [ - 24 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5000, - "documentation": "https://changedetection.io/tutorials", - "website": "https://changedetection.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/changedetection.webp", - "config_path": "/opt/changedetection/url-watches.json", - "description": "Change Detection is a service that allows you to monitor changes to web pages and receive notifications when changes occur. 
It can be used for a variety of purposes such as keeping track of online price changes, monitoring news websites for updates, or tracking changes to online forums.", - "install_methods": [ - { - "type": "default", - "script": "ct/changedetection.sh", - "resources": { - "cpu": 4, - "ram": 4096, - "hdd": 10, - "os": "debian", - "version": "12" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/channels.json b/frontend/public/json/channels.json deleted file mode 100644 index eadbd2485..000000000 --- a/frontend/public/json/channels.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Channels DVR Server", - "slug": "channels", - "categories": [ - 13 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": false, - "privileged": true, - "interface_port": 8089, - "documentation": "https://getchannels.com/docs/getting-started/quick-start-guide/", - "website": "https://getchannels.com/dvr-server/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/channels-dvr.webp", - "config_path": "", - "description": "Channels DVR Server runs on your computer or NAS device at home. There's no cloud to worry about. Your tv shows and movies will always be available.", - "install_methods": [ - { - "type": "default", - "script": "ct/channels.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "WARNING: Installation sources scripts outside of Community Scripts repo. 
Please check the source before installing.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/checkmate.json b/frontend/public/json/checkmate.json deleted file mode 100644 index d36149df7..000000000 --- a/frontend/public/json/checkmate.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "Checkmate", - "slug": "checkmate", - "categories": [ - 9 - ], - "date_created": "2026-02-07", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5173, - "documentation": "https://github.com/bluewave-labs/Checkmate#readme", - "website": "https://github.com/bluewave-labs/Checkmate", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/checkmate.webp", - "config_path": "/opt/checkmate/server/.env", - "description": "Checkmate is an open source uptime and infrastructure monitoring application that helps you track the availability and performance of your services.", - "install_methods": [ - { - "type": "default", - "script": "ct/checkmate.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 10, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Create your admin account on first login via the web interface.", - "type": "info" - }, - { - "text": "Server API runs on port 52345, Client UI on port 5173.", - "type": "info" - }, - { - "text": "For PageSpeed monitoring, add a Google PageSpeed API key to the server .env file.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/checkmk.json b/frontend/public/json/checkmk.json deleted file mode 100644 index ec0a757fd..000000000 --- a/frontend/public/json/checkmk.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Checkmk", - "slug": "checkmk", - "categories": [ - 9 - ], - "date_created": "2024-12-19", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://docs.checkmk.com/", - "website": "https://checkmk.com/", - "logo": 
"https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/checkmk.webp", - "config_path": "", - "description": "Checkmk is an IT monitoring software that tracks the health and performance of your systems, networks, servers, applications, and cloud services. It provides real-time insights, alerts for issues, and tools for troubleshooting, helping ensure smooth operations across your infrastructure.", - "install_methods": [ - { - "type": "default", - "script": "ct/checkmk.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Login Credentials : `cat ~/checkmk.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/clean-lxcs.json b/frontend/public/json/clean-lxcs.json deleted file mode 100644 index b1d5eaa27..000000000 --- a/frontend/public/json/clean-lxcs.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "PVE LXC Cleaner", - "slug": "clean-lxcs", - "categories": [ - 1 - ], - "date_created": "2024-04-29", - "type": "pve", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/linuxcontainers.webp", - "config_path": "", - "description": "This script provides options to delete logs and cache, and repopulate apt lists for Ubuntu and Debian systems.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/clean-lxcs.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within the Proxmox shell", - "type": "info" - } - ] -} diff --git a/frontend/public/json/clean-orphaned-lvm.json b/frontend/public/json/clean-orphaned-lvm.json deleted file mode 100644 index da7f40599..000000000 --- 
a/frontend/public/json/clean-orphaned-lvm.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "PVE Clean Orphaned LVM", - "slug": "clean-orphaned-lvm", - "categories": [ - 1 - ], - "date_created": "2025-01-29", - "type": "pve", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "", - "description": "This script helps Proxmox users identify and remove orphaned LVM volumes that are no longer associated with any VM or LXC container. It scans all LVM volumes, detects unused ones, and provides an interactive prompt to delete them safely. System-critical volumes like root, swap, and data are excluded to prevent accidental deletion.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/clean-orphaned-lvm.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within the Proxmox shell", - "type": "info" - } - ] -} diff --git a/frontend/public/json/cleanuparr.json b/frontend/public/json/cleanuparr.json deleted file mode 100644 index 15ac368ba..000000000 --- a/frontend/public/json/cleanuparr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Cleanuparr", - "slug": "cleanuparr", - "categories": [ - 14 - ], - "date_created": "2025-07-25", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 11011, - "documentation": "https://cleanuparr.github.io/Cleanuparr/docs/", - "website": "https://github.com/Cleanuparr/Cleanuparr", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/cleanuparr.webp", - "config_path": "/opt/cleanuparr/config", - "description": "Cleanuparr is a tool for automating the cleanup of unwanted or blocked files in Sonarr, Radarr, and supported download clients like qBittorrent, Transmission, 
and Deluge. It removes incomplete, blocked, or malicious downloads and can trigger replacement searches to ensure your media library stays complete and up-to-date.", - "install_methods": [ - { - "type": "default", - "script": "ct/cleanuparr.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/cloudflare-ddns.json b/frontend/public/json/cloudflare-ddns.json deleted file mode 100644 index 973393e6b..000000000 --- a/frontend/public/json/cloudflare-ddns.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Cloudflare-DDNS", - "slug": "cloudflare-ddns", - "categories": [ - 4 - ], - "date_created": "2025-05-22", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://github.com/favonia/cloudflare-ddns/blob/main/README.markdown", - "config_path": "/etc/systemd/system/cloudflare-ddns.service", - "website": "https://github.com/favonia/cloudflare-ddns", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/cloudflare.webp", - "description": "A feature-rich and robust Cloudflare DDNS updater with a small footprint. 
The program will detect your machine’s public IP addresses and update DNS records using the Cloudflare API", - "install_methods": [ - { - "type": "default", - "script": "ct/cloudflare-ddns.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 2, - "os": "Debian", - "version": "12" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "To setup the updater you must have the Cloudflare Token and the domains, please read the Github documentation at `https://github.com/favonia/cloudflare-ddns?tab=readme-ov-file#-step-1-updating-the-compose-file` (only the expandable section)", - "type": "warning" - }, - { - "text": "To update the configuration edit `/etc/systemd/system/cloudflare-ddns.service`. After edit please restart with `systemctl restart cloudflare-ddns`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/cloudflared.json b/frontend/public/json/cloudflared.json deleted file mode 100644 index bdfee67e9..000000000 --- a/frontend/public/json/cloudflared.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Cloudflared", - "slug": "cloudflared", - "categories": [ - 4 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://developers.cloudflare.com/cloudflare-one/connections/connect-networks/", - "website": "https://www.cloudflare.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/cloudflare.webp", - "config_path": "/usr/local/etc/cloudflared/config.yml", - "description": "Cloudflared is a command-line tool that allows you to securely access resources on the Cloudflare network, such as websites and APIs, from your local computer. 
It works by creating a secure tunnel between your computer and the Cloudflare network, allowing you to access resources as if they were on your local network.", - "install_methods": [ - { - "type": "default", - "script": "ct/cloudflared.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "After install, run: cloudflared tunnel login && cloudflared tunnel create ", - "type": "info" - }, - { - "text": "Or create tunnel via Cloudflare Zero Trust Dashboard", - "type": "info" - } - ] -} diff --git a/frontend/public/json/cloudreve.json b/frontend/public/json/cloudreve.json deleted file mode 100644 index 031b980dd..000000000 --- a/frontend/public/json/cloudreve.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Cloudreve", - "slug": "cloudreve", - "categories": [ - 12 - ], - "date_created": "2025-07-17", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5212, - "documentation": "https://docs.cloudreve.org/en/", - "website": "https://cloudreve.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/cloudreve.webp", - "config_path": "/opt/cloudreve/data/conf.ini", - "description": "Cloudreve is an open-source, community-driven cloud storage system that provides file sharing, synchronization, and management features. 
It supports a wide range of storage backends and integrates with various notification and logging platforms.", - "install_methods": [ - { - "type": "default", - "script": "ct/cloudreve.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 10, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "After Installation: Register your user -> Login -> Dashboard -> Accept Primary URL.", - "type": "warn" - } - ] -} diff --git a/frontend/public/json/cockpit.json b/frontend/public/json/cockpit.json deleted file mode 100644 index b678a1a7f..000000000 --- a/frontend/public/json/cockpit.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Cockpit", - "slug": "cockpit", - "categories": [ - 10 - ], - "date_created": "2024-10-20", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9090, - "documentation": "https://cockpit-project.org/documentation.html", - "website": "https://cockpit-project.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/cockpit.webp", - "config_path": "/etc/cockpit/cockpit.conf", - "description": "Cockpit is a web-based graphical interface for managing Linux servers. It allows users to perform tasks like configuring networks, managing storage, and monitoring system performance directly through a web browser. It integrates with existing system tools, making it suitable for both beginners and experienced admins.", - "install_methods": [ - { - "type": "default", - "script": "ct/cockpit.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "12" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Set a root password if using autologin. This will be the Cockpit password. 
To set root password run `sudo passwd root`", - "type": "info" - }, - { - "text": "If you plan on using 45Drives extension with NFS, you must setup LXC as privileged. Some features of 45Drives don't work on Debian 13, so Debian 12 must be used.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/coder-code-server.json b/frontend/public/json/coder-code-server.json deleted file mode 100644 index c1ac8d4ba..000000000 --- a/frontend/public/json/coder-code-server.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "name": "Coder Code Server", - "slug": "coder-code-server", - "categories": [ - 1, - 20, - 11 - ], - "date_created": "2024-05-02", - "type": "addon", - "updateable": false, - "privileged": false, - "interface_port": 8680, - "documentation": "https://coder.com/docs/code-server", - "website": "https://coder.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/coder.webp", - "config_path": "", - "description": "Coder Code Server is an open-source project that enables you to run Visual Studio Code (VS Code) on a remote machine, such as a desktop PC or virtual server. It serves a web-based version of VS Code that you can access from any browser via a URL, allowing remote development without needing an SSH connection. 
Unlike the official VS Code Server used by vscode.dev for Remote Tunnels, code-server is developed by Coder and operates independently, providing similar capabilities through a self-hosted solution.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/coder-code-server.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within an existing LXC Console", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/comfyui.json b/frontend/public/json/comfyui.json deleted file mode 100644 index ee1dc2f17..000000000 --- a/frontend/public/json/comfyui.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "ComfyUI", - "slug": "comfyui", - "categories": [ - 20 - ], - "date_created": "2025-10-26", - "type": "ct", - "updateable": true, - "privileged": false, - "config_path": "/opt", - "interface_port": 8188, - "documentation": "https://github.com/comfyanonymous/ComfyUI", - "website": "https://www.comfy.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/comfyui.webp", - "description": "ComfyUI is a node-based interface and inference engine for generative AI. Users can combine various AI models and operations through nodes to achieve highly customizable and controllable content generation.", - "install_methods": [ - { - "type": "default", - "script": "ct/comfyui.sh", - "resources": { - "cpu": 4, - "ram": 8192, - "hdd": 25, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Application takes long time to install. 
Please be patient!", - "type": "warning" - }, - { - "text": "Please check that you have installed the drivers for your GPU.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/commafeed.json b/frontend/public/json/commafeed.json deleted file mode 100644 index efc71ac63..000000000 --- a/frontend/public/json/commafeed.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "CommaFeed", - "slug": "commafeed", - "categories": [ - 12 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8082, - "documentation": "https://athou.github.io/commafeed/documentation/", - "website": "https://www.commafeed.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/commafeed.webp", - "config_path": "", - "description": "CommaFeed is a Google Reader inspired self-hosted RSS reader.", - "install_methods": [ - { - "type": "default", - "script": "ct/commafeed.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "admin" - }, - "notes": [] -} diff --git a/frontend/public/json/configarr.json b/frontend/public/json/configarr.json deleted file mode 100644 index 0743e5db7..000000000 --- a/frontend/public/json/configarr.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Configarr", - "slug": "configarr", - "categories": [ - 14 - ], - "date_created": "2025-05-21", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8989, - "documentation": "https://configarr.raydak.de/docs/intro", - "config_path": "/opt/configarr/config.yml", - "website": "https://configarr.raydak.de/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/configarr.webp", - "description": "Configarr is an open-source tool designed to simplify configuration and synchronization for Sonarr and Radarr (and other experimental). 
It integrates with TRaSH Guides to automate updates of custom formats, quality profiles, and other settings, while also supporting user-defined configurations.", - "install_methods": [ - { - "type": "default", - "script": "ct/configarr.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "change secrets file /opt/configarr/secrets.yml", - "type": "info" - } - ] -} diff --git a/frontend/public/json/convertx.json b/frontend/public/json/convertx.json deleted file mode 100644 index 2c6d6b920..000000000 --- a/frontend/public/json/convertx.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "ConvertX", - "slug": "convertx", - "categories": [ - 9 - ], - "date_created": "2025-06-26", - "type": "ct", - "updateable": true, - "privileged": false, - "config_path": "/opt/convertx/.env", - "interface_port": 3000, - "documentation": "https://github.com/C4illin/ConvertX", - "website": "https://github.com/C4illin/ConvertX", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/convertx.webp", - "description": "ConvertX is a self-hosted online file converter supporting over 1000 formats, including images, audio, video, documents, and more, powered by FFmpeg, GraphicsMagick, and other libraries.", - "install_methods": [ - { - "type": "default", - "script": "ct/convertx.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 20, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Complete setup via the web interface at http://:3000. 
Create and secure the admin account immediately.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/coolify.json b/frontend/public/json/coolify.json deleted file mode 100644 index 82fe06409..000000000 --- a/frontend/public/json/coolify.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "name": "Coolify", - "slug": "coolify", - "categories": [ - 3 - ], - "date_created": "2025-12-09", - "type": "addon", - "updateable": true, - "privileged": false, - "interface_port": 8000, - "documentation": "https://coolify.io/docs", - "website": "https://coolify.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/coolify.webp", - "config_path": "/data/coolify", - "description": "Coolify is an open-source & self-hostable alternative to Heroku, Netlify, and Vercel. It helps you manage your servers, applications, and databases on your own hardware with Docker. Deploy any application from Git repositories, Docker images, or use pre-built templates.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/coolify.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "This is an addon script intended to be used on top of an existing Docker container.", - "type": "info" - }, - { - "text": "Execute within an existing LXC console (Debian / Ubuntu / Alpine supported)", - "type": "info" - }, - { - "text": "Initial setup will be done via the web interface on first access.", - "type": "info" - }, - { - "text": "Coolify has built-in auto-updates. 
You can configure update frequency in Settings.", - "type": "info" - }, - { - "text": "To update via CLI, run the addon script again and select Update, or use: bash <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/tools/addon/coolify.sh)", - "type": "info" - } - ] -} diff --git a/frontend/public/json/copyparty.json b/frontend/public/json/copyparty.json deleted file mode 100644 index e554a5ef2..000000000 --- a/frontend/public/json/copyparty.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Copyparty", - "slug": "copyparty", - "categories": [ - 11 - ], - "date_created": "2025-08-18", - "type": "addon", - "updateable": true, - "privileged": false, - "interface_port": 3923, - "documentation": "https://github.com/9001/copyparty?tab=readme-ov-file#the-browser", - "website": "https://github.com/9001/copyparty", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/copyparty.webp", - "config_path": "/etc/copyparty.conf", - "description": "Copyparty is a lightweight, portable HTTP file server with a browser-based interface. 
It supports drag-and-drop uploads, downloads, deduplication, media playback, and advanced search, making it ideal for quickly sharing and managing files.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/copyparty.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within the Proxmox shell or in LXC", - "type": "info" - }, - { - "text": "Update with: update_copyparty", - "type": "info" - } - ] -} diff --git a/frontend/public/json/cosmos.json b/frontend/public/json/cosmos.json deleted file mode 100644 index a885be694..000000000 --- a/frontend/public/json/cosmos.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "name": "Cosmos", - "slug": "cosmos", - "categories": [ - 2, - 3 - ], - "date_created": "2025-02-07", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://cosmos-cloud.io/doc/1%20index/", - "website": "https://cosmos-cloud.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/cosmos-server.webp", - "config_path": "/etc/sysconfig/CosmosCloud", - "description": "Cosmos Cloud is a self-hosting platform that automates maintenance and security. It offers an app marketplace, reverse proxy management, container control, VPN integration, real-time monitoring, and disk management. Security features include SSO, anti-DDoS, and encryption. It simplifies self-hosting for all users.", - "install_methods": [ - { - "type": "default", - "script": "ct/cosmos.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "type": "info", - "text": "The file `/etc/sysconfig/CosmosCloud` is optional. If you need custom settings, you can create it yourself." 
- }, - { - "type": "warning", - "text": "Requires FUSE support for mergerfs functionality. FUSE is enabled by default during installation." - } - ] -} diff --git a/frontend/public/json/crafty-controller.json b/frontend/public/json/crafty-controller.json deleted file mode 100644 index 11b6ac2c0..000000000 --- a/frontend/public/json/crafty-controller.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Crafty Controller", - "slug": "crafty-controller", - "categories": [ - 24 - ], - "date_created": "2025-02-01", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8443, - "documentation": "https://docs.craftycontrol.com/", - "website": "https://craftycontrol.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/crafty-controller.webp", - "config_path": "/opt/crafty-controller/crafty/crafty-4/app/config/config.json", - "description": "Crafty Controller is a free and open-source Minecraft launcher and manager that allows users to start and administer Minecraft servers from a user-friendly interface. The interface is run as a self-hosted web server that is accessible to devices on the local network by default and can be port forwarded to provide external access outside of your local network. Crafty is designed to be easy to install and use, requiring only a bit of technical knowledge and a desire to learn to get started. Crafty Controller is still actively being developed by Arcadia Technology and we are continually making major improvements to the software.\n\nCrafty Controller is a feature rich panel that allows you to create and run servers, manage players, run commands, change server settings, view and edit server files, and make backups. 
With the help of Crafty Controller managing a large number of Minecraft servers on separate versions is easy and intuitive to do.", - "install_methods": [ - { - "type": "default", - "script": "ct/crafty-controller.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 16, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": null - }, - "notes": [ - { - "text": "Show password: `cat ~/crafty-controller.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/cron-update-lxcs.json b/frontend/public/json/cron-update-lxcs.json deleted file mode 100644 index 5e235d721..000000000 --- a/frontend/public/json/cron-update-lxcs.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "PVE Cron LXC Updater", - "slug": "cron-update-lxcs", - "categories": [ - 1 - ], - "date_created": "2024-04-29", - "type": "pve", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "", - "description": "This script will add/remove a crontab schedule that updates the operating system of all LXCs every Sunday at midnight.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/cron-update-lxcs.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within the Proxmox shell", - "type": "info" - }, - { - "text": "To exclude LXCs from updating, edit the crontab using `crontab -e` and add CTID as shown in the example below:\n\n\n\n`0 0 * * 0 PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin /bin/bash -c \"$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/tools/pve/update-lxcs-cron.sh)\" -s 103 111 >>/var/log/update-lxcs-cron.log 2>/dev/null`", - "type": 
"info" - } - ] -} diff --git a/frontend/public/json/cronicle.json b/frontend/public/json/cronicle.json deleted file mode 100644 index 80df80119..000000000 --- a/frontend/public/json/cronicle.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Cronicle Primary", - "slug": "cronicle", - "categories": [ - 19 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3012, - "documentation": "https://github.com/jhuckaby/Cronicle/blob/master/README.md", - "website": "https://github.com/jhuckaby/Cronicle", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/chronicle.webp", - "config_path": "/opt/cronicle/conf/config.json", - "description": "Cronicle is a task scheduling and management software that allows users to schedule and run tasks automatically on multiple servers. It has a web-based user interface that provides a convenient and centralized way to manage tasks and view their execution status. With Cronicle, users can schedule tasks to run at specific times, or on demand, and assign tasks to specific worker servers. The software provides real-time statistics and a live log viewer to help users monitor the progress of tasks. 
Cronicle is designed for use in large-scale environments, making it a valuable tool for automation and management of complex and time-sensitive tasks.", - "install_methods": [ - { - "type": "default", - "script": "ct/cronicle.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "admin" - }, - "notes": [ - { - "text": "Primary and Worker Private Keys Must Match in the config file", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/cronmaster.json b/frontend/public/json/cronmaster.json deleted file mode 100644 index 93d28ed43..000000000 --- a/frontend/public/json/cronmaster.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "CR*NMASTER", - "slug": "cronmaster", - "categories": [ - 1 - ], - "date_created": "2026-02-22", - "type": "addon", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://github.com/fccview/cronmaster", - "website": "https://github.com/fccview/cronmaster", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/cr-nmaster.webp", - "config_path": "/opt/cronmaster/.env", - "description": "Self-hosted cron job scheduler with web UI, live logs, auth and prebuilt binaries provided upstream.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/cronmaster.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Credentials are saved to: /root/cronmaster.creds", - "type": "info" - }, - { - "text": "Update with: update_cronmaster", - "type": "info" - } - ] -} diff --git a/frontend/public/json/cross-seed.json b/frontend/public/json/cross-seed.json deleted file mode 100644 index 4ef7e37bd..000000000 --- a/frontend/public/json/cross-seed.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "cross-seed", - 
"slug": "cross-seed", - "categories": [ - 14 - ], - "date_created": "2025-02-07", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 2468, - "documentation": "https://www.cross-seed.org/docs/category/basics", - "website": "https://www.cross-seed.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/cross-seed.webp", - "config_path": "~/.cross-seed/config.js", - "description": "cross-seed is an app designed to help you download torrents that you can cross seed based on your existing torrents. It is designed to match conservatively to minimize manual intervention.", - "install_methods": [ - { - "type": "default", - "script": "ct/cross-seed.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "After the installation cross-seed will fail to start with an empty configuration. To fix this, edit the config file to properly configure cross-seed, then restart by running `systemctl restart cross-seed`.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/crowdsec.json b/frontend/public/json/crowdsec.json deleted file mode 100644 index e3f27966b..000000000 --- a/frontend/public/json/crowdsec.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "CrowdSec", - "slug": "crowdsec", - "categories": [ - 6 - ], - "date_created": "2024-05-02", - "type": "addon", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": "https://doc.crowdsec.net/", - "website": "https://crowdsec.net/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/crowdsec.webp", - "config_path": "", - "description": "CrowdSec is a free and open-source intrusion prevention system (IPS) designed to provide network security against malicious traffic. 
It is a collaborative IPS that analyzes behaviors and responses to attacks by sharing signals across a community of users. CrowdSec leverages the collective intelligence of its users to detect and respond to security threats in real-time. With CrowdSec, network administrators can set up protection against a wide range of threats, including malicious traffic, bots, and denial-of-service (DoS) attacks. The software is designed to be easy to use and integrate with existing security systems, making it a valuable tool for enhancing the security of any network.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/crowdsec.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within an existing LXC Console. Debian only!", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/cryptpad.json b/frontend/public/json/cryptpad.json deleted file mode 100644 index ea19b74c4..000000000 --- a/frontend/public/json/cryptpad.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "CryptPad", - "slug": "cryptpad", - "categories": [ - 12 - ], - "date_created": "2025-03-11", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://docs.cryptpad.org/", - "website": "https://cryptpad.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/cryptpad.webp", - "config_path": "/opt/cryptpad/config/config.js", - "description": "CryptPad is a collaboration suite that is end-to-end encrypted and open-source. It is designed to facilitate collaboration by synchronizing changes to documents in real time. 
Since all the user data is encrypted, in the event of a breach, attackers have no way of accessing the stored content", - "install_methods": [ - { - "type": "default", - "script": "ct/cryptpad.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 8, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "After installation finishes, run `systemctl status cryptpad.service` to get the token URL, which you can use to create an admin account", - "type": "info" - } - ] -} diff --git a/frontend/public/json/daemonsync.json b/frontend/public/json/daemonsync.json deleted file mode 100644 index 188a40f1d..000000000 --- a/frontend/public/json/daemonsync.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Daemon Sync Server", - "slug": "daemonsync", - "categories": [ - 19 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8084, - "documentation": null, - "website": "https://daemonsync.me/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/daemon-sync.webp", - "config_path": "", - "description": "Sync files from app to server, share photos & videos, back up your data and stay secure inside local network.", - "install_methods": [ - { - "type": "default", - "script": "ct/daemonsync.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/databasus.json b/frontend/public/json/databasus.json deleted file mode 100644 index f0ff2c9a6..000000000 --- a/frontend/public/json/databasus.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Databasus", - "slug": "databasus", - "categories": [ - 7 - ], - "date_created": "2026-02-17", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": 
"https://github.com/databasus/databasus", - "website": "https://github.com/databasus/databasus", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/databasus.webp", - "config_path": "/opt/databasus/.env", - "description": "Free, open source and self-hosted solution for automated PostgreSQL backups. With multiple storage options, notifications, scheduling, and a beautiful web interface for managing database backups across multiple PostgreSQL instances.", - "install_methods": [ - { - "type": "default", - "script": "ct/databasus.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin@localhost", - "password": "See /root/databasus.creds" - }, - "notes": [ - { - "text": "Supports PostgreSQL versions 12-18 with cloud and self-hosted instances", - "type": "info" - }, - { - "text": "Features: Scheduled backups, multiple storage providers, notifications, encryption", - "type": "info" - } - ] -} diff --git a/frontend/public/json/dawarich.json b/frontend/public/json/dawarich.json deleted file mode 100644 index 274ce70bc..000000000 --- a/frontend/public/json/dawarich.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Dawarich", - "slug": "dawarich", - "categories": [ - 9 - ], - "date_created": "2026-01-23", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://dawarich.app/docs/intro", - "website": "https://dawarich.app/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/dawarich.webp", - "config_path": "/opt/dawarich/.env", - "description": "Dawarich is a self-hosted alternative to Google Timeline (Google Maps Location History). 
It allows you to import your location history from Google Maps Timeline and Owntracks, view it on a map, and analyze your location data with statistics and visualizations.", - "install_methods": [ - { - "type": "default", - "script": "ct/dawarich.sh", - "resources": { - "cpu": 4, - "ram": 4096, - "hdd": 15, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "demo@dawarich.app", - "password": "password" - }, - "notes": [ - { - "text": "Default credentials: demo@dawarich.app / password - Change after first login!", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/ddclient.json b/frontend/public/json/ddclient.json deleted file mode 100644 index 402a82bbb..000000000 --- a/frontend/public/json/ddclient.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "ddclient", - "slug": "ddclient", - "categories": [ - 4 - ], - "date_created": "2026-03-05", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://ddclient.net/", - "website": "https://ddclient.net/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ddclient.webp", - "config_path": "/etc/ddclient.conf", - "description": "ddclient is a Perl client used to update dynamic DNS entries for accounts on a wide range of dynamic DNS service providers. 
It supports multiple protocols and providers, allowing automatic IP address updates for your domain names.", - "install_methods": [ - { - "type": "default", - "script": "ct/ddclient.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "type": "info", - "text": "After installation, edit `/etc/ddclient.conf` with your dynamic DNS provider credentials" - }, - { - "type": "info", - "text": "Sample configuration is created for Namecheap but can be modified for other providers" - } - ] -} diff --git a/frontend/public/json/debian-13-vm.json b/frontend/public/json/debian-13-vm.json deleted file mode 100644 index a294fbcbe..000000000 --- a/frontend/public/json/debian-13-vm.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "Debian 13", - "slug": "debian-13-vm", - "categories": [ - 2 - ], - "date_created": "2025-08-19", - "type": "vm", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": "https://www.debian.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/debian.webp", - "config_path": "", - "description": "Debian 13 (Trixie) Linux is a distribution that emphasizes free software. It supports many hardware platforms", - "install_methods": [ - { - "type": "default", - "script": "vm/debian-13-vm.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "VM has no root password set. 
To login type in user `root` and just press enter", - "type": "info" - }, - { - "text": "For additional Debian commands and tips after installation, checkout: `https://github.com/community-scripts/ProxmoxVE/discussions/836`", - "type": "info" - }, - { - "text": "If you use Cloud-init, checkout cloud-init discussion: `https://github.com/community-scripts/ProxmoxVE/discussions/272`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/debian-vm.json b/frontend/public/json/debian-vm.json deleted file mode 100644 index ccbafec6a..000000000 --- a/frontend/public/json/debian-vm.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Debian 12", - "slug": "debian-vm", - "categories": [ - 2 - ], - "date_created": "2024-05-02", - "type": "vm", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": "https://www.debian.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/debian.webp", - "config_path": "", - "description": "Debian Linux is a distribution that emphasizes free software. It supports many hardware platforms", - "install_methods": [ - { - "type": "default", - "script": "vm/debian-vm.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "VM has no root password set. 
To login type in user `root` and just press enter", - "type": "info" - }, - { - "text": "For additional Debian commands and tips after installation, checkout: `https://github.com/community-scripts/ProxmoxVE/discussions/836`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/debian.json b/frontend/public/json/debian.json deleted file mode 100644 index a256869a0..000000000 --- a/frontend/public/json/debian.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Debian", - "slug": "debian", - "categories": [ - 2 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": "https://www.debian.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/debian.webp", - "config_path": "", - "description": "Debian Linux is a distribution that emphasizes free software. It supports many hardware platforms.", - "install_methods": [ - { - "type": "default", - "script": "ct/debian.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/deconz.json b/frontend/public/json/deconz.json deleted file mode 100644 index c41957af2..000000000 --- a/frontend/public/json/deconz.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "deCONZ", - "slug": "deconz", - "categories": [ - 17 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": true, - "interface_port": 80, - "documentation": "https://github.com/dresden-elektronik/deconz-rest-plugin/wiki", - "website": "https://www.phoscon.de/en/conbee2/software#deconz", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/phoscon.webp", - "config_path": "", - "description": "deCONZ is a software for managing and controlling Zigbee-based smart home devices. 
It allows for setting up, configuring and visualizing the status of connected devices, as well as for triggering actions and automations. It works as a bridge between the Zigbee network and other home automation systems and can be used as a standalone solution or integrated into existing setups.", - "install_methods": [ - { - "type": "default", - "script": "ct/deconz.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/deluge.json b/frontend/public/json/deluge.json deleted file mode 100644 index c8887c87f..000000000 --- a/frontend/public/json/deluge.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Deluge", - "slug": "deluge", - "categories": [ - 11 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8112, - "documentation": "https://www.deluge-torrent.org/userguide/", - "website": "https://www.deluge-torrent.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/deluge.webp", - "config_path": "~/.config/deluge", - "description": "Deluge is a free, open-source, lightweight BitTorrent client. 
It supports various platforms including Windows, Linux, and macOS, and offers features such as peer exchange, DHT, and magnet links.", - "install_methods": [ - { - "type": "default", - "script": "ct/deluge.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": "deluge" - }, - "notes": [] -} diff --git a/frontend/public/json/discopanel.json b/frontend/public/json/discopanel.json deleted file mode 100644 index 2265bd3e1..000000000 --- a/frontend/public/json/discopanel.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "DiscoPanel", - "slug": "discopanel", - "categories": [ - 24 - ], - "date_created": "2025-12-10", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://discopanel.app/docs/", - "config_path": "", - "website": "https://discopanel.app/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/discopanel.webp", - "description": "The Minecraft Server Manager That *Actually* Works\nBuilt by someone who was done with bloated panels, endless menus, and tools that break the moment you need them most.\nSpin up servers in minutes, configure your proxy without headaches, and link your own DNS name effortlessly.\nFast setup, clean controls, zero nonsense—just a manager that gets out of your way and lets you play.", - "install_methods": [ - { - "type": "default", - "script": "ct/discopanel.sh", - "resources": { - "cpu": 4, - "ram": 4096, - "hdd": 15, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/dispatcharr.json b/frontend/public/json/dispatcharr.json deleted file mode 100644 index b28433db6..000000000 --- a/frontend/public/json/dispatcharr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Dispatcharr", - "slug": "dispatcharr", - "categories": [ - 14 - ], - 
"date_created": "2025-10-27", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9191, - "documentation": "https://dispatcharr.github.io/Dispatcharr-Docs/", - "website": "https://github.com/Dispatcharr/Dispatcharr", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/dispatcharr.webp", - "config_path": "/opt/dispatcharr/.env", - "description": "Dispatcharr is an open-source powerhouse for managing IPTV streams and EPG data with elegance and control. Born from necessity and built with passion, it started as a personal project by OkinawaBoss and evolved with contributions from legends like dekzter, SergeantPanda and Bucatini.", - "install_methods": [ - { - "type": "default", - "script": "ct/dispatcharr.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/docker-vm.json b/frontend/public/json/docker-vm.json deleted file mode 100644 index a4f786a58..000000000 --- a/frontend/public/json/docker-vm.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "name": "Docker", - "slug": "docker-vm", - "categories": [ - 2, - 3 - ], - "date_created": "2025-01-20", - "type": "vm", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": "https://www.docker.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/docker.webp", - "config_path": "", - "description": "Docker is an open-source project for automating the deployment of applications as portable, self-sufficient containers. 
This Template includes Docker Engine and Docker Compose Plugin.", - "install_methods": [ - { - "type": "default", - "script": "vm/docker-vm.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 10, - "os": "debian", - "version": "12" - } - } - ], - "default_credentials": { - "username": "root", - "password": "docker" - }, - "notes": [ - { - "text": "After the script completes, click on the VM, then on the Summary or Console tab to find the VM IP.", - "type": "info" - }, - { - "text": "This Script works on amd64 and arm64 Architecture.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/docker.json b/frontend/public/json/docker.json deleted file mode 100644 index 8dbcf0002..000000000 --- a/frontend/public/json/docker.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "name": "Docker", - "slug": "docker", - "categories": [ - 3 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": "https://www.docker.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/docker.webp", - "config_path": "", - "description": "Docker is an open-source project for automating the deployment of applications as portable, self-sufficient containers.", - "install_methods": [ - { - "type": "default", - "script": "ct/docker.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-docker.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 2, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "If the LXC is created Privileged, the script will automatically set up USB passthrough.", - "type": "warning" - }, - { - "text": "Run Compose V2 by replacing the hyphen (-) with a space, using `docker compose`, instead of `docker-compose`.", - "type": "warning" - }, - { - "text": 
"Options to Install Portainer and/or Docker Compose V2", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/dockge.json b/frontend/public/json/dockge.json deleted file mode 100644 index a0d60f184..000000000 --- a/frontend/public/json/dockge.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "Dockge", - "slug": "dockge", - "categories": [ - 3 - ], - "date_created": "2024-05-02", - "type": "addon", - "updateable": true, - "privileged": false, - "interface_port": 5001, - "documentation": null, - "website": "https://github.com/louislam/dockge", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/dockge.webp", - "config_path": "/opt/dockge/compose.yaml", - "description": "Dockge is a fancy, easy-to-use and reactive self-hosted docker compose.yaml stack-oriented manager.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/dockge.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "This is an addon script intended to be used on top of an existing Docker container.", - "type": "info" - }, - { - "text": "Execute within an existing LXC console (Debian / Ubuntu / Alpine supported)", - "type": "info" - }, - { - "text": "To update, run the addon script again and select Update, or use: bash <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/tools/addon/dockge.sh)", - "type": "info" - } - ] -} diff --git a/frontend/public/json/docmost.json b/frontend/public/json/docmost.json deleted file mode 100644 index 485c8b834..000000000 --- a/frontend/public/json/docmost.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Docmost", - "slug": "docmost", - "categories": [ - 12 - ], - "date_created": "2025-02-18", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://docmost.com/docs/installation", - 
"website": "https://docmost.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/docmost.webp", - "config_path": "/opt/docmost/.env", - "description": "Open-source collaborative wiki and documentation software. Create, collaborate, and share knowledge seamlessly with Docmost. Ideal for managing your wiki, knowledge-base, documentation and a lot more.", - "install_methods": [ - { - "type": "default", - "script": "ct/docmost.sh", - "resources": { - "cpu": 3, - "ram": 4096, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Use `cat ~/docmost.creds` to see database credentials.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/dokploy.json b/frontend/public/json/dokploy.json deleted file mode 100644 index a07a2ec19..000000000 --- a/frontend/public/json/dokploy.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "name": "Dokploy", - "slug": "dokploy", - "categories": [ - 3 - ], - "date_created": "2025-12-09", - "type": "addon", - "updateable": true, - "privileged": true, - "interface_port": 3000, - "documentation": "https://docs.dokploy.com/", - "website": "https://dokploy.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/png/dokploy.png", - "config_path": "/etc/dokploy", - "description": "Dokploy is a free, self-hostable Platform as a Service (PaaS) that simplifies the deployment and management of applications and databases. 
Built with Docker and Traefik, it offers features like automatic SSL, Docker Compose support, database backups, and a real-time monitoring dashboard.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/dokploy.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "This is an addon script intended to be used on top of an existing Docker container.", - "type": "info" - }, - { - "text": "Execute within an existing LXC console (Debian / Ubuntu / Alpine supported)", - "type": "info" - }, - { - "text": "Initial setup will be done via the web interface on first access.", - "type": "info" - }, - { - "text": "Dokploy has built-in auto-updates via the web interface.", - "type": "info" - }, - { - "text": "To update via CLI, run the addon script again and select Update, or use: bash <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/tools/addon/dokploy.sh)", - "type": "info" - } - ] -} diff --git a/frontend/public/json/dolibarr.json b/frontend/public/json/dolibarr.json deleted file mode 100644 index c4b543fba..000000000 --- a/frontend/public/json/dolibarr.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Dolibarr", - "slug": "dolibarr", - "categories": [ - 25 - ], - "date_created": "2025-02-20", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://wiki.dolibarr.org/index.php?title=Home", - "website": "https://www.dolibarr.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/dolibarr.webp", - "config_path": "/usr/share/dolibarr/htdocs/conf/conf.php.example", - "description": "Dolibarr ERP CRM is a modern software package to manage your company or foundation's activity (contacts, suppliers, invoices, orders, stocks, agenda, accounting, ...). 
It's an open source Web application (written in PHP) designed for businesses of any size, foundations and freelancers.", - "install_methods": [ - { - "type": "default", - "script": "ct/dolibarr.sh", - "resources": { - "cpu": 1, - "ram": 2048, - "hdd": 6, - "os": "debian", - "version": "12" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Database credentials: `cat ~/dolibarr.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/domain-locker.json b/frontend/public/json/domain-locker.json deleted file mode 100644 index 83eebc913..000000000 --- a/frontend/public/json/domain-locker.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Domain Locker", - "slug": "domain-locker", - "categories": [ - 9 - ], - "date_created": "2025-11-17", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://domain-locker.com/about", - "config_path": "/opt/domain-locker.env", - "website": "https://github.com/Lissy93/domain-locker", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/domain-locker.webp", - "description": "The all-in-one tool for keeping track of your domain name portfolio. Got domain names? Get Domain Locker! 
", - "install_methods": [ - { - "type": "default", - "script": "ct/domain-locker.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 8, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Show DB credentials: `cat ~/Domain-Locker.creds`", - "type": "info" - }, - { - "text": "Domain-locker takes quite some time to build and a lot of ressources, RAM and Cores can be lowered after install.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/domain-monitor.json b/frontend/public/json/domain-monitor.json deleted file mode 100644 index b519bbf20..000000000 --- a/frontend/public/json/domain-monitor.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Domain Monitor", - "slug": "domain-monitor", - "categories": [ - 9 - ], - "date_created": "2025-11-11", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://github.com/Hosteroid/domain-monitor/blob/main/README.md", - "config_path": "/opt/domain-monitor/.env", - "website": "https://github.com/Hosteroid/domain-monitor", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/png/domain-monitor.png", - "description": "A self-hosted PHP domain expiration monitoring tool that tracks domain expiry dates, RDAP/WHOIS data, and SSL certificate validity. Supports alerts, multi-user setup, and cron automation. 
Built for developers, hosting providers, and IT admins who want full control without third-party services.", - "install_methods": [ - { - "type": "default", - "script": "ct/domain-monitor.sh", - "resources": { - "cpu": 2, - "ram": 512, - "hdd": 2, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/donetick.json b/frontend/public/json/donetick.json deleted file mode 100644 index b6e9107bd..000000000 --- a/frontend/public/json/donetick.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Donetick", - "slug": "donetick", - "categories": [ - 19 - ], - "date_created": "2025-11-03", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 2021, - "documentation": "https://docs.donetick.com/getting-started/", - "config_path": "/opt/donetick/config/selfhosted.yaml", - "website": "https://donetick.com", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/donetick.webp", - "description": "Donetick an open-source, user-friendly app for managing tasks and chores, featuring customizable options to help you and others stay organized", - "install_methods": [ - { - "type": "default", - "script": "ct/donetick.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/dotnetaspwebapi.json b/frontend/public/json/dotnetaspwebapi.json deleted file mode 100644 index e6dc869da..000000000 --- a/frontend/public/json/dotnetaspwebapi.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Dotnet ASP Web API", - "slug": "dotnetaspwebapi", - "categories": [ - 20 - ], - "date_created": "2025-01-15", - "type": "ct", - "updateable": true, - "privileged": true, - "interface_port": 80, - "documentation": 
"https://learn.microsoft.com/en-us/aspnet/core/host-and-deploy/linux-nginx?view=aspnetcore-9.0&tabs=linux-ubuntu", - "website": "https://learn.microsoft.com/en-us/aspnet/core/host-and-deploy/linux-nginx?view=aspnetcore-9.0&tabs=linux-ubuntu", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/asp-net-core.webp", - "config_path": "", - "description": "Automatically setup a ASP.NET server up, as well as a FTP server so you can publish to this container from Visual Studio.", - "install_methods": [ - { - "type": "default", - "script": "ct/dotnetaspwebapi.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 8, - "os": "Ubuntu", - "version": "24.04" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "FTP server credentials: `cat ~/ftp.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/drawio.json b/frontend/public/json/drawio.json deleted file mode 100644 index c74e1ffbe..000000000 --- a/frontend/public/json/drawio.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Draw.IO", - "slug": "drawio", - "categories": [ - 12 - ], - "date_created": "2026-02-11", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://www.drawio.com/doc/", - "website": "https://www.drawio.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/draw-io.webp", - "config_path": "", - "description": "draw.io is a configurable diagramming and whiteboarding application, jointly owned and developed by draw.io Ltd (previously named JGraph) and draw.io AG.", - "install_methods": [ - { - "type": "default", - "script": "ct/drawio.sh", - "resources": { - "cpu": 1, - "ram": 2048, - "hdd": 4, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/duplicati.json b/frontend/public/json/duplicati.json deleted file mode 100644 index 
b87a286c8..000000000 --- a/frontend/public/json/duplicati.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Duplicati", - "slug": "duplicati", - "categories": [ - 7 - ], - "date_created": "2025-02-06", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8200, - "documentation": "https://docs.duplicati.com/", - "website": "https://duplicati.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/duplicati.webp", - "config_path": "", - "description": "Duplicati is a free, open-source backup solution that offers zero-trust, fully encrypted backups for your data.", - "install_methods": [ - { - "type": "default", - "script": "ct/duplicati.sh", - "resources": { - "cpu": 1, - "ram": 1048, - "hdd": 10, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Admin password and database encryption key: `cat ~/duplicati.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/ebusd.json b/frontend/public/json/ebusd.json deleted file mode 100644 index ccd9549fd..000000000 --- a/frontend/public/json/ebusd.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "ebusd", - "slug": "ebusd", - "categories": [ - 16 - ], - "date_created": "2026-02-15", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://github.com/john30/ebusd/wiki", - "website": "https://github.com/john30/ebusd", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ebusd.webp", - "config_path": "/etc/default/ebusd", - "description": "ebusd is a daemon for handling communication with eBUS devices connected to a 2-wire `energy bus` used by numerous heating systems.", - "install_methods": [ - { - "type": "default", - "script": "ct/ebusd.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "root", - "password": 
null - }, - "notes": [ - { - "text": "For required post installation actions, checkout: `https://github.com/community-scripts/ProxmoxVE/discussions/11352`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/elementsynapse.json b/frontend/public/json/elementsynapse.json deleted file mode 100644 index e1a304be8..000000000 --- a/frontend/public/json/elementsynapse.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "Element Synapse", - "slug": "elementsynapse", - "categories": [ - 4 - ], - "date_created": "2025-02-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8008, - "documentation": "https://element-hq.github.io/synapse/latest/welcome_and_overview.html", - "website": "https://element.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/element.webp", - "config_path": "/etc/matrix-synapse/homeserver.yaml", - "description": "Synapse is an open source Matrix homeserver implementation, written and maintained by Element. Matrix is the open standard for secure and interoperable real time communications. You can directly run and manage the source code in this repository, available under an AGPL license. There is no support provided from Element unless you have a subscription.", - "install_methods": [ - { - "type": "default", - "script": "ct/elementsynapse.sh", - "resources": { - "cpu": 1, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "type": "info", - "text": "Type `cat ~/matrix.creds` to see admin username/password." 
- }, - { - "type": "info", - "text": "Synapse-Admin is running on port 5173" - }, - { - "type": "info", - "text": "For bridges Installation methods (WhatsApp, Signal, Discord, etc.), see: ´https://docs.mau.fi/bridges/go/setup.html´" - } - ] -} diff --git a/frontend/public/json/emby.json b/frontend/public/json/emby.json deleted file mode 100644 index 229f600f6..000000000 --- a/frontend/public/json/emby.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Emby Media Server", - "slug": "emby", - "categories": [ - 13 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8096, - "documentation": "https://emby.media/support/articles/Home.html", - "website": "https://emby.media/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/emby.webp", - "config_path": "/var/lib/emby/config/system.xml", - "description": "Emby brings together your personal videos, music, photos, and live television.", - "install_methods": [ - { - "type": "default", - "script": "ct/emby.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "ubuntu", - "version": "24.04" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "With Privileged/Unprivileged Hardware Acceleration Support", - "type": "info" - } - ] -} diff --git a/frontend/public/json/emqx.json b/frontend/public/json/emqx.json deleted file mode 100644 index 2bf776401..000000000 --- a/frontend/public/json/emqx.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "EMQX", - "slug": "emqx", - "categories": [ - 18 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 18083, - "documentation": "https://docs.emqx.com/en/emqx/latest/", - "website": "https://www.emqx.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/emqx.webp", - "config_path": "/etc/emqx", - "description": "EMQX is an open-source MQTT broker that features a 
high-performance, real-time message processing engine. It is designed to handle large-scale IoT deployments, providing fast and reliable message delivery for connected devices. EMQX is known for its scalability, reliability, and low latency, making it a popular choice for IoT and M2M applications. It also offers a wide range of features and plugins for enhanced security, monitoring, and management.", - "install_methods": [ - { - "type": "default", - "script": "ct/emqx.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 6, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "public" - }, - "notes": [ - { - "text": "Setup-Steps: Access Control ➡ Authentication ➡ Create ➡ Next ➡ Next ➡ Create ➡ Users ➡ Add ➡ Username / Password (to authenicate with MQTT) ➡ Save. You're now ready to enjoy a high-performance MQTT Broker.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/endurain.json b/frontend/public/json/endurain.json deleted file mode 100644 index 7873b4c35..000000000 --- a/frontend/public/json/endurain.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Endurain", - "slug": "endurain", - "categories": [ - 24 - ], - "date_created": "2025-12-05", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://docs.endurain.com/", - "website": "https://github.com/joaovitoriasilva/endurain", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/endurain.webp", - "config_path": "/opt/endurain/.env", - "description": "Endurain is a self-hosted fitness tracking service designed to give users full control over their data and hosting environment. 
It's similar to Strava but focused on privacy and customization", - "install_methods": [ - { - "type": "default", - "script": "ct/endurain.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 5, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "admin" - }, - "notes": [ - { - "text": "When using a reverse proxy, edit `/opt/endurain/frontend/app/dist/env.js`.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/ersatztv.json b/frontend/public/json/ersatztv.json deleted file mode 100644 index 2ebf7e8a0..000000000 --- a/frontend/public/json/ersatztv.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "ErsatzTV", - "slug": "ersatztv", - "categories": [ - 13 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8409, - "documentation": "https://ersatztv.org/docs/intro", - "website": "https://ersatztv.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ersatztv.webp", - "config_path": "", - "description": "ErsatzTV is software for configuring and streaming custom live channels using your media library.", - "install_methods": [ - { - "type": "default", - "script": "ct/ersatztv.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 5, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/esphome.json b/frontend/public/json/esphome.json deleted file mode 100644 index 0d51bc1c0..000000000 --- a/frontend/public/json/esphome.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "ESPHome", - "slug": "esphome", - "categories": [ - 16 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 6052, - "documentation": "https://esphome.io/components/", - "website": "https://esphome.io/", - "logo": 
"https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/esphome.webp", - "config_path": "/root/config/", - "description": "ESPHome is a platform for controlling ESP8266/ESP32-based devices using configuration files and integrating them with Home Automation systems. It provides a simple and flexible way to set up and manage the functionality of these devices, including defining and automating actions, monitoring sensors, and connecting to networks and other services. ESPHome is designed to be user-friendly and easy to use, and supports a wide range of features and integrations, making it a popular choice for home automation projects and IoT applications.", - "install_methods": [ - { - "type": "default", - "script": "ct/esphome.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 10, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/evcc.json b/frontend/public/json/evcc.json deleted file mode 100644 index e3d20f23c..000000000 --- a/frontend/public/json/evcc.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "evcc", - "slug": "evcc", - "categories": [ - 16 - ], - "date_created": "2024-10-15", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 7070, - "documentation": "https://evcc.io/#devices", - "website": "https://evcc.io/en/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/evcc.webp", - "config_path": "", - "description": "EVCC is an open-source tool that manages EV charging, prioritizing solar energy use to reduce costs and optimize charging times. 
It supports various EVs and chargers, adjusting power automatically based on real-time data.", - "install_methods": [ - { - "type": "default", - "script": "ct/evcc.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "To configure evcc, type `cd /etc` followed by `evcc configure` in the evcc LXC shell", - "type": "info" - } - ] -} diff --git a/frontend/public/json/excalidraw.json b/frontend/public/json/excalidraw.json deleted file mode 100644 index 6620a5bc2..000000000 --- a/frontend/public/json/excalidraw.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Excalidraw", - "slug": "excalidraw", - "categories": [ - 12 - ], - "date_created": "2025-02-12", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://docs.excalidraw.com/docs", - "website": "https://excalidraw.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/excalidraw.webp", - "config_path": "", - "description": "An open source virtual hand-drawn style whiteboard. 
Collaborative and end-to-end encrypted.", - "install_methods": [ - { - "type": "default", - "script": "ct/excalidraw.sh", - "resources": { - "cpu": 2, - "ram": 3072, - "hdd": 10, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/execute.json b/frontend/public/json/execute.json deleted file mode 100644 index 56e4d112a..000000000 --- a/frontend/public/json/execute.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "PVE LXC Execute Command", - "slug": "execute", - "categories": [ - 1 - ], - "date_created": "2025-09-18", - "type": "pve", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "", - "description": "This script allows administrators to execute a custom command inside one or multiple LXC containers on a Proxmox VE node. Containers can be selectively excluded via an interactive checklist. If a container is stopped, the script will automatically start it, run the command, and then shut it down again. 
Only Debian and Ubuntu based containers are supported.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/execute.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within the Proxmox shell.", - "type": "info" - }, - { - "text": "Non-Debian/Ubuntu containers will be skipped automatically.", - "type": "info" - }, - { - "text": "Stopped containers will be started temporarily to run the command, then shut down again.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/fhem.json b/frontend/public/json/fhem.json deleted file mode 100644 index 247a29e1e..000000000 --- a/frontend/public/json/fhem.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "FHEM", - "slug": "fhem", - "categories": [ - 16 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8083, - "documentation": "https://fhem.de/#Documentation", - "website": "https://fhem.de/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/fhem.webp", - "config_path": "", - "description": "FHEM stands for \"Freundliche Hausautomation und Energie-Messung,\" which translates to \"Friendly Home Automation and Energy Measurement\" in English. 
The software can interface with a wide range of devices, including lighting systems, thermostats, weather stations, and media devices, among others.", - "install_methods": [ - { - "type": "default", - "script": "ct/fhem.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "If the LXC is created Privileged, the script will automatically set up USB passthrough.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/filebrowser-quantum.json b/frontend/public/json/filebrowser-quantum.json deleted file mode 100644 index 3da00bb0e..000000000 --- a/frontend/public/json/filebrowser-quantum.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "name": "FileBrowser Quantum", - "slug": "filebrowser-quantum", - "categories": [ - 1, - 11 - ], - "date_created": "2025-06-18", - "type": "addon", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://github.com/gtsteffaniak/filebrowser/wiki/Getting-Started", - "website": "https://github.com/gtsteffaniak/filebrowser", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/filebrowser-quantum.webp", - "config_path": "/usr/local/community-scripts/fq-config.yaml", - "description": "FileBrowser Quantum provides an easy way to access and manage your files from the web. It has has a web page interface that allows you to create secure shared links, users with their own specific permissions and settings, and offers a great viewing experience for many file types. This version is called Quantum because it packs tons of advanced features into a tiny easy to run file. 
Unlike the majority of alternative options, FileBrowser Quantum is simple to install and easy to configure.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/filebrowser-quantum.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - }, - { - "type": "alpine", - "script": "tools/addon/filebrowser-quantum.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": "admin", - "password": "helper-scripts.com" - }, - "notes": [] -} diff --git a/frontend/public/json/filebrowser.json b/frontend/public/json/filebrowser.json deleted file mode 100644 index d8953fa25..000000000 --- a/frontend/public/json/filebrowser.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "name": "File Browser", - "slug": "filebrowser", - "categories": [ - 1, - 11 - ], - "date_created": "2024-05-02", - "type": "addon", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": null, - "website": "https://filebrowser.org/index.html#features", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/file-browser.webp", - "config_path": "", - "description": "File Browser offers a user-friendly web interface for managing files within a designated directory. 
It allows you to perform various actions such as uploading, deleting, previewing, renaming, and editing files.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/filebrowser.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - }, - { - "type": "alpine", - "script": "tools/addon/filebrowser.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": "admin", - "password": "helper-scripts.com" - }, - "notes": [ - { - "text": "Execute within an existing LXC Console", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/fileflows.json b/frontend/public/json/fileflows.json deleted file mode 100644 index e618de8b9..000000000 --- a/frontend/public/json/fileflows.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "FileFlows", - "slug": "fileflows", - "categories": [ - 13 - ], - "date_created": "2025-03-24", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 19200, - "documentation": "https://fileflows.com/docs", - "website": "https://fileflows.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/fileflows.webp", - "config_path": "/opt/fileflows/Data/server.config", - "description": "FileFlows is a powerful, open-source tool for automating media file processing workflows, including encoding, decoding, and media management. 
It offers an intuitive GUI and extensive plugin support, making it ideal for tasks like video transcoding, organizing, and managing large media libraries.", - "install_methods": [ - { - "type": "default", - "script": "ct/fileflows.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "With Privileged/Unprivileged Hardware Acceleration Support", - "type": "info" - } - ] -} diff --git a/frontend/public/json/firefly.json b/frontend/public/json/firefly.json deleted file mode 100644 index 70bb49b98..000000000 --- a/frontend/public/json/firefly.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Firefly III", - "slug": "firefly", - "categories": [ - 23 - ], - "date_created": "2025-01-01", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://docs.firefly-iii.org/", - "website": "https://firefly-iii.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/firefly-iii.webp", - "config_path": "/opt/firefly/.env", - "description": "Firefly III is a free, self-hosted tool for managing your finances. 
Track expenses, plan budgets, and get detailed reports.", - "install_methods": [ - { - "type": "default", - "script": "ct/firefly.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Data Importer is at `http:///dataimporter/`", - "type": "info" - }, - { - "text": "Database credentials: `cat ~/firefly.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/fladder.json b/frontend/public/json/fladder.json deleted file mode 100644 index 2bf95eca6..000000000 --- a/frontend/public/json/fladder.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Fladder", - "slug": "fladder", - "categories": [ - 13 - ], - "date_created": "2026-01-13", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://github.com/DonutWare/Fladder/blob/develop/INSTALL.md#ubuntudebian", - "website": "https://github.com/DonutWare/Fladder", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/fladder.webp", - "config_path": "/opt/fladder/assets/config/config.json", - "description": "Fladder is a simple Jellyfin frontend built on top of Flutter. 
It provides a modern interface to stream and sync content locally, manage libraries, support multiple profiles, and offers direct, transcode and offline playback with media segments skipping.", - "install_methods": [ - { - "type": "default", - "script": "ct/fladder.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/flaresolverr.json b/frontend/public/json/flaresolverr.json deleted file mode 100644 index 74f21872c..000000000 --- a/frontend/public/json/flaresolverr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "FlareSolverr", - "slug": "flaresolverr", - "categories": [ - 14 - ], - "date_created": "2024-06-12", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8191, - "documentation": "https://github.com/FlareSolverr/FlareSolverr/blob/master/README.md", - "website": "https://github.com/FlareSolverr/FlareSolverr", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/flaresolverr.webp", - "config_path": "", - "description": "FlareSolverr is a proxy server to bypass Cloudflare and DDoS-GUARD protection.", - "install_methods": [ - { - "type": "default", - "script": "ct/flaresolverr.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/flatnotes.json b/frontend/public/json/flatnotes.json deleted file mode 100644 index 733e80026..000000000 --- a/frontend/public/json/flatnotes.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Flatnotes", - "slug": "flatnotes", - "categories": [ - 12 - ], - "date_created": "2026-01-16", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://github.com/dullage/flatnotes/wiki", - "website": 
"https://github.com/dullage/flatnotes", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/flatnotes.webp", - "config_path": "/opt/flatnotes/.env", - "description": "A self-hosted, database-less note-taking web app that utilises a flat folder of markdown files for storage.", - "install_methods": [ - { - "type": "default", - "script": "ct/flatnotes.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/flowiseai.json b/frontend/public/json/flowiseai.json deleted file mode 100644 index 1fd45e03d..000000000 --- a/frontend/public/json/flowiseai.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "FlowiseAI", - "slug": "flowiseai", - "categories": [ - 20 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://docs.flowiseai.com/", - "website": "https://flowiseai.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/flowise.webp", - "config_path": "/opt/flowiseai/.env", - "description": "FlowiseAI is an open source low-code tool for developers to build customized LLM orchestration flow & AI agents", - "install_methods": [ - { - "type": "default", - "script": "ct/flowiseai.sh", - "resources": { - "cpu": 4, - "ram": 4096, - "hdd": 10, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Application takes long time to install. 
Please be patient!", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/fluid-calendar.json b/frontend/public/json/fluid-calendar.json deleted file mode 100644 index 9c9a1010e..000000000 --- a/frontend/public/json/fluid-calendar.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "name": "Fluid-Calendar", - "slug": "fluid-calendar", - "categories": [ - 19, - 0 - ], - "date_created": "2025-03-12", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://github.com/dotnetfactory/fluid-calendar/tree/main/docs", - "website": "https://github.com/dotnetfactory/fluid-calendar", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/fluidcalendar.webp", - "config_path": "/opt/fluid-calendar/.env", - "description": "The open-source intelligent calendar that adapts to your workflow. Experience seamless task scheduling powered by AI, designed to make your time management effortless.", - "install_methods": [ - { - "type": "default", - "script": "ct/fluid-calendar.sh", - "resources": { - "cpu": 3, - "ram": 4096, - "hdd": 7, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Creds: cat ~/fluid-calendar.creds", - "type": "info" - } - ] -} diff --git a/frontend/public/json/forgejo.json b/frontend/public/json/forgejo.json deleted file mode 100644 index abab02bcd..000000000 --- a/frontend/public/json/forgejo.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "name": "Forgejo", - "slug": "forgejo", - "categories": [ - 20 - ], - "date_created": "2024-06-12", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://forgejo.org/docs/latest/", - "website": "https://forgejo.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/forgejo.webp", - "config_path": "/etc/forgejo/app.ini", - "description": "Forgejo is an open-source, self-hosted Git service that allows 
individuals and teams to manage their code repositories.", - "install_methods": [ - { - "type": "default", - "script": "ct/forgejo.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 10, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-forgejo.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 1, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/freepbx.json b/frontend/public/json/freepbx.json deleted file mode 100644 index 0188d2f2b..000000000 --- a/frontend/public/json/freepbx.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "FreePBX", - "slug": "freepbx", - "categories": [ - 0 - ], - "date_created": "2025-05-22", - "type": "ct", - "updateable": false, - "privileged": false, - "interface_port": 80, - "documentation": "https://sangomakb.atlassian.net/wiki/spaces/FP/overview?homepageId=8454359", - "website": "https://www.freepbx.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/freepbx.webp", - "config_path": "", - "description": "FreePBX is a web-based open-source graphical user interface that manages Asterisk, a voice over IP and telephony server.", - "install_methods": [ - { - "type": "default", - "script": "ct/freepbx.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 10, - "os": "debian", - "version": "12" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "This script uses the official FreePBX install script. 
Check it here: https://github.com/FreePBX/sng_freepbx_debian_install", - "type": "info" - } - ] -} diff --git a/frontend/public/json/freshrss.json b/frontend/public/json/freshrss.json deleted file mode 100644 index af4552f6e..000000000 --- a/frontend/public/json/freshrss.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "FreshRSS", - "slug": "freshrss", - "categories": [ - 12 - ], - "date_created": "2025-02-10", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://freshrss.github.io/FreshRSS/en/", - "website": "https://freshrss.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/freshrss.webp", - "config_path": "/opt/freshrss/data/config.php", - "description": "FreshRSS is a self-hosted RSS and Atom feed aggregator that lets users collect, organize, and read from multiple sources in one place. It is lightweight, easy to work with, powerful, and customizable.", - "install_methods": [ - { - "type": "default", - "script": "ct/freshrss.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Database credentials: `cat ~/freshrss.creds`", - "type": "info" - }, - { - "text": "Per FreshRSS documentation, a cron job to actualize FreshRSS will be setup at `/etc/cron.d/freshrss-actualize`. 
This can be adjusted as needed", - "type": "info" - } - ] -} diff --git a/frontend/public/json/frigate.json b/frontend/public/json/frigate.json deleted file mode 100644 index 8ff706d0d..000000000 --- a/frontend/public/json/frigate.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Frigate", - "slug": "frigate", - "categories": [ - 15 - ], - "date_created": "2026-02-23", - "type": "ct", - "updateable": false, - "privileged": false, - "config_path": "/config/config.yml", - "interface_port": 5000, - "documentation": "https://docs.frigate.video/", - "website": "https://frigate.video/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/frigate-light.webp", - "description": "Frigate is a complete and local NVR (Network Video Recorder) with realtime AI object detection for CCTV cameras.", - "install_methods": [ - { - "type": "default", - "script": "ct/frigate.sh", - "resources": { - "cpu": 4, - "ram": 4096, - "hdd": 20, - "os": "Debian", - "version": "12" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "SemanticSearch is not pre-installed due to high resource requirements (8+ cores, 16-24GB RAM, GPU recommended). Manual configuration required if needed.", - "type": "info" - }, - { - "text": "OpenVino detector may fail on older CPUs (pre-Haswell/AVX2). 
If you encounter 'Illegal instruction' errors, consider using alternative detectors.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/fstrim.json b/frontend/public/json/fstrim.json deleted file mode 100644 index 3277d1e46..000000000 --- a/frontend/public/json/fstrim.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "PVE LXC Filesystem Trim", - "slug": "fstrim", - "categories": [ - 1 - ], - "date_created": "2024-04-29", - "type": "pve", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": "https://github.com/community-scripts/ProxmoxVE/discussions/805", - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/linuxcontainers.webp", - "config_path": "", - "description": "This maintains SSD performance by managing unused blocks. Thin-provisioned storage systems also require management to prevent unnecessary storage use. VMs automate fstrim, while LXC containers need manual or automated fstrim processes for optimal performance.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/fstrim.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within the Proxmox shell", - "type": "info" - }, - { - "text": "This is designed to work with SSDs on ext4 filesystems only.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/fumadocs.json b/frontend/public/json/fumadocs.json deleted file mode 100644 index 0974cce33..000000000 --- a/frontend/public/json/fumadocs.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Fumadocs", - "slug": "fumadocs", - "categories": [ - 10 - ], - "date_created": "2025-05-06", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://fumadocs.vercel.app/docs/ui", - "website": "https://fumadocs.vercel.app/", - "logo": 
"https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/fumadocs.webp", - "config_path": "", - "description": "Fumadocs is a flexible and high-performance framework for creating well-structured documentation websites using Next.js. It allows developers to write content and transform it into structured data. Fumadocs supports various content sources, including MDX and Content Collections, and integrates search solutions like Orama and Algolia. It also provides interactive components to enhance the user experience.", - "install_methods": [ - { - "type": "default", - "script": "ct/fumadocs.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 5, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/garage.json b/frontend/public/json/garage.json deleted file mode 100644 index 23ea0a4ff..000000000 --- a/frontend/public/json/garage.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "name": "Garage", - "slug": "garage", - "categories": [ - 8 - ], - "date_created": "2025-10-27", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3900, - "documentation": "https://garagehq.deuxfleurs.fr/documentation/quick-start/", - "website": "https://garagehq.deuxfleurs.fr/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/garage.webp", - "config_path": "/etc/garage.toml", - "description": "Garage is a lightweight, self-hosted, S3-compatible object storage service built for distributed environments. 
It is designed to be simple, efficient, and easy to deploy across multiple nodes.", - "install_methods": [ - { - "type": "default", - "script": "ct/garage.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 5, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-garage.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 5, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "The Garage configuration file is located at `/etc/garage.toml`. You can edit RPC and API bindings, tokens, and data directories there.", - "type": "info" - }, - { - "text": "Admin API runs by default on port `3903`, S3 API on port `3900`, Web UI on `3902`. Adjust firewall rules accordingly.", - "type": "warning" - }, - { - "text": "To view your generated tokens and RPC secret, check `~/garage.creds` after installation.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/gatus.json b/frontend/public/json/gatus.json deleted file mode 100644 index a436dfe40..000000000 --- a/frontend/public/json/gatus.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "name": "gatus", - "slug": "gatus", - "categories": [ - 9 - ], - "date_created": "2025-05-13", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://gatus.io/docs", - "website": "https://gatus.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/gatus.webp", - "config_path": "/opt/gatus/config/config.yaml", - "description": "Gatus is a developer-oriented health dashboard that gives you the ability to monitor your services using HTTP, ICMP, TCP, and even DNS queries as well as evaluate the result of said queries by using a list of conditions on values like the status code, the response time, the certificate expiration, the body and many others. 
The icing on top is that each of these health checks can be paired with alerting via Slack, Teams, PagerDuty, Discord, Twilio and many more.", - "install_methods": [ - { - "type": "default", - "script": "ct/gatus.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-gatus.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 3, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/ghost.json b/frontend/public/json/ghost.json deleted file mode 100644 index 494d87391..000000000 --- a/frontend/public/json/ghost.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Ghost", - "slug": "ghost", - "categories": [ - 25 - ], - "date_created": "2025-01-10", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 2368, - "documentation": "https://ghost.org/docs/", - "website": "https://ghost.org", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ghost.webp", - "config_path": "", - "description": "Ghost is a powerful app for professional publishers to create, share, and grow a business around their content. It comes with modern tools to build a website, publish content, send newsletters & offer paid subscriptions to members.", - "install_methods": [ - { - "type": "default", - "script": "ct/ghost.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 5, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "To run Ghost-CLI commands, first set a password for the ghost-user by running `sudo passwd ghost-user`. 
Then, switch to the ghost-user with `sudo -su ghost-user`.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/ghostfolio.json b/frontend/public/json/ghostfolio.json deleted file mode 100644 index 5140ddc54..000000000 --- a/frontend/public/json/ghostfolio.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "name": "Ghostfolio", - "slug": "ghostfolio", - "categories": [ - 23 - ], - "date_created": "2025-09-29", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3333, - "documentation": "https://github.com/ghostfolio/ghostfolio?tab=readme-ov-file#self-hosting", - "website": "https://ghostfol.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ghostfolio.webp", - "config_path": "/opt/ghostfolio/.env", - "description": "Ghostfolio is an open source wealth management software built with web technology. The application empowers busy people to keep track of stocks, ETFs or cryptocurrencies and make solid, data-driven investment decisions.", - "install_methods": [ - { - "type": "default", - "script": "ct/ghostfolio.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Create your first user account by visiting the web interface and clicking 'Get Started'. The first user will automatically get admin privileges.", - "type": "info" - }, - { - "text": "Database and Redis credentials: `cat ~/ghostfolio.creds`", - "type": "info" - }, - { - "text": "Optional: CoinGecko API keys can be added during installation or later in the .env file for enhanced cryptocurrency data.", - "type": "info" - }, - { - "text": "Build process requires 4GB RAM (runtime: ~2GB). 
A temporary swap file will be created automatically if insufficient memory is detected.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/gitea-mirror.json b/frontend/public/json/gitea-mirror.json deleted file mode 100644 index 5dafe2764..000000000 --- a/frontend/public/json/gitea-mirror.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Gitea-Mirror", - "slug": "gitea-mirror", - "categories": [ - 7 - ], - "date_created": "2025-06-05", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 4321, - "documentation": "https://github.com/RayLabsHQ/gitea-mirror/", - "config_path": "/etc/systemd/system/gitea-mirror.service", - "website": "https://github.com/RayLabsHQ/gitea-mirror/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/gitea-mirror.webp", - "description": "Gitea Mirror auto-syncs GitHub repos to your self-hosted Gitea, with a sleek Web UI and easy Docker deployment. ", - "install_methods": [ - { - "type": "default", - "script": "ct/gitea-mirror.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 6, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/gitea.json b/frontend/public/json/gitea.json deleted file mode 100644 index 341af27df..000000000 --- a/frontend/public/json/gitea.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "name": "Gitea", - "slug": "gitea", - "categories": [ - 20 - ], - "date_created": "2024-07-26", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://docs.gitea.com/", - "website": "https://gitea.com", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/gitea.webp", - "config_path": "/etc/gitea/app.ini", - "description": "Gitea is a self-hosted Git service. It provides a lightweight and easy-to-install solution for managing Git repositories. 
Users can collaborate on code, track issues, and manage project tasks. Gitea includes features like pull requests, code reviews, wiki, and project management tools. It is suitable for small to medium-sized teams seeking control over their Git hosting.", - "install_methods": [ - { - "type": "default", - "script": "ct/gitea.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 8, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-gitea.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 1, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "The script installs SQLite3 as default database provider. You will be asked to select database type when you initially open the application URL. Select SQLite3 or provide your own database.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/github-versions.json b/frontend/public/json/github-versions.json deleted file mode 100644 index 9a9542c9f..000000000 --- a/frontend/public/json/github-versions.json +++ /dev/null @@ -1,1853 +0,0 @@ -{ - "generated": "2026-03-11T12:12:40Z", - "versions": [ - { - "slug": "2fauth", - "repo": "Bubka/2FAuth", - "version": "v6.1.0", - "pinned": false, - "date": "2026-03-11T07:48:27Z" - }, - { - "slug": "adguard", - "repo": "AdguardTeam/AdGuardHome", - "version": "v0.107.73", - "pinned": false, - "date": "2026-03-10T17:23:23Z" - }, - { - "slug": "adguardhome-sync", - "repo": "bakito/adguardhome-sync", - "version": "v0.9.0", - "pinned": false, - "date": "2026-02-27T18:37:37Z" - }, - { - "slug": "adventurelog", - "repo": "seanmorley15/adventurelog", - "version": "v0.12.0", - "pinned": false, - "date": "2026-02-23T14:06:45Z" - }, - { - "slug": "alpine-redlib", - "repo": "redlib-org/redlib", - "version": "v0.36.0", - "pinned": false, - "date": "2025-03-20T03:06:11Z" - }, - { - "slug": "ampache", - "repo": "ampache/ampache", - "version": "7.9.2", - 
"pinned": false, - "date": "2026-03-05T03:54:03Z" - }, - { - "slug": "argus", - "repo": "release-argus/Argus", - "version": "0.29.3", - "pinned": false, - "date": "2026-01-28T02:07:24Z" - }, - { - "slug": "aria2", - "repo": "mayswind/ariang", - "version": "1.3.13", - "pinned": false, - "date": "2026-01-25T07:56:27Z" - }, - { - "slug": "authelia", - "repo": "authelia/authelia", - "version": "v4.39.15", - "pinned": false, - "date": "2025-11-29T12:13:04Z" - }, - { - "slug": "autobrr", - "repo": "autobrr/autobrr", - "version": "v1.74.0", - "pinned": false, - "date": "2026-03-08T21:45:41Z" - }, - { - "slug": "autocaliweb", - "repo": "pgaskin/kepubify", - "version": "v4.0.4", - "pinned": false, - "date": "2022-03-09T21:10:17Z" - }, - { - "slug": "babybuddy", - "repo": "babybuddy/babybuddy", - "version": "v2.7.1", - "pinned": false, - "date": "2025-02-22T01:14:41Z" - }, - { - "slug": "backrest", - "repo": "garethgeorge/backrest", - "version": "v1.12.1", - "pinned": false, - "date": "2026-03-11T06:16:22Z" - }, - { - "slug": "baikal", - "repo": "sabre-io/Baikal", - "version": "0.11.1", - "pinned": false, - "date": "2025-11-30T14:54:03Z" - }, - { - "slug": "bar-assistant", - "repo": "karlomikus/bar-assistant", - "version": "v5.13.2", - "pinned": false, - "date": "2026-03-08T10:47:13Z" - }, - { - "slug": "bazarr", - "repo": "morpheus65535/bazarr", - "version": "v1.5.6", - "pinned": false, - "date": "2026-02-26T11:33:11Z" - }, - { - "slug": "bentopdf", - "repo": "alam00000/bentopdf", - "version": "v2.5.0", - "pinned": false, - "date": "2026-03-10T08:40:54Z" - }, - { - "slug": "beszel", - "repo": "henrygd/beszel", - "version": "v0.18.4", - "pinned": false, - "date": "2026-02-20T21:10:52Z" - }, - { - "slug": "bichon", - "repo": "rustmailer/bichon", - "version": "0.3.7", - "pinned": false, - "date": "2026-01-28T12:47:09Z" - }, - { - "slug": "bitmagnet", - "repo": "bitmagnet-io/bitmagnet", - "version": "v0.10.0", - "pinned": false, - "date": "2025-03-02T15:13:47Z" - }, - { - 
"slug": "blocky", - "repo": "0xERR0R/blocky", - "version": "v0.29.0", - "pinned": false, - "date": "2026-02-27T15:48:56Z" - }, - { - "slug": "booklore", - "repo": "booklore-app/BookLore", - "version": "v2.1.0", - "pinned": false, - "date": "2026-03-08T20:27:24Z" - }, - { - "slug": "bookstack", - "repo": "BookStackApp/BookStack", - "version": "v25.12.8", - "pinned": false, - "date": "2026-02-27T10:33:14Z" - }, - { - "slug": "byparr", - "repo": "ThePhaseless/Byparr", - "version": "v2.1.0", - "pinned": false, - "date": "2026-02-08T12:59:20Z" - }, - { - "slug": "bytestash", - "repo": "jordan-dalby/ByteStash", - "version": "v1.5.11", - "pinned": false, - "date": "2026-02-03T22:12:19Z" - }, - { - "slug": "caddy", - "repo": "caddyserver/xcaddy", - "version": "v0.4.5", - "pinned": false, - "date": "2025-07-29T16:39:18Z" - }, - { - "slug": "calibre-web", - "repo": "janeczku/calibre-web", - "version": "0.6.26", - "pinned": false, - "date": "2026-02-06T21:17:44Z" - }, - { - "slug": "checkmate", - "repo": "bluewave-labs/Checkmate", - "version": "v3.5.1", - "pinned": false, - "date": "2026-03-06T21:18:36Z" - }, - { - "slug": "cleanuparr", - "repo": "Cleanuparr/Cleanuparr", - "version": "v2.7.9", - "pinned": false, - "date": "2026-03-10T18:51:23Z" - }, - { - "slug": "cloudreve", - "repo": "cloudreve/cloudreve", - "version": "4.14.1", - "pinned": false, - "date": "2026-02-15T01:39:24Z" - }, - { - "slug": "comfyui", - "repo": "comfyanonymous/ComfyUI", - "version": "v0.16.4", - "pinned": false, - "date": "2026-03-07T22:37:49Z" - }, - { - "slug": "commafeed", - "repo": "Athou/commafeed", - "version": "7.0.0", - "pinned": false, - "date": "2026-02-21T21:54:15Z" - }, - { - "slug": "configarr", - "repo": "raydak-labs/configarr", - "version": "v1.24.0", - "pinned": false, - "date": "2026-03-09T15:16:08Z" - }, - { - "slug": "convertx", - "repo": "C4illin/ConvertX", - "version": "v0.17.0", - "pinned": false, - "date": "2026-01-13T20:47:26Z" - }, - { - "slug": "cosmos", - "repo": 
"azukaar/Cosmos-Server", - "version": "v0.21.7", - "pinned": false, - "date": "2026-03-03T18:15:29Z" - }, - { - "slug": "cronicle", - "repo": "jhuckaby/Cronicle", - "version": "v0.9.107", - "pinned": false, - "date": "2026-02-23T17:48:27Z" - }, - { - "slug": "cronmaster", - "repo": "fccview/cronmaster", - "version": "2.1.0", - "pinned": false, - "date": "2026-02-11T19:29:11Z" - }, - { - "slug": "cryptpad", - "repo": "cryptpad/cryptpad", - "version": "2026.2.0", - "pinned": false, - "date": "2026-02-11T15:39:05Z" - }, - { - "slug": "databasus", - "repo": "databasus/databasus", - "version": "v3.19.1", - "pinned": false, - "date": "2026-03-11T10:25:28Z" - }, - { - "slug": "dawarich", - "repo": "Freika/dawarich", - "version": "1.3.2", - "pinned": false, - "date": "2026-03-08T20:37:50Z" - }, - { - "slug": "discopanel", - "repo": "nickheyer/discopanel", - "version": "v2.0.3", - "pinned": false, - "date": "2026-03-11T07:29:10Z" - }, - { - "slug": "dispatcharr", - "repo": "Dispatcharr/Dispatcharr", - "version": "v0.20.2", - "pinned": false, - "date": "2026-03-03T01:40:33Z" - }, - { - "slug": "docmost", - "repo": "docmost/docmost", - "version": "v0.70.1", - "pinned": false, - "date": "2026-03-04T12:54:49Z" - }, - { - "slug": "domain-locker", - "repo": "Lissy93/domain-locker", - "version": "v0.1.4", - "pinned": false, - "date": "2026-02-14T07:41:29Z" - }, - { - "slug": "domain-monitor", - "repo": "Hosteroid/domain-monitor", - "version": "v1.1.5", - "pinned": false, - "date": "2026-03-08T19:17:09Z" - }, - { - "slug": "donetick", - "repo": "donetick/donetick", - "version": "v0.1.75-beta.3", - "pinned": false, - "date": "" - }, - { - "slug": "drawio", - "repo": "jgraph/drawio", - "version": "v29.6.1", - "pinned": false, - "date": "2026-03-06T09:14:17Z" - }, - { - "slug": "duplicati", - "repo": "duplicati/duplicati", - "version": "v2.2.0.3_stable_2026-01-06", - "pinned": false, - "date": "2026-01-06T12:05:40Z" - }, - { - "slug": "ebusd", - "repo": "john30/ebusd", - "version": 
"26.1", - "pinned": false, - "date": "2026-02-09T06:09:24Z" - }, - { - "slug": "elementsynapse", - "repo": "etkecc/synapse-admin", - "version": "v0.11.4-etke54", - "pinned": false, - "date": "2026-03-08T12:37:07Z" - }, - { - "slug": "emby", - "repo": "MediaBrowser/Emby.Releases", - "version": "4.9.3.0", - "pinned": false, - "date": "2026-01-08T16:08:34Z" - }, - { - "slug": "endurain", - "repo": "endurain-project/endurain", - "version": "v0.17.6", - "pinned": false, - "date": "2026-02-27T23:08:50Z" - }, - { - "slug": "ersatztv", - "repo": "ErsatzTV/ErsatzTV", - "version": "v26.3.0", - "pinned": false, - "date": "2026-02-24T21:36:34Z" - }, - { - "slug": "excalidraw", - "repo": "excalidraw/excalidraw", - "version": "v0.18.0", - "pinned": false, - "date": "2025-03-11T12:47:22Z" - }, - { - "slug": "firefly", - "repo": "firefly-iii/firefly-iii", - "version": "v6.5.4", - "pinned": false, - "date": "2026-03-06T09:07:56Z" - }, - { - "slug": "fladder", - "repo": "DonutWare/Fladder", - "version": "v0.10.2", - "pinned": false, - "date": "2026-03-08T15:28:11Z" - }, - { - "slug": "flaresolverr", - "repo": "FlareSolverr/FlareSolverr", - "version": "v3.4.6", - "pinned": false, - "date": "2025-11-29T02:43:00Z" - }, - { - "slug": "flatnotes", - "repo": "dullage/flatnotes", - "version": "v5.5.4", - "pinned": false, - "date": "2025-10-20T20:04:09Z" - }, - { - "slug": "fluid-calendar", - "repo": "dotnetfactory/fluid-calendar", - "version": "v1.4.0", - "pinned": false, - "date": "2025-04-24T16:20:17Z" - }, - { - "slug": "freshrss", - "repo": "FreshRSS/FreshRSS", - "version": "1.28.1", - "pinned": false, - "date": "2026-01-25T18:20:14Z" - }, - { - "slug": "frigate", - "repo": "blakeblackshear/frigate", - "version": "v0.17.0", - "pinned": true, - "date": "2026-02-27T03:03:01Z" - }, - { - "slug": "gatus", - "repo": "TwiN/gatus", - "version": "v5.35.0", - "pinned": false, - "date": "2026-02-20T20:58:03Z" - }, - { - "slug": "ghostfolio", - "repo": "ghostfolio/ghostfolio", - "version": 
"2.249.0", - "pinned": false, - "date": "2026-03-10T19:26:50Z" - }, - { - "slug": "gitea", - "repo": "go-gitea/gitea", - "version": "v1.25.4", - "pinned": false, - "date": "2026-01-22T01:43:42Z" - }, - { - "slug": "gitea-mirror", - "repo": "RayLabsHQ/gitea-mirror", - "version": "v3.12.5", - "pinned": false, - "date": "2026-03-07T01:30:40Z" - }, - { - "slug": "glance", - "repo": "glanceapp/glance", - "version": "v0.8.4", - "pinned": false, - "date": "2025-06-10T07:57:14Z" - }, - { - "slug": "go2rtc", - "repo": "AlexxIT/go2rtc", - "version": "v1.9.14", - "pinned": false, - "date": "2026-01-19T09:16:56Z" - }, - { - "slug": "gokapi", - "repo": "Forceu/Gokapi", - "version": "v2.2.4", - "pinned": false, - "date": "2026-03-10T15:44:19Z" - }, - { - "slug": "gotify", - "repo": "gotify/server", - "version": "v2.9.1", - "pinned": false, - "date": "2026-02-28T19:07:07Z" - }, - { - "slug": "gramps-web", - "repo": "gramps-project/gramps-web-api", - "version": "v3.7.1.1", - "pinned": false, - "date": "2026-01-30T09:15:46Z" - }, - { - "slug": "grist", - "repo": "gristlabs/grist-core", - "version": "v1.7.11", - "pinned": false, - "date": "2026-02-27T17:13:50Z" - }, - { - "slug": "grocy", - "repo": "grocy/grocy", - "version": "v4.6.0", - "pinned": false, - "date": "2026-03-06T17:35:19Z" - }, - { - "slug": "guardian", - "repo": "HydroshieldMKII/Guardian", - "version": "v1.3.4", - "pinned": false, - "date": "2026-01-20T06:20:36Z" - }, - { - "slug": "headscale", - "repo": "juanfont/headscale", - "version": "v0.28.0", - "pinned": false, - "date": "2026-02-04T20:40:23Z" - }, - { - "slug": "healthchecks", - "repo": "healthchecks/healthchecks", - "version": "v4.0", - "pinned": false, - "date": "2026-01-22T10:21:35Z" - }, - { - "slug": "heimdall-dashboard", - "repo": "linuxserver/Heimdall", - "version": "v2.7.6", - "pinned": false, - "date": "2025-09-15T15:50:44Z" - }, - { - "slug": "hev-socks5-server", - "repo": "heiher/hev-socks5-server", - "version": "2.11.2", - "pinned": false, - 
"date": "2026-01-27T16:07:21Z" - }, - { - "slug": "hivemq", - "repo": "hivemq/hivemq-community-edition", - "version": "2025.5", - "pinned": false, - "date": "2025-11-19T14:48:47Z" - }, - { - "slug": "homarr", - "repo": "homarr-labs/homarr", - "version": "v1.55.0", - "pinned": false, - "date": "2026-03-06T19:40:16Z" - }, - { - "slug": "homebox", - "repo": "sysadminsmedia/homebox", - "version": "v0.24.2", - "pinned": false, - "date": "2026-03-09T19:54:02Z" - }, - { - "slug": "homepage", - "repo": "gethomepage/homepage", - "version": "v1.10.1", - "pinned": false, - "date": "2026-02-05T15:03:45Z" - }, - { - "slug": "homer", - "repo": "bastienwirtz/homer", - "version": "v25.11.1", - "pinned": false, - "date": "2025-11-16T13:04:21Z" - }, - { - "slug": "hortusfox", - "repo": "danielbrendel/hortusfox-web", - "version": "v5.7", - "pinned": false, - "date": "2025-12-23T14:53:51Z" - }, - { - "slug": "immich-public-proxy", - "repo": "alangrainger/immich-public-proxy", - "version": "v1.15.4", - "pinned": false, - "date": "2026-03-02T21:28:06Z" - }, - { - "slug": "immichframe", - "repo": "immichFrame/ImmichFrame", - "version": "v1.0.32.0", - "pinned": false, - "date": "2026-03-02T22:56:06Z" - }, - { - "slug": "inspircd", - "repo": "inspircd/inspircd", - "version": "v4.9.0", - "pinned": false, - "date": "2025-12-06T08:58:40Z" - }, - { - "slug": "investbrain", - "repo": "investbrainapp/investbrain", - "version": "v1.2.4", - "pinned": false, - "date": "2025-11-07T03:13:57Z" - }, - { - "slug": "invoiceninja", - "repo": "invoiceninja/invoiceninja", - "version": "v5.13.1", - "pinned": false, - "date": "2026-03-10T23:45:05Z" - }, - { - "slug": "jackett", - "repo": "Jackett/Jackett", - "version": "v0.24.1341", - "pinned": false, - "date": "2026-03-11T05:55:00Z" - }, - { - "slug": "jellystat", - "repo": "CyferShepard/Jellystat", - "version": "V1.1.8", - "pinned": false, - "date": "2026-02-08T08:15:00Z" - }, - { - "slug": "joplin-server", - "repo": "laurent22/joplin", - "version": 
"v3.5.13", - "pinned": false, - "date": "2026-02-25T21:19:11Z" - }, - { - "slug": "jotty", - "repo": "fccview/jotty", - "version": "1.21.0", - "pinned": false, - "date": "2026-03-02T11:08:54Z" - }, - { - "slug": "kapowarr", - "repo": "Casvt/Kapowarr", - "version": "V1.3.0", - "pinned": false, - "date": "2026-03-06T16:38:21Z" - }, - { - "slug": "karakeep", - "repo": "Y2Z/monolith", - "version": "v2.10.1", - "pinned": false, - "date": "2025-03-30T02:41:30Z" - }, - { - "slug": "kavita", - "repo": "Kareadita/Kavita", - "version": "v0.8.9.1", - "pinned": false, - "date": "2026-01-18T23:04:08Z" - }, - { - "slug": "keycloak", - "repo": "keycloak/keycloak", - "version": "26.5.5", - "pinned": false, - "date": "2026-03-05T15:40:30Z" - }, - { - "slug": "kima-hub", - "repo": "Chevron7Locked/kima-hub", - "version": "v1.6.3", - "pinned": false, - "date": "2026-03-10T22:26:12Z" - }, - { - "slug": "kimai", - "repo": "kimai/kimai", - "version": "2.51.0", - "pinned": false, - "date": "2026-03-01T15:56:56Z" - }, - { - "slug": "kitchenowl", - "repo": "TomBursch/kitchenowl", - "version": "v0.7.6", - "pinned": false, - "date": "2026-01-24T01:21:14Z" - }, - { - "slug": "koel", - "repo": "koel/koel", - "version": "v8.3.1", - "pinned": false, - "date": "2026-03-04T08:22:06Z" - }, - { - "slug": "koillection", - "repo": "benjaminjonard/koillection", - "version": "1.8.0", - "pinned": false, - "date": "2026-01-25T13:48:30Z" - }, - { - "slug": "kometa", - "repo": "Kometa-Team/Kometa", - "version": "v2.3.0", - "pinned": false, - "date": "2026-02-09T21:26:56Z" - }, - { - "slug": "komga", - "repo": "pgaskin/kepubify", - "version": "v4.0.4", - "pinned": false, - "date": "2022-03-09T21:10:17Z" - }, - { - "slug": "kubo", - "repo": "ipfs/kubo", - "version": "v0.40.1", - "pinned": false, - "date": "2026-02-27T17:58:22Z" - }, - { - "slug": "kutt", - "repo": "thedevs-network/kutt", - "version": "v3.2.3", - "pinned": false, - "date": "2025-02-06T07:31:37Z" - }, - { - "slug": "leantime", - "repo": 
"Leantime/leantime", - "version": "v3.7.1", - "pinned": false, - "date": "2026-02-22T01:25:16Z" - }, - { - "slug": "librenms", - "repo": "librenms/librenms", - "version": "26.2.0", - "pinned": false, - "date": "2026-02-16T12:15:13Z" - }, - { - "slug": "librespeed-rust", - "repo": "librespeed/speedtest-rust", - "version": "v1.4.0", - "pinned": false, - "date": "2025-10-28T15:11:12Z" - }, - { - "slug": "libretranslate", - "repo": "LibreTranslate/LibreTranslate", - "version": "v1.9.5", - "pinned": false, - "date": "2026-03-03T18:25:04Z" - }, - { - "slug": "lidarr", - "repo": "Lidarr/Lidarr", - "version": "v3.1.0.4875", - "pinned": false, - "date": "2025-11-16T22:40:18Z" - }, - { - "slug": "linkding", - "repo": "sissbruecker/linkding", - "version": "v1.45.0", - "pinned": false, - "date": "2026-01-06T20:31:04Z" - }, - { - "slug": "linkstack", - "repo": "linkstackorg/linkstack", - "version": "v4.8.6", - "pinned": false, - "date": "2026-02-17T16:53:47Z" - }, - { - "slug": "linkwarden", - "repo": "linkwarden/linkwarden", - "version": "v2.13.5", - "pinned": false, - "date": "2025-12-28T09:15:51Z" - }, - { - "slug": "listmonk", - "repo": "knadh/listmonk", - "version": "v6.0.0", - "pinned": false, - "date": "2026-01-02T17:51:28Z" - }, - { - "slug": "lubelogger", - "repo": "hargata/lubelog", - "version": "v1.6.1", - "pinned": false, - "date": "2026-02-26T20:01:24Z" - }, - { - "slug": "mafl", - "repo": "hywax/mafl", - "version": "v0.15.4", - "pinned": false, - "date": "2024-07-13T11:03:43Z" - }, - { - "slug": "magicmirror", - "repo": "MagicMirrorOrg/MagicMirror", - "version": "v2.34.0", - "pinned": false, - "date": "2026-01-01T14:48:28Z" - }, - { - "slug": "mail-archiver", - "repo": "s1t5/mail-archiver", - "version": "2603.1", - "pinned": false, - "date": "2026-03-10T11:51:08Z" - }, - { - "slug": "managemydamnlife", - "repo": "intri-in/manage-my-damn-life-nextjs", - "version": "v0.8.2", - "pinned": false, - "date": "2025-12-21T03:53:53Z" - }, - { - "slug": "manyfold", - "repo": 
"manyfold3d/manyfold", - "version": "v0.134.0", - "pinned": false, - "date": "2026-03-09T13:20:45Z" - }, - { - "slug": "mealie", - "repo": "mealie-recipes/mealie", - "version": "v3.12.0", - "pinned": false, - "date": "2026-03-07T21:59:11Z" - }, - { - "slug": "mediamanager", - "repo": "maxdorninger/MediaManager", - "version": "v1.12.3", - "pinned": false, - "date": "2026-02-11T16:45:40Z" - }, - { - "slug": "mediamtx", - "repo": "bluenviron/mediamtx", - "version": "v1.16.3", - "pinned": false, - "date": "2026-03-01T15:49:12Z" - }, - { - "slug": "meilisearch", - "repo": "riccox/meilisearch-ui", - "version": "v0.15.1", - "pinned": false, - "date": "2026-02-04T03:56:59Z" - }, - { - "slug": "memos", - "repo": "usememos/memos", - "version": "v0.26.2", - "pinned": false, - "date": "2026-02-23T13:28:34Z" - }, - { - "slug": "metube", - "repo": "alexta69/metube", - "version": "2026.03.08", - "pinned": false, - "date": "2026-03-08T20:28:19Z" - }, - { - "slug": "miniflux", - "repo": "miniflux/v2", - "version": "2.2.17", - "pinned": false, - "date": "2026-02-13T20:30:17Z" - }, - { - "slug": "monica", - "repo": "monicahq/monica", - "version": "v4.1.2", - "pinned": false, - "date": "2024-05-04T08:06:50Z" - }, - { - "slug": "myip", - "repo": "jason5ng32/MyIP", - "version": "v5.2.1", - "pinned": false, - "date": "2026-02-10T07:38:47Z" - }, - { - "slug": "mylar3", - "repo": "mylar3/mylar3", - "version": "v0.8.3", - "pinned": false, - "date": "2025-08-17T06:24:54Z" - }, - { - "slug": "myspeed", - "repo": "gnmyt/myspeed", - "version": "v1.0.9", - "pinned": false, - "date": "2024-05-21T22:15:33Z" - }, - { - "slug": "navidrome", - "repo": "navidrome/navidrome", - "version": "v0.60.3", - "pinned": false, - "date": "2026-02-10T23:55:04Z" - }, - { - "slug": "netbox", - "repo": "netbox-community/netbox", - "version": "v4.5.4", - "pinned": false, - "date": "2026-03-03T20:32:16Z" - }, - { - "slug": "nextcloud-exporter", - "repo": "xperimental/nextcloud-exporter", - "version": "v0.9.0", - 
"pinned": false, - "date": "2025-10-12T20:03:10Z" - }, - { - "slug": "nginx-ui", - "repo": "0xJacky/nginx-ui", - "version": "v2.3.3", - "pinned": false, - "date": "2026-02-15T00:58:14Z" - }, - { - "slug": "nightscout", - "repo": "nightscout/cgm-remote-monitor", - "version": "v15.0.6", - "pinned": false, - "date": "2026-03-03T23:04:35Z" - }, - { - "slug": "nocodb", - "repo": "nocodb/nocodb", - "version": "0.301.1", - "pinned": true, - "date": "2026-01-14T13:13:33Z" - }, - { - "slug": "nodebb", - "repo": "NodeBB/NodeBB", - "version": "v4.9.1", - "pinned": false, - "date": "2026-03-01T20:52:43Z" - }, - { - "slug": "nodecast-tv", - "repo": "technomancer702/nodecast-tv", - "version": "v2.1.1", - "pinned": false, - "date": "2026-01-19T23:30:29Z" - }, - { - "slug": "oauth2-proxy", - "repo": "oauth2-proxy/oauth2-proxy", - "version": "v7.14.3", - "pinned": false, - "date": "2026-02-26T14:10:21Z" - }, - { - "slug": "ombi", - "repo": "Ombi-app/Ombi", - "version": "v4.53.4", - "pinned": false, - "date": "2026-01-08T21:52:46Z" - }, - { - "slug": "open-archiver", - "repo": "LogicLabs-OU/OpenArchiver", - "version": "v0.4.2", - "pinned": false, - "date": "2026-02-24T20:47:40Z" - }, - { - "slug": "opencloud", - "repo": "opencloud-eu/opencloud", - "version": "v5.2.0", - "pinned": true, - "date": "2026-03-09T13:32:31Z" - }, - { - "slug": "opengist", - "repo": "thomiceli/opengist", - "version": "v1.12.1", - "pinned": false, - "date": "2026-02-03T09:00:43Z" - }, - { - "slug": "openproject", - "repo": "jemalloc/jemalloc", - "version": "5.3.0", - "pinned": false, - "date": "2022-05-06T19:14:21Z" - }, - { - "slug": "ots", - "repo": "Luzifer/ots", - "version": "v1.21.2", - "pinned": false, - "date": "2026-02-20T16:20:03Z" - }, - { - "slug": "outline", - "repo": "outline/outline", - "version": "v1.5.0", - "pinned": false, - "date": "2026-02-15T18:04:16Z" - }, - { - "slug": "owncast", - "repo": "owncast/owncast", - "version": "v0.2.4", - "pinned": false, - "date": "2026-01-10T23:34:29Z" - }, 
- { - "slug": "pairdrop", - "repo": "schlagmichdoch/PairDrop", - "version": "v1.11.2", - "pinned": false, - "date": "2025-02-24T19:47:06Z" - }, - { - "slug": "pangolin", - "repo": "fosrl/pangolin", - "version": "1.16.2", - "pinned": false, - "date": "2026-02-28T20:35:52Z" - }, - { - "slug": "paperless-ai", - "repo": "clusterzx/paperless-ai", - "version": "v3.0.9", - "pinned": false, - "date": "2025-11-04T07:28:45Z" - }, - { - "slug": "paperless-gpt", - "repo": "icereed/paperless-gpt", - "version": "v0.25.1", - "pinned": false, - "date": "2026-02-26T14:50:11Z" - }, - { - "slug": "paperless-ngx", - "repo": "paperless-ngx/paperless-ngx", - "version": "v2.20.10", - "pinned": false, - "date": "2026-03-04T19:20:57Z" - }, - { - "slug": "patchmon", - "repo": "PatchMon/PatchMon", - "version": "v1.4.2", - "pinned": false, - "date": "2026-02-20T13:15:31Z" - }, - { - "slug": "paymenter", - "repo": "paymenter/paymenter", - "version": "v1.4.7", - "pinned": false, - "date": "2025-12-09T11:44:49Z" - }, - { - "slug": "peanut", - "repo": "Brandawg93/PeaNUT", - "version": "v5.22.0", - "pinned": false, - "date": "2026-02-08T00:32:25Z" - }, - { - "slug": "pelican-panel", - "repo": "pelican-dev/panel", - "version": "v1.0.0-beta33", - "pinned": false, - "date": "2026-02-18T21:37:11Z" - }, - { - "slug": "pelican-wings", - "repo": "pelican-dev/wings", - "version": "v1.0.0-beta24", - "pinned": false, - "date": "2026-02-15T16:09:56Z" - }, - { - "slug": "pf2etools", - "repo": "Pf2eToolsOrg/Pf2eTools", - "version": "v0.10.1", - "pinned": false, - "date": "2025-09-28T08:55:44Z" - }, - { - "slug": "photoprism", - "repo": "photoprism/photoprism", - "version": "260305-fad9d5395", - "pinned": false, - "date": "2026-03-05T20:25:12Z" - }, - { - "slug": "pihole-exporter", - "repo": "eko/pihole-exporter", - "version": "v1.2.0", - "pinned": false, - "date": "2025-07-29T19:15:37Z" - }, - { - "slug": "planka", - "repo": "plankanban/planka", - "version": "v2.0.3", - "pinned": false, - "date": 
"2026-03-01T16:03:23Z" - }, - { - "slug": "plant-it", - "repo": "MDeLuise/plant-it", - "version": "0.10.0", - "pinned": true, - "date": "2024-12-10T09:35:26Z" - }, - { - "slug": "pocketbase", - "repo": "pocketbase/pocketbase", - "version": "v0.36.6", - "pinned": false, - "date": "2026-03-06T08:07:09Z" - }, - { - "slug": "pocketid", - "repo": "pocket-id/pocket-id", - "version": "v2.4.0", - "pinned": false, - "date": "2026-03-07T17:51:41Z" - }, - { - "slug": "powerdns", - "repo": "poweradmin/poweradmin", - "version": "v4.0.7", - "pinned": false, - "date": "2026-02-15T20:09:48Z" - }, - { - "slug": "privatebin", - "repo": "PrivateBin/PrivateBin", - "version": "2.0.3", - "pinned": false, - "date": "2025-11-12T07:10:14Z" - }, - { - "slug": "profilarr", - "repo": "Dictionarry-Hub/profilarr", - "version": "v1.1.4", - "pinned": false, - "date": "2026-01-29T14:57:25Z" - }, - { - "slug": "projectsend", - "repo": "projectsend/projectsend", - "version": "r1945", - "pinned": false, - "date": "2025-10-10T02:30:05Z" - }, - { - "slug": "prometheus", - "repo": "prometheus/prometheus", - "version": "v3.10.0", - "pinned": false, - "date": "2026-02-26T01:19:51Z" - }, - { - "slug": "prometheus-alertmanager", - "repo": "prometheus/alertmanager", - "version": "v0.31.1", - "pinned": false, - "date": "2026-02-11T21:28:26Z" - }, - { - "slug": "prometheus-blackbox-exporter", - "repo": "prometheus/blackbox_exporter", - "version": "v0.28.0", - "pinned": false, - "date": "2025-12-06T13:32:18Z" - }, - { - "slug": "prometheus-paperless-ngx-exporter", - "repo": "hansmi/prometheus-paperless-exporter", - "version": "v0.0.9", - "pinned": false, - "date": "2025-12-08T20:37:45Z" - }, - { - "slug": "prowlarr", - "repo": "Prowlarr/Prowlarr", - "version": "v2.3.0.5236", - "pinned": false, - "date": "2025-11-16T22:41:22Z" - }, - { - "slug": "ps5-mqtt", - "repo": "FunkeyFlo/ps5-mqtt", - "version": "v1.4.0", - "pinned": false, - "date": "2024-08-06T19:57:33Z" - }, - { - "slug": "pterodactyl-wings", - "repo": 
"pterodactyl/wings", - "version": "v1.12.1", - "pinned": false, - "date": "2026-01-13T20:39:22Z" - }, - { - "slug": "pulse", - "repo": "rcourtman/Pulse", - "version": "v5.1.23", - "pinned": false, - "date": "2026-03-09T22:22:12Z" - }, - { - "slug": "pve-scripts-local", - "repo": "community-scripts/ProxmoxVE-Local", - "version": "v0.5.6", - "pinned": false, - "date": "2026-01-29T15:08:44Z" - }, - { - "slug": "qbittorrent", - "repo": "userdocs/qbittorrent-nox-static", - "version": "release-5.1.4_v2.0.11", - "pinned": false, - "date": "2025-11-19T23:54:34Z" - }, - { - "slug": "qbittorrent-exporter", - "repo": "martabal/qbittorrent-exporter", - "version": "v1.13.3", - "pinned": false, - "date": "2026-02-22T13:01:42Z" - }, - { - "slug": "qdrant", - "repo": "qdrant/qdrant", - "version": "v1.17.0", - "pinned": false, - "date": "2026-02-20T11:11:50Z" - }, - { - "slug": "qui", - "repo": "autobrr/qui", - "version": "v1.14.1", - "pinned": false, - "date": "2026-02-23T13:13:31Z" - }, - { - "slug": "radarr", - "repo": "Radarr/Radarr", - "version": "v6.0.4.10291", - "pinned": false, - "date": "2025-11-16T22:39:01Z" - }, - { - "slug": "radicale", - "repo": "Kozea/Radicale", - "version": "v3.6.1", - "pinned": false, - "date": "2026-02-24T06:36:23Z" - }, - { - "slug": "rclone", - "repo": "rclone/rclone", - "version": "v1.73.2", - "pinned": false, - "date": "2026-03-06T20:42:26Z" - }, - { - "slug": "rdtclient", - "repo": "rogerfar/rdt-client", - "version": "v2.0.125", - "pinned": false, - "date": "2026-03-01T18:29:10Z" - }, - { - "slug": "reactive-resume", - "repo": "amruthpillai/reactive-resume", - "version": "v5.0.11", - "pinned": false, - "date": "2026-03-04T20:39:11Z" - }, - { - "slug": "recyclarr", - "repo": "recyclarr/recyclarr", - "version": "v8.4.0", - "pinned": false, - "date": "2026-03-06T01:25:59Z" - }, - { - "slug": "reitti", - "repo": "dedicatedcode/reitti", - "version": "v3.4.1", - "pinned": false, - "date": "2026-01-23T09:52:28Z" - }, - { - "slug": "revealjs", - 
"repo": "hakimel/reveal.js", - "version": "6.0.0", - "pinned": false, - "date": "2026-03-11T11:54:59Z" - }, - { - "slug": "romm", - "repo": "RetroAchievements/RALibretro", - "version": "1.8.3", - "pinned": false, - "date": "2026-03-07T23:41:29Z" - }, - { - "slug": "rustdeskserver", - "repo": "lejianwen/rustdesk-server", - "version": "v0.1.2", - "pinned": false, - "date": "2025-09-01T02:52:30Z" - }, - { - "slug": "rustypaste", - "repo": "orhun/rustypaste", - "version": "v0.16.1", - "pinned": false, - "date": "2025-03-21T20:44:47Z" - }, - { - "slug": "sabnzbd", - "repo": "sabnzbd/sabnzbd", - "version": "4.5.5", - "pinned": false, - "date": "2025-10-24T11:12:22Z" - }, - { - "slug": "scanopy", - "repo": "scanopy/scanopy", - "version": "v0.14.17", - "pinned": false, - "date": "2026-03-09T05:04:49Z" - }, - { - "slug": "scraparr", - "repo": "thecfu/scraparr", - "version": "v3.0.3", - "pinned": false, - "date": "2026-02-12T14:20:56Z" - }, - { - "slug": "seaweedfs", - "repo": "seaweedfs/seaweedfs", - "version": "4.17", - "pinned": false, - "date": "2026-03-11T09:30:38Z" - }, - { - "slug": "seelf", - "repo": "YuukanOO/seelf", - "version": "v2.4.2", - "pinned": false, - "date": "2025-03-08T10:49:04Z" - }, - { - "slug": "seerr", - "repo": "seerr-team/seerr", - "version": "v3.1.0", - "pinned": false, - "date": "2026-02-27T17:25:29Z" - }, - { - "slug": "semaphore", - "repo": "semaphoreui/semaphore", - "version": "v2.17.21", - "pinned": false, - "date": "2026-03-09T09:33:06Z" - }, - { - "slug": "shelfmark", - "repo": "FlareSolverr/FlareSolverr", - "version": "v3.4.6", - "pinned": false, - "date": "2025-11-29T02:43:00Z" - }, - { - "slug": "signoz", - "repo": "SigNoz/signoz-otel-collector", - "version": "v0.144.2", - "pinned": false, - "date": "2026-02-26T05:57:26Z" - }, - { - "slug": "silverbullet", - "repo": "silverbulletmd/silverbullet", - "version": "2.5.2", - "pinned": false, - "date": "2026-03-06T12:20:58Z" - }, - { - "slug": "slskd", - "repo": "slskd/slskd", - "version": 
"0.24.5", - "pinned": false, - "date": "2026-03-01T04:00:42Z" - }, - { - "slug": "snipeit", - "repo": "grokability/snipe-it", - "version": "v8.4.0", - "pinned": false, - "date": "2026-02-23T20:59:43Z" - }, - { - "slug": "snowshare", - "repo": "TuroYT/snowshare", - "version": "v1.3.8", - "pinned": false, - "date": "2026-03-02T07:43:42Z" - }, - { - "slug": "sonarr", - "repo": "Sonarr/Sonarr", - "version": "v4.0.16.2944", - "pinned": false, - "date": "2025-11-05T01:56:48Z" - }, - { - "slug": "sonobarr", - "repo": "Dodelidoo-Labs/sonobarr", - "version": "0.12.1", - "pinned": false, - "date": "2026-03-03T13:43:02Z" - }, - { - "slug": "speedtest-tracker", - "repo": "alexjustesen/speedtest-tracker", - "version": "v1.13.10", - "pinned": false, - "date": "2026-02-20T03:14:47Z" - }, - { - "slug": "spoolman", - "repo": "Donkie/Spoolman", - "version": "v0.23.1", - "pinned": false, - "date": "2026-02-03T19:03:55Z" - }, - { - "slug": "sportarr", - "repo": "Sportarr/Sportarr", - "version": "v4.0.988.1063", - "pinned": false, - "date": "2026-03-07T12:15:33Z" - }, - { - "slug": "stirling-pdf", - "repo": "Stirling-Tools/Stirling-PDF", - "version": "v2.7.0", - "pinned": false, - "date": "2026-03-06T11:21:47Z" - }, - { - "slug": "streamlink-webui", - "repo": "CrazyWolf13/streamlink-webui", - "version": "0.6", - "pinned": false, - "date": "2025-09-05T06:05:04Z" - }, - { - "slug": "stylus", - "repo": "mmastrac/stylus", - "version": "v0.17.0", - "pinned": false, - "date": "2025-09-19T22:23:28Z" - }, - { - "slug": "sure", - "repo": "we-promise/sure", - "version": "v0.6.8", - "pinned": false, - "date": "2026-02-28T12:55:36Z" - }, - { - "slug": "tandoor", - "repo": "TandoorRecipes/recipes", - "version": "2.5.3", - "pinned": false, - "date": "2026-02-14T12:42:14Z" - }, - { - "slug": "tasmoadmin", - "repo": "TasmoAdmin/TasmoAdmin", - "version": "v5.0.0", - "pinned": false, - "date": "2026-03-09T20:51:03Z" - }, - { - "slug": "tautulli", - "repo": "Tautulli/Tautulli", - "version": "v2.16.1", - 
"pinned": false, - "date": "2026-02-15T20:40:37Z" - }, - { - "slug": "teddycloud", - "repo": "toniebox-reverse-engineering/teddycloud", - "version": "tc_v0.6.7", - "pinned": false, - "date": "2026-01-11T12:00:06Z" - }, - { - "slug": "termix", - "repo": "Termix-SSH/Termix", - "version": "release-1.11.2-tag", - "pinned": false, - "date": "2026-03-08T23:27:30Z" - }, - { - "slug": "the-lounge", - "repo": "thelounge/thelounge-deb", - "version": "v4.4.3", - "pinned": false, - "date": "2024-04-06T12:24:35Z" - }, - { - "slug": "thingsboard", - "repo": "thingsboard/thingsboard", - "version": "v4.3.1", - "pinned": false, - "date": "2026-03-10T09:25:25Z" - }, - { - "slug": "threadfin", - "repo": "threadfin/threadfin", - "version": "1.2.37", - "pinned": false, - "date": "2025-09-11T16:13:41Z" - }, - { - "slug": "tianji", - "repo": "msgbyte/tianji", - "version": "v1.31.13", - "pinned": false, - "date": "2026-02-13T16:30:09Z" - }, - { - "slug": "tinyauth", - "repo": "steveiliop56/tinyauth", - "version": "v5.0.2", - "pinned": false, - "date": "2026-03-08T15:46:59Z" - }, - { - "slug": "traccar", - "repo": "traccar/traccar", - "version": "v6.12.2", - "pinned": false, - "date": "2026-02-27T15:08:36Z" - }, - { - "slug": "tracearr", - "repo": "connorgallopo/Tracearr", - "version": "v1.4.22", - "pinned": false, - "date": "2026-03-09T17:39:52Z" - }, - { - "slug": "tracktor", - "repo": "javedh-dev/tracktor", - "version": "1.2.1", - "pinned": false, - "date": "2026-01-21T09:31:18Z" - }, - { - "slug": "traefik", - "repo": "traefik/traefik", - "version": "v3.6.10", - "pinned": false, - "date": "2026-03-06T15:08:35Z" - }, - { - "slug": "trilium", - "repo": "TriliumNext/Trilium", - "version": "v0.102.1", - "pinned": false, - "date": "2026-03-08T09:11:01Z" - }, - { - "slug": "trip", - "repo": "itskovacs/TRIP", - "version": "1.41.1", - "pinned": false, - "date": "2026-03-04T07:25:35Z" - }, - { - "slug": "tududi", - "repo": "chrisvel/tududi", - "version": "v0.88.5", - "pinned": false, - "date": 
"2026-02-13T13:54:14Z" - }, - { - "slug": "tunarr", - "repo": "chrisbenincasa/tunarr", - "version": "v1.1.19", - "pinned": false, - "date": "2026-03-11T02:21:06Z" - }, - { - "slug": "uhf", - "repo": "swapplications/comskip", - "version": "1.4.0", - "pinned": false, - "date": "2025-06-01T09:16:31Z" - }, - { - "slug": "umami", - "repo": "umami-software/umami", - "version": "v3.0.3", - "pinned": false, - "date": "2025-12-12T02:39:27Z" - }, - { - "slug": "umlautadaptarr", - "repo": "PCJones/Umlautadaptarr", - "version": "v0.7.5", - "pinned": false, - "date": "2025-11-18T10:50:21Z" - }, - { - "slug": "upgopher", - "repo": "wanetty/upgopher", - "version": "v1.15.2", - "pinned": false, - "date": "2026-03-03T13:40:45Z" - }, - { - "slug": "upsnap", - "repo": "seriousm4x/UpSnap", - "version": "5.2.8", - "pinned": false, - "date": "2026-02-13T00:02:37Z" - }, - { - "slug": "uptimekuma", - "repo": "louislam/uptime-kuma", - "version": "2.2.1", - "pinned": false, - "date": "2026-03-10T02:25:33Z" - }, - { - "slug": "vaultwarden", - "repo": "dani-garcia/vaultwarden", - "version": "1.35.4", - "pinned": false, - "date": "2026-02-23T21:43:25Z" - }, - { - "slug": "victoriametrics", - "repo": "VictoriaMetrics/VictoriaMetrics", - "version": "v1.137.0", - "pinned": false, - "date": "2026-03-02T10:09:29Z" - }, - { - "slug": "vikunja", - "repo": "go-vikunja/vikunja", - "version": "v2.1.0", - "pinned": false, - "date": "2026-02-27T14:26:53Z" - }, - { - "slug": "wallabag", - "repo": "wallabag/wallabag", - "version": "2.6.14", - "pinned": false, - "date": "2025-10-07T08:06:17Z" - }, - { - "slug": "wallos", - "repo": "ellite/Wallos", - "version": "v4.6.2", - "pinned": false, - "date": "2026-03-05T22:18:06Z" - }, - { - "slug": "wanderer", - "repo": "meilisearch/meilisearch", - "version": "v1.38.2", - "pinned": false, - "date": "2026-03-11T11:36:01Z" - }, - { - "slug": "warracker", - "repo": "sassanix/Warracker", - "version": "1.0.2", - "pinned": false, - "date": "2025-10-30T18:23:23Z" - }, - { - 
"slug": "watcharr", - "repo": "sbondCo/Watcharr", - "version": "v3.0.1", - "pinned": false, - "date": "2026-03-09T11:33:44Z" - }, - { - "slug": "watchyourlan", - "repo": "aceberg/WatchYourLAN", - "version": "2.1.4", - "pinned": false, - "date": "2025-09-10T12:08:09Z" - }, - { - "slug": "wavelog", - "repo": "wavelog/wavelog", - "version": "2.3", - "pinned": false, - "date": "2026-02-11T15:46:40Z" - }, - { - "slug": "wealthfolio", - "repo": "afadil/wealthfolio", - "version": "v3.0.3", - "pinned": true, - "date": "2026-03-03T21:47:55Z" - }, - { - "slug": "web-check", - "repo": "CrazyWolf13/web-check", - "version": "1.1.0", - "pinned": false, - "date": "2025-11-11T14:30:28Z" - }, - { - "slug": "wger", - "repo": "wger-project/wger", - "version": "2.4", - "pinned": false, - "date": "2026-01-18T12:12:02Z" - }, - { - "slug": "wikijs", - "repo": "requarks/wiki", - "version": "v2.5.312", - "pinned": false, - "date": "2026-02-12T02:45:22Z" - }, - { - "slug": "wishlist", - "repo": "cmintey/wishlist", - "version": "v0.60.1", - "pinned": false, - "date": "2026-02-24T04:01:37Z" - }, - { - "slug": "wizarr", - "repo": "wizarrrr/wizarr", - "version": "v2026.2.1", - "pinned": false, - "date": "2026-02-25T01:07:56Z" - }, - { - "slug": "writefreely", - "repo": "writefreely/writefreely", - "version": "v0.16.0", - "pinned": false, - "date": "2025-08-29T19:30:02Z" - }, - { - "slug": "yt-dlp-webui", - "repo": "marcopiovanello/yt-dlp-web-ui", - "version": "v3.2.6", - "pinned": false, - "date": "2025-03-15T10:24:27Z" - }, - { - "slug": "yubal", - "repo": "guillevc/yubal", - "version": "v0.7.0", - "pinned": false, - "date": "2026-03-08T13:37:49Z" - }, - { - "slug": "zerobyte", - "repo": "restic/restic", - "version": "v0.18.1", - "pinned": false, - "date": "2025-09-21T18:24:38Z" - }, - { - "slug": "zigbee2mqtt", - "repo": "Koenkk/zigbee2mqtt", - "version": "2.9.1", - "pinned": false, - "date": "2026-03-02T11:16:46Z" - }, - { - "slug": "zipline", - "repo": "diced/zipline", - "version": 
"v4.4.2", - "pinned": false, - "date": "2026-02-11T04:58:54Z" - }, - { - "slug": "zitadel", - "repo": "zitadel/zitadel", - "version": "v4.12.2", - "pinned": false, - "date": "2026-03-11T07:50:10Z" - }, - { - "slug": "zoraxy", - "repo": "tobychui/zoraxy", - "version": "v3.3.2-rc3", - "pinned": false, - "date": "2026-03-09T13:56:45Z" - }, - { - "slug": "zwave-js-ui", - "repo": "zwave-js/zwave-js-ui", - "version": "v11.14.0", - "pinned": false, - "date": "2026-03-06T09:34:32Z" - } - ] -} diff --git a/frontend/public/json/glance.json b/frontend/public/json/glance.json deleted file mode 100644 index a2d3431bb..000000000 --- a/frontend/public/json/glance.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Glance", - "slug": "glance", - "categories": [ - 10 - ], - "date_created": "2024-12-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://github.com/glanceapp/glance/blob/main/docs/configuration.md", - "website": "https://github.com/glanceapp/glance", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/glance.webp", - "config_path": "/opt/glance/glance.yml", - "description": "A self-hosted dashboard that puts all your feeds in one place", - "install_methods": [ - { - "type": "default", - "script": "ct/glance.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/glances.json b/frontend/public/json/glances.json deleted file mode 100644 index b73698c9d..000000000 --- a/frontend/public/json/glances.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Glances", - "slug": "glances", - "categories": [ - 9 - ], - "date_created": "2024-05-02", - "type": "addon", - "updateable": true, - "privileged": false, - "interface_port": 61208, - "documentation": "https://glances.readthedocs.io/en/latest/", - "website": 
"https://nicolargo.github.io/glances/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/glances.webp", - "config_path": "/usr/local/share/doc/glances/glances.conf", - "description": "Glances is an open-source system cross-platform monitoring tool. It allows real-time monitoring of various aspects of your system such as CPU, memory, disk, network usage etc.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/glances.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within an existing LXC Console (Debian / Ubuntu / Alpine supported)", - "type": "info" - } - ] -} diff --git a/frontend/public/json/globaleaks.json b/frontend/public/json/globaleaks.json deleted file mode 100644 index e3f79cbe3..000000000 --- a/frontend/public/json/globaleaks.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "GlobaLeaks", - "slug": "globaleaks", - "categories": [ - 0 - ], - "date_created": "2025-09-18", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 443, - "documentation": "https://docs.globaleaks.org", - "website": "https://www.globaleaks.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/globaleaks.webp", - "config_path": "", - "description": "GlobaLeaks is a free and open-source whistleblowing software enabling anyone to easily set up and maintain a secure reporting platform.", - "install_methods": [ - { - "type": "default", - "script": "ct/globaleaks.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/glpi.json b/frontend/public/json/glpi.json deleted file mode 100644 index b2149dc3a..000000000 --- a/frontend/public/json/glpi.json +++ /dev/null @@ -1,35 
+0,0 @@ -{ - "name": "GLPI", - "slug": "glpi", - "categories": [ - 25 - ], - "date_created": "2025-01-06", - "type": "ct", - "updateable": false, - "privileged": false, - "interface_port": 80, - "documentation": "https://glpi-project.org/documentation/", - "website": "https://glpi-project.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/glpi.webp", - "config_path": "/etc/glpi", - "description": "GLPI is a Free Asset and IT Management Software package, Data center management, ITIL Service Desk, licenses tracking and software auditing.", - "install_methods": [ - { - "type": "default", - "script": "ct/glpi.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 10, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "glpi", - "password": "glpi" - }, - "notes": [] -} diff --git a/frontend/public/json/go2rtc.json b/frontend/public/json/go2rtc.json deleted file mode 100644 index 0f82984d7..000000000 --- a/frontend/public/json/go2rtc.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "go2rtc", - "slug": "go2rtc", - "categories": [ - 15 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 1984, - "documentation": "https://github.com/AlexxIT/go2rtc/blob/master/README.md", - "website": "https://github.com/AlexxIT/go2rtc", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/go2rtc.webp", - "config_path": "/opt/go2rtc/go2rtc.yaml", - "description": "go2rtc is the ultimate camera streaming application with support RTSP, WebRTC, HomeKit, FFmpeg, RTMP, etc.", - "install_methods": [ - { - "type": "default", - "script": "ct/go2rtc.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/gokapi.json b/frontend/public/json/gokapi.json deleted file mode 100644 index 
88ba7e6b9..000000000 --- a/frontend/public/json/gokapi.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Gokapi", - "slug": "gokapi", - "categories": [ - 11 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 53842, - "documentation": "https://gokapi.readthedocs.io/en/latest/usage.html", - "website": "https://github.com/Forceu/Gokapi", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/gokapi.webp", - "config_path": "/opt/gokapi/config", - "description": "Gokapi is a lightweight server to share files, which expire after a set amount of downloads or days.", - "install_methods": [ - { - "type": "default", - "script": "ct/gokapi.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/gotify.json b/frontend/public/json/gotify.json deleted file mode 100644 index c4e29b988..000000000 --- a/frontend/public/json/gotify.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Gotify", - "slug": "gotify", - "categories": [ - 19 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://gotify.net/docs/index", - "website": "https://gotify.net/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/gotify.webp", - "config_path": "/etc/gotify/config.yml", - "description": "Gotify is a simple server for sending and receiving messages", - "install_methods": [ - { - "type": "default", - "script": "ct/gotify.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "admin" - }, - "notes": [] -} diff --git a/frontend/public/json/grafana.json b/frontend/public/json/grafana.json deleted file mode 100644 index b60e1a359..000000000 
--- a/frontend/public/json/grafana.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "name": "Grafana", - "slug": "grafana", - "categories": [ - 9 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://grafana.com/docs/grafana/latest/", - "website": "https://grafana.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/grafana.webp", - "config_path": "Debian: /etc/grafana/grafana.ini | Alpine: /etc/grafana.ini", - "description": "Grafana is a data visualization and monitoring platform that enables users to query, visualize, alert on and understand metrics, logs, and other data sources. It integrates with various data sources, including Prometheus, InfluxDB, Elasticsearch, and many others, to present a unified view of the data and enable users to create insightful and interactive dashboards.", - "install_methods": [ - { - "type": "default", - "script": "ct/grafana.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-grafana.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 2, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "admin" - }, - "notes": [] -} diff --git a/frontend/public/json/gramps-web.json b/frontend/public/json/gramps-web.json deleted file mode 100644 index 49c3cb0ae..000000000 --- a/frontend/public/json/gramps-web.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Gramps Web", - "slug": "gramps-web", - "categories": [ - 12 - ], - "date_created": "2026-02-22", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5000, - "documentation": "https://www.grampsweb.org/install_setup/setup/", - "website": "https://www.grampsweb.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/gramps.webp", - "config_path": 
"/opt/gramps-web/config/config.cfg", - "description": "Gramps Web is a collaborative genealogy platform for browsing, editing and sharing family trees through a modern web interface.", - "install_methods": [ - { - "type": "default", - "script": "ct/gramps-web.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 20, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "On first access, create the owner account via the built-in onboarding wizard.", - "type": "info" - }, - { - "text": "The initial deployment compiles the frontend and can take several minutes.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/graylog.json b/frontend/public/json/graylog.json deleted file mode 100644 index d52568936..000000000 --- a/frontend/public/json/graylog.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Graylog", - "slug": "graylog", - "categories": [ - 9 - ], - "date_created": "2025-02-12", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9000, - "documentation": "https://go2docs.graylog.org/current/home.htm", - "website": "https://graylog.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/graylog.webp", - "config_path": "/etc/graylog/server/server.conf", - "description": "Graylog is an open-source log management and analysis platform that centralizes and processes log data from various sources, enabling real-time search, analysis, and alerting for IT infrastructure monitoring and troubleshooting.", - "install_methods": [ - { - "type": "default", - "script": "ct/graylog.sh", - "resources": { - "cpu": 2, - "ram": 8192, - "hdd": 30, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Initial Setup credentials: `tail /var/log/graylog-server/server.log` after the server starts for the first time.", - "type": "info" - }, - { - 
"text": "Type `cat ~/graylog.creds` to get admin password that you use to log in AFTER the Initial Setup", - "type": "info" - } - ] -} diff --git a/frontend/public/json/grist.json b/frontend/public/json/grist.json deleted file mode 100644 index 88b35d9be..000000000 --- a/frontend/public/json/grist.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Grist", - "slug": "grist", - "categories": [ - 12 - ], - "date_created": "2024-12-27", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8484, - "documentation": "https://support.getgrist.com/self-managed/#the-essentials", - "website": "https://www.getgrist.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/grist.webp", - "config_path": "/opt/grist/.env", - "description": "Grist is like a spreadsheet + database hybrid. It lets you store structured data, use relational links between tables, apply formulas (even with Python), build custom layouts (cards, forms, dashboards), set fine-grained access rules, and visualize data with charts or pivot-tables.", - "install_methods": [ - { - "type": "default", - "script": "ct/grist.sh", - "resources": { - "cpu": 2, - "ram": 3072, - "hdd": 6, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/grocy.json b/frontend/public/json/grocy.json deleted file mode 100644 index a05db59f2..000000000 --- a/frontend/public/json/grocy.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "grocy", - "slug": "grocy", - "categories": [ - 24 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://github.com/grocy/grocy#how-to-install", - "website": "https://grocy.info/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/grocy.webp", - "config_path": "/var/www/html/data/config.php", - "description": "grocy is a web-based self-hosted 
groceries & household management solution for your home. It helps you keep track of your groceries and household items, manage your shopping list, and keep track of your pantry, recipes, meal plans, and more.", - "install_methods": [ - { - "type": "default", - "script": "ct/grocy.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "admin" - }, - "notes": [] -} diff --git a/frontend/public/json/guardian.json b/frontend/public/json/guardian.json deleted file mode 100644 index 1d4655a9b..000000000 --- a/frontend/public/json/guardian.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Guardian", - "slug": "guardian", - "categories": [ - 13 - ], - "date_created": "2025-10-15", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://github.com/HydroshieldMKII/Guardian/blob/main/README.md", - "config_path": "/opt/guardian/.env", - "website": "https://github.com/HydroshieldMKII/Guardian", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/guardian-plex.webp", - "description": "Guardian is a lightweight companion app for Plex that lets you monitor, approve or block devices in real time. 
It helps you enforce per-user or global policies, stop unwanted sessions automatically and grant temporary access - all through a simple web interface.", - "install_methods": [ - { - "type": "default", - "script": "ct/guardian.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 6, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/gwn-manager.json b/frontend/public/json/gwn-manager.json deleted file mode 100644 index 820ddb531..000000000 --- a/frontend/public/json/gwn-manager.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "GWN Manager", - "slug": "gwn-manager", - "categories": [ - 9 - ], - "date_created": "2026-01-08", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8443, - "documentation": "https://documentation.grandstream.com/article-categories/gwn-mgmt/", - "website": "https://www.grandstream.com/products/networking-solutions/wi-fi-management/product/gwn-manager", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/gwn-manager.webp", - "config_path": "/gwn/conf/gwn.conf", - "description": "GWN Manager is a free on-premise enterprise-grade, management platform for Grandstream GWN series devices. Typically deployed on a customer’s private network, this flexible, scalable solution offers simplified configuration and management.", - "install_methods": [ - { - "type": "default", - "script": "ct/gwn-manager.sh", - "resources": { - "cpu": 2, - "ram": 6144, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Installation package is pulled from GrandStream website. 
Installation may take a while.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/haos-vm.json b/frontend/public/json/haos-vm.json deleted file mode 100644 index 901d7c38a..000000000 --- a/frontend/public/json/haos-vm.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Home Assistant OS", - "slug": "haos-vm", - "categories": [ - 16 - ], - "date_created": "2024-04-29", - "type": "vm", - "updateable": true, - "privileged": false, - "interface_port": 8123, - "documentation": "https://www.home-assistant.io/docs/", - "website": "https://www.home-assistant.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/home-assistant.webp", - "config_path": "", - "description": "This script automates the process of creating a Virtual Machine (VM) using the official KVM (qcow2) disk image provided by the Home Assistant Team. It involves finding, downloading, and extracting the image, defining user-defined settings, importing and attaching the disk, setting the boot order, and starting the VM. It supports various storage types, and does not involve any hidden installations. 
After the script completes, click on the VM, then on the Summary tab to find the VM IP.", - "install_methods": [ - { - "type": "default", - "script": "vm/haos-vm.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 32, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "The disk must have a minimum size of 32GB and its size cannot be changed during the creation of the VM.", - "type": "warning" - }, - { - "text": "After the script completes, click on the VM, then on the Summary or Console tab to find the VM IP.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/headscale.json b/frontend/public/json/headscale.json deleted file mode 100644 index 19d70eb25..000000000 --- a/frontend/public/json/headscale.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Headscale", - "slug": "headscale", - "categories": [ - 4 - ], - "date_created": "2024-05-13", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://headscale.net/", - "website": "https://github.com/juanfont/headscale", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/headscale.webp", - "config_path": "/etc/headscale/config.yaml", - "description": "An open source, self-hosted implementation of the Tailscale control server", - "install_methods": [ - { - "type": "default", - "script": "ct/headscale.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Configuration settings: `/etc/headscale/config.yaml`", - "type": "info" - }, - { - "text": "Access headscale-admin UI via `http:///admin/`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/healthchecks.json b/frontend/public/json/healthchecks.json deleted file mode 100644 index 4db95eaef..000000000 --- 
a/frontend/public/json/healthchecks.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Healthchecks", - "slug": "healthchecks", - "categories": [ - 9 - ], - "date_created": "2025-08-25", - "type": "ct", - "updateable": true, - "privileged": false, - "config_path": "/opt/healthchecks/hc/local_settings.py", - "interface_port": 3000, - "documentation": "https://healthchecks.io/docs/", - "website": "https://healthchecks.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/healthchecks.webp", - "description": "Healthchecks is a cron job monitoring service. It listens for HTTP requests and email messages (\"pings\") from your cron jobs and scheduled tasks (\"checks\"). When a ping does not arrive on time, Healthchecks sends out alerts. Healthchecks comes with a web dashboard, API, 25+ integrations for delivering notifications, monthly email reports, WebAuthn 2FA support, team management features: projects, team members, read-only access.", - "install_methods": [ - { - "type": "default", - "script": "ct/healthchecks.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 5, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "if you change your LXC-IP, you need to update /etc/caddy/Caddyfile & /opt/healthchecks/hc/local_settings.py", - "type": "info" - }, - { - "text": "Show credentials: `cat ~/healthchecks.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/heimdall-dashboard.json b/frontend/public/json/heimdall-dashboard.json deleted file mode 100644 index eb2332aaa..000000000 --- a/frontend/public/json/heimdall-dashboard.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Heimdall Dashboard", - "slug": "heimdall-dashboard", - "categories": [ - 10 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 7990, - "documentation": 
"https://github.com/linuxserver/Heimdall/blob/2.x/readme.md", - "website": "https://heimdall.site/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/heimdall.webp", - "config_path": "/opt/Heimdall/.env", - "description": "Heimdall Dashboard is a self-hosted, web-based dashboard for managing and monitoring the health of applications and servers. It allows you to keep track of the status of your systems from a single, centralized location, and receive notifications when things go wrong. With Heimdall Dashboard, you have full control over your data and can customize it to meet your specific needs. Self-hosting the dashboard gives you the flexibility to run it on your own infrastructure, making it a suitable solution for organizations that prioritize data security and privacy.", - "install_methods": [ - { - "type": "default", - "script": "ct/heimdall-dashboard.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/hev-socks5-server.json b/frontend/public/json/hev-socks5-server.json deleted file mode 100644 index 9bb8ff1e6..000000000 --- a/frontend/public/json/hev-socks5-server.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "hev-socks5-server", - "slug": "hev-socks5-server", - "categories": [ - 4 - ], - "date_created": "2025-02-23", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 1080, - "documentation": "https://github.com/heiher/hev-socks5-server/blob/main/README.md", - "website": "https://github.com/heiher/hev-socks5-server", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/hev.webp", - "config_path": "/opt/hivemq/conf/config.xml", - "description": "HevSocks5Server is a simple, lightweight socks5 server.", - "install_methods": [ - { - "type": "default", - "script": "ct/hev-socks5-server.sh", - "resources": { - "cpu": 1, - "ram": 
512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Default credentials: `cat /root/hev.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/hivemq.json b/frontend/public/json/hivemq.json deleted file mode 100644 index dfdff7660..000000000 --- a/frontend/public/json/hivemq.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "HiveMQ CE", - "slug": "hivemq", - "categories": [ - 18 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": false, - "privileged": false, - "interface_port": 1883, - "documentation": "https://github.com/hivemq/hivemq-community-edition/wiki", - "website": "https://www.hivemq.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/hivemq.webp", - "config_path": "/opt/hivemq/conf/config.xml", - "description": "HiveMQ CE is a Java-based open source MQTT broker that fully supports MQTT 3.x and MQTT 5.", - "install_methods": [ - { - "type": "default", - "script": "ct/hivemq.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "To check if HiveMQ is listening to the default port for MQTT `lsof -i :1883`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/homarr.json b/frontend/public/json/homarr.json deleted file mode 100644 index 2a70a6955..000000000 --- a/frontend/public/json/homarr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Homarr", - "slug": "homarr", - "categories": [ - 10 - ], - "date_created": "2025-01-28", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 7575, - "documentation": "https://homarr.dev/docs/getting-started/", - "website": "https://homarr.dev/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/homarr.webp", - "config_path": "/opt/homarr.env", - "description": 
"Homarr is a sleek, modern dashboard that puts all of your apps and services at your fingertips.", - "install_methods": [ - { - "type": "default", - "script": "ct/homarr.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/homeassistant.json b/frontend/public/json/homeassistant.json deleted file mode 100644 index 1f25730fc..000000000 --- a/frontend/public/json/homeassistant.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "name": "Home Assistant Container", - "slug": "homeassistant", - "categories": [ - 16 - ], - "date_created": "2024-04-29", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8123, - "documentation": "https://www.home-assistant.io/docs/", - "website": "https://www.home-assistant.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/home-assistant.webp", - "config_path": "/var/lib/docker/volumes/hass_config/_data", - "description": "A standalone container-based installation of Home Assistant Core means that the software is installed inside a Docker container, separate from the host operating system. 
This allows for flexibility and scalability, as well as improved security, as the container can be easily moved or isolated from other processes on the host.", - "install_methods": [ - { - "type": "default", - "script": "ct/homeassistant.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 16, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Containerized version doesn't allow Home Assistant add-ons.", - "type": "warning" - }, - { - "text": "If the LXC is created Privileged, the script will automatically set up USB passthrough.", - "type": "warning" - }, - { - "text": "config path: `/var/lib/docker/volumes/hass_config/_data`", - "type": "info" - }, - { - "text": "Portainer interface: $IP: 9443 - User & password must be set manually within 5 minutes, otherwise a restart of Portainer is required!", - "type": "info" - }, - { - "text": "WARNING: Installation sources scripts outside of Community Scripts repo. Please check the source before installing.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/homebox.json b/frontend/public/json/homebox.json deleted file mode 100644 index fee0a18fb..000000000 --- a/frontend/public/json/homebox.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "HomeBox", - "slug": "homebox", - "categories": [ - 24 - ], - "date_created": "2024-09-16", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 7745, - "documentation": "https://homebox.software/en/quick-start.html", - "website": "https://homebox.software/en/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/homebox.webp", - "config_path": "/opt/homebox/.env", - "description": "HomeBox is a simple, home-focused inventory management software. It allows users to organize and track household items by adding, updating, or deleting them. 
Features include optional details like warranty info, CSV import/export, custom labels, locations, and multi-tenant support for sharing with others. It\u2019s designed to be fast, easy to use, and portable.", - "install_methods": [ - { - "type": "default", - "script": "ct/homebox.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/homebridge.json b/frontend/public/json/homebridge.json deleted file mode 100644 index 3c101f0da..000000000 --- a/frontend/public/json/homebridge.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Homebridge", - "slug": "homebridge", - "categories": [ - 16 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8581, - "documentation": "https://github.com/homebridge/homebridge/wiki", - "website": "https://homebridge.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/homebridge.webp", - "config_path": "/var/lib/homebridge/config.json", - "description": "Homebridge is a popular open-source software platform that enables you to integrate smart home devices and services that do not natively support Apple's HomeKit protocol into the HomeKit ecosystem. This allows you to control and automate these devices using Siri, the Home app, or other HomeKit-enabled apps, making it easy to bring together a variety of different devices into a unified smart home system. 
With Homebridge, you can expand the capabilities of your smart home, unlocking new possibilities for automating and controlling your devices and systems.", - "install_methods": [ - { - "type": "default", - "script": "ct/homebridge.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "admin" - }, - "notes": [] -} diff --git a/frontend/public/json/homepage.json b/frontend/public/json/homepage.json deleted file mode 100644 index 89aca036a..000000000 --- a/frontend/public/json/homepage.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Homepage", - "slug": "homepage", - "categories": [ - 10 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://gethomepage.dev/configs/", - "website": "https://gethomepage.dev", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/homepage.webp", - "config_path": "/opt/homepage/config/", - "description": "Homepage is a self-hosted dashboard solution for centralizing and organizing data and information.", - "install_methods": [ - { - "type": "default", - "script": "ct/homepage.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 6, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Configuration (bookmarks.yaml, services.yaml, widgets.yaml) path: `/opt/homepage/config/`", - "type": "info" - }, - { - "text": "Add additional allowed hosts to `/opt/homepage/.env`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/homer.json b/frontend/public/json/homer.json deleted file mode 100644 index 43b9f20e6..000000000 --- a/frontend/public/json/homer.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Homer", - "slug": "homer", - "categories": [ - 10 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": 
true, - "privileged": false, - "interface_port": 8010, - "documentation": "https://github.com/bastienwirtz/homer/blob/main/README.md#table-of-contents", - "website": "https://github.com/bastienwirtz/homer", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/homer.webp", - "config_path": "/opt/homer/assets/config.yml", - "description": "Homer is a simple and lightweight static homepage generator that allows you to create and manage a home page for your server. It uses a YAML configuration file to define the layout and content of your homepage, making it easy to set up and customize. The generated homepage is static, meaning it does not require any server-side processing, making it fast and efficient to serve. Homer is designed to be a flexible and low-maintenance solution for organizing and accessing your services and information from a single, centralized location.", - "install_methods": [ - { - "type": "default", - "script": "ct/homer.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Configuration Path: `/opt/homer/assets/config.yml`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/hortusfox.json b/frontend/public/json/hortusfox.json deleted file mode 100644 index bfa6ad163..000000000 --- a/frontend/public/json/hortusfox.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "HortusFox", - "slug": "hortusfox", - "categories": [ - 24 - ], - "type": "ct", - "updateable": true, - "privileged": false, - "date_created": "2025-08-08", - "config_path": "/opt/hortusfox/.env", - "interface_port": 80, - "documentation": "https://github.com/danielbrendel/hortusfox-web", - "website": "https://www.hortusfox.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/hortusfox.webp", - "description": "HortusFox is a collaborative plant management system for plant enthusiasts. 
Manage, document and track your entire plant collection – self-hosted and privacy-friendly.", - "install_methods": [ - { - "type": "default", - "script": "ct/hortusfox.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 5, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Login Credentials : `cat ~/hortusfox.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/host-backup.json b/frontend/public/json/host-backup.json deleted file mode 100644 index 6b4cd3239..000000000 --- a/frontend/public/json/host-backup.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "PVE Host Backup", - "slug": "host-backup", - "categories": [ - 1 - ], - "date_created": "2024-04-29", - "type": "pve", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "", - "description": "This script serves as a versatile backup utility, enabling users to specify both the backup path and the directory they want to work in. 
This flexibility empowers users to select the specific files and directories they wish to back up, making it compatible with a wide range of hosts, not limited to Proxmox.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/host-backup.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within the Proxmox shell", - "type": "info" - }, - { - "text": "A backup is rendered ineffective when it remains stored on the host", - "type": "info" - } - ] -} diff --git a/frontend/public/json/hyperhdr.json b/frontend/public/json/hyperhdr.json deleted file mode 100644 index 086a2cbb6..000000000 --- a/frontend/public/json/hyperhdr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "HyperHDR", - "slug": "hyperhdr", - "categories": [ - 13 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": true, - "interface_port": 8090, - "documentation": "https://github.com/awawa-dev/HyperHDR/wiki", - "website": "https://github.com/awawa-dev/HyperHDR", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/hyperhdr.webp", - "config_path": "", - "description": "HyperHDR is a highly optimized open source ambient lighting implementation based on modern digital video and audio stream analysis.", - "install_methods": [ - { - "type": "default", - "script": "ct/hyperhdr.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/hyperion.json b/frontend/public/json/hyperion.json deleted file mode 100644 index 1be61d2d0..000000000 --- a/frontend/public/json/hyperion.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Hyperion", - "slug": "hyperion", - "categories": [ - 13 - ], - "date_created": "2024-05-02", - 
"type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8090, - "documentation": "https://docs.hyperion-project.org/", - "website": "https://hyperion-project.org/forum/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/hyperion.webp", - "config_path": "", - "description": "Hyperion is an opensource Ambient Lighting implementation. It supports many LED devices and video grabbers.", - "install_methods": [ - { - "type": "default", - "script": "ct/hyperion.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/immich-public-proxy.json b/frontend/public/json/immich-public-proxy.json deleted file mode 100644 index 7eac59dc6..000000000 --- a/frontend/public/json/immich-public-proxy.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Immich Public Proxy", - "slug": "immich-public-proxy", - "categories": [ - 21 - ], - "date_created": "2026-02-04", - "type": "addon", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://github.com/alangrainger/immich-public-proxy/tree/main/docs", - "website": "https://github.com/alangrainger/immich-public-proxy", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/immich-public-proxy.webp", - "config_path": "/opt/immich-proxy/app/.env", - "description": "Share your Immich photos and albums in a safe way without exposing your Immich instance to the public.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/immich-public-proxy.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Requires Node.js 24+", - "type": "info" - }, - { - "text": "Update with: update_immich-public-proxy", - "type": "info" - } - 
] -} diff --git a/frontend/public/json/immich.json b/frontend/public/json/immich.json deleted file mode 100644 index 520590e26..000000000 --- a/frontend/public/json/immich.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "name": "Immich", - "slug": "immich", - "categories": [ - 13 - ], - "date_created": "2025-06-01", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 2283, - "documentation": "https://immich.app/docs/overview/introduction", - "website": "https://immich.app", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/immich.webp", - "config_path": "/opt/immich/.env", - "description": "High performance self-hosted photo and video management solution.", - "install_methods": [ - { - "type": "default", - "script": "ct/immich.sh", - "resources": { - "cpu": 4, - "ram": 6144, - "hdd": 20, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Please be aware that Immich releases are pinned to specific versions until compatibility has been confirmed by the Community Scripts maintainers; as a result, the version installed by the helper script may not be the most current version of Immich", - "type": "info" - }, - { - "text": "During installation, you will be prompted with the option to install Intel OpenVINO for hardware-accelerated machine-learning. 
If you opt in, increase your LXC RAM after installation, as OpenVINO is memory-intensive", - "type": "info" - }, - { - "text": "HW-accelerated video transcoding is supported, but must be enabled in Immich Settings", - "type": "info" - }, - { - "text": "To change upload location, edit 'IMMICH_MEDIA_LOCATION' in `/opt/immich/.env`, retarget the symlink 'upload' in /opt/immich/app and /opt/immich/app/machine-learning to your new upload location, copy the default upload location `/opt/immich/upload` and its contents to the new upload location, confirm that the new upload location and its contents are owned by `immich:immich`, and restart the Immich server. See `https://github.com/community-scripts/ProxmoxVE/discussions/5075` for more information.", - "type": "info" - }, - { - "text": "Logs: `/var/log/immich`", - "type": "info" - }, - { - "text": "During first install, 5 custom libraries need to be compiled from source. Depending on your CPU, this can take anywhere between 15 minutes and 2 hours. Please be patient. 
Touch grass or something.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/immichframe.json b/frontend/public/json/immichframe.json deleted file mode 100644 index 2e88f892b..000000000 --- a/frontend/public/json/immichframe.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "ImmichFrame", - "slug": "immichframe", - "categories": [ - 13 - ], - "date_created": "2026-03-07", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://immichframe.dev/docs/overview", - "config_path": "/opt/immichframe/Config/Settings.yml", - "website": "https://immichframe.dev/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/immich-frame.webp", - "description": "ImmichFrame is a digital photo frame web application that connects to your Immich server and displays your photos as a fullscreen slideshow.", - "install_methods": [ - { - "type": "default", - "script": "ct/immichframe.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 8, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "After installation, edit `/opt/immichframe/Config/Settings.yml` and set ImmichServerUrl and ApiKey. 
Then restart the service with `systemctl restart immichframe`.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/infisical.json b/frontend/public/json/infisical.json deleted file mode 100644 index de8a42a19..000000000 --- a/frontend/public/json/infisical.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Infisical", - "slug": "infisical", - "categories": [ - 6 - ], - "date_created": "2025-11-07", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://infisical.com/docs/documentation/getting-started/overview", - "config_path": "/etc/infisical/infisical.rb", - "website": "https://infisical.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/infisical.webp", - "description": "Secrets, certificates, and access management on autopilot. All-in-one platform to securely manage application secrets, certificates, SSH keys, and configurations across your team and infrastructure.", - "install_methods": [ - { - "type": "default", - "script": "ct/infisical.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 6, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/influxdb.json b/frontend/public/json/influxdb.json deleted file mode 100644 index 29f9bad4a..000000000 --- a/frontend/public/json/influxdb.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "InfluxDB", - "slug": "influxdb", - "categories": [ - 8 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8086, - "documentation": "https://docs.influxdata.com/", - "website": "https://www.influxdata.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/influxdb.webp", - "config_path": "", - "description": "InfluxDB is designed to handle high write and query loads, and is optimized for storing and analyzing time-stamped data, such as 
metrics, events, and logs. InfluxDB supports SQL-like query language and has a built-in HTTP API for data ingestion and retrieval. It's commonly used for IoT and industrial applications where time-series data is involved.\r\n\r\nTelegraf is a server agent that collects, processes, and aggregates metrics and events data from different sources, such as systems, databases, and APIs, and outputs the data to various outputs, such as InfluxDB, Prometheus, Elasticsearch, and many others.", - "install_methods": [ - { - "type": "default", - "script": "ct/influxdb.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Port for V1: 8888", - "type": "info" - }, - { - "text": "Port for V2: 8086", - "type": "info" - } - ] -} diff --git a/frontend/public/json/inspircd.json b/frontend/public/json/inspircd.json deleted file mode 100644 index 7da3ce22b..000000000 --- a/frontend/public/json/inspircd.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "InspIRCd 4", - "slug": "inspircd", - "categories": [ - 24 - ], - "date_created": "2024-11-29", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 6667, - "documentation": "https://docs.inspircd.org/", - "website": "https://www.inspircd.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/inspircd.webp", - "config_path": "/etc/inspircd/inspircd.conf", - "description": "InspIRCd is a modular C++ Internet Relay Chat (IRC) server for UNIX-like and Windows systems.", - "install_methods": [ - { - "type": "default", - "script": "ct/inspircd.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/inventree.json b/frontend/public/json/inventree.json deleted file mode 100644 index 
c61381066..000000000 --- a/frontend/public/json/inventree.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "InvenTree", - "slug": "inventree", - "categories": [ - 25 - ], - "date_created": "2025-03-06", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://docs.inventree.org/en/latest/", - "website": "https://inventree.org", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/inventree.webp", - "config_path": "/etc/inventree/", - "description": "InvenTree is an open-source inventory management system which provides intuitive parts management and stock control. It is designed to be lightweight and easy to use for SME or hobbyist applications.", - "install_methods": [ - { - "type": "default", - "script": "ct/inventree.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 6, - "os": "ubuntu", - "version": "24.04" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "`cat /etc/inventree/admin_password.txt`" - }, - "notes": [ - { - "text": "Please read the documentation for your configuration needs.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/investbrain.json b/frontend/public/json/investbrain.json deleted file mode 100644 index 051661274..000000000 --- a/frontend/public/json/investbrain.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Investbrain", - "slug": "investbrain", - "categories": [ - 23 - ], - "date_created": "2026-01-13", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8000, - "documentation": "https://github.com/investbrainapp/investbrain", - "website": "https://investbra.in", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/investbrain.webp", - "config_path": "/opt/investbrain/.env", - "description": "Investbrain is a smart open-source investment tracker that helps you manage, track, and make informed decisions about your investments.", - "install_methods": [ - { - "type": "default", - 
"script": "ct/investbrain.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Database credentials: `cat ~/investbrain.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/invoiceninja.json b/frontend/public/json/invoiceninja.json deleted file mode 100644 index b2b0d992f..000000000 --- a/frontend/public/json/invoiceninja.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "InvoiceNinja", - "slug": "invoiceninja", - "categories": [ - 25 - ], - "date_created": "2025-12-12", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://invoiceninja.github.io/selfhost.html", - "config_path": "/opt/invoiceninja/.env", - "website": "https://invoiceninja.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/invoice-ninja.webp", - "description": "Invoice Ninja is a free, open-source invoicing, quotes, expenses and time-tracking application for freelancers and businesses. Features include recurring invoices, online payments, client portal, and multi-company support.", - "install_methods": [ - { - "type": "default", - "script": "ct/invoiceninja.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "First visit http://IP:8080/setup to complete the initial configuration", - "type": "info" - }, - { - "text": "Database credentials are stored in `~/invoiceninja.creds`. 
Use them in the above initial configuration step.", - "type": "info" - }, - { - "text": "Configure SMTP settings in /opt/invoiceninja/.env for email functionality", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/iobroker.json b/frontend/public/json/iobroker.json deleted file mode 100644 index 134ccfbf6..000000000 --- a/frontend/public/json/iobroker.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "ioBroker", - "slug": "iobroker", - "categories": [ - 16 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8081, - "documentation": "https://www.iobroker.net/#en/documentation", - "website": "https://www.iobroker.net/#en/intro", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/iobroker.webp", - "config_path": "", - "description": "ioBroker is an open-source platform for building and managing smart home automation systems. It provides a centralized control and management interface for connected devices, sensors, and other IoT devices. ioBroker integrates with a wide range of popular smart home systems, devices, and services, making it easy to automate tasks and processes, monitor and control devices, and collect and analyze data from a variety of sources. With its flexible architecture and easy-to-use interface, ioBroker is designed to make it simple for users to build and customize their own smart home automation systems, regardless of their technical background or experience.", - "install_methods": [ - { - "type": "default", - "script": "ct/iobroker.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "WARNING: Installation sources scripts outside of Community Scripts repo. 
Please check the source before installing.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/itsm-ng.json b/frontend/public/json/itsm-ng.json deleted file mode 100644 index 0c686e10f..000000000 --- a/frontend/public/json/itsm-ng.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "ITSM-NG", - "slug": "itsm-ng", - "categories": [ - 25 - ], - "date_created": "2025-07-01", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://wiki.itsm-ng.org/en/home", - "website": "https://itsm-ng.com", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/itsm-ng.webp", - "config_path": "/etc/itsm-ng", - "description": "ITSM-NG is a powerful, open-source IT Service Management (ITSM) solution designed for managing IT assets, software, licenses, and support processes in accordance with ITIL best practices. It offers integrated features for asset inventory, incident tracking, problem management, change requests, and service desk workflows.", - "install_methods": [ - { - "type": "default", - "script": "ct/itsm-ng.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 10, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "itsm", - "password": "itsm" - }, - "notes": [] -} diff --git a/frontend/public/json/jackett.json b/frontend/public/json/jackett.json deleted file mode 100644 index 67de86191..000000000 --- a/frontend/public/json/jackett.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Jackett", - "slug": "jackett", - "categories": [ - 11 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9117, - "documentation": "https://github.com/Jackett/Jackett/wiki", - "website": "https://github.com/Jackett/Jackett", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/jackett.webp", - "config_path": "/opt/.env", - "description": "Jackett supports a wide range of trackers, including popular ones like 
The Pirate Bay, RARBG, and Torrentz2, as well as many private trackers. It can be integrated with several BitTorrent clients, including qBittorrent, Deluge, and uTorrent, among others.", - "install_methods": [ - { - "type": "default", - "script": "ct/jackett.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/jeedom.json b/frontend/public/json/jeedom.json deleted file mode 100644 index 5bd877762..000000000 --- a/frontend/public/json/jeedom.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Jeedom", - "slug": "jeedom", - "categories": [ - 16 - ], - "date_created": "2025-07-29", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://doc.jeedom.com", - "config_path": "", - "website": "https://jeedom.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/jeedom.webp", - "description": "Jeedom is a home automation system that is free, open, and cloudless. It allows users to manage and automate various aspects of their homes by creating objects, installing plugins for added functionalities, and connecting to a Market account for services. It also supports direct access URLs and user management.", - "install_methods": [ - { - "type": "default", - "script": "ct/jeedom.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 16, - "os": "Debian", - "version": "12" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "admin" - }, - "notes": [ - { - "text": "WARNING: Installation sources scripts outside of Community Scripts repo. Please check the source before installing.", - "type": "warning" - }, - { - "text": "Only OS packages are updateable. 
To update Jeedom, please use the web interface.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/jellyfin.json b/frontend/public/json/jellyfin.json deleted file mode 100644 index 7bcfab60e..000000000 --- a/frontend/public/json/jellyfin.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "name": "Jellyfin Media Server", - "slug": "jellyfin", - "categories": [ - 13 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8096, - "documentation": "https://jellyfin.org/docs/", - "website": "https://jellyfin.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/jellyfin.webp", - "config_path": "/etc/jellyfin/", - "description": "Jellyfin is a free and open-source media server and suite of multimedia applications designed to organize, manage, and share digital media files to networked devices.", - "install_methods": [ - { - "type": "default", - "script": "ct/jellyfin.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 16, - "os": "ubuntu", - "version": "24.04" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "With Privileged/Unprivileged Hardware Acceleration Support", - "type": "info" - }, - { - "text": "FFmpeg path: /usr/lib/jellyfin-ffmpeg/ffmpeg", - "type": "info" - }, - { - "text": "For NVIDIA graphics cards, you'll need to install the same drivers in the container that you did on the host. In the container, run the driver installation script and add the CLI arg --no-kernel-module", - "type": "info" - }, - { - "text": "Log rotation is configured in /etc/logrotate.d/jellyfin. 
To reduce verbosity, change MinimumLevel in /etc/jellyfin/logging.json to Warning or Error (disables fail2ban auth logging).", - "type": "info" - } - ] -} diff --git a/frontend/public/json/jellystat.json b/frontend/public/json/jellystat.json deleted file mode 100644 index c4c829cb0..000000000 --- a/frontend/public/json/jellystat.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "Jellystat", - "slug": "jellystat", - "categories": [ - 9 - ], - "date_created": "2026-01-12", - "type": "addon", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://github.com/CyferShepard/Jellystat", - "website": "https://github.com/CyferShepard/Jellystat", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/jellystat.webp", - "config_path": "/opt/jellystat/.env", - "description": "A free and open source statistics app for Jellyfin", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/jellystat.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Requires Node.js 20+ and PostgreSQL (auto-installed if missing)", - "type": "info" - }, - { - "text": "Default PostgreSQL credentials: jellystat / jellystat", - "type": "info" - }, - { - "text": "Update with: update_jellystat", - "type": "info" - } - ] -} diff --git a/frontend/public/json/jenkins.json b/frontend/public/json/jenkins.json deleted file mode 100644 index fc7dad8c3..000000000 --- a/frontend/public/json/jenkins.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Jenkins", - "slug": "jenkins", - "categories": [ - 20 - ], - "date_created": "2024-12-26", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://www.jenkins.io/doc/", - "website": "https://www.jenkins.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/jenkins.webp", - 
"config_path": "", - "description": "Jenkins provides hundreds of plugins to support building, deploying and automating any project. ", - "install_methods": [ - { - "type": "default", - "script": "ct/jenkins.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/joplin-server.json b/frontend/public/json/joplin-server.json deleted file mode 100644 index 67bbdbd92..000000000 --- a/frontend/public/json/joplin-server.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Joplin Server", - "slug": "joplin-server", - "categories": [ - 12 - ], - "date_created": "2025-09-24", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 22300, - "documentation": "https://joplinapp.org/help/", - "config_path": "/opt/joplin-server/.env", - "website": "https://joplinapp.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/joplin.webp", - "description": "Joplin - the privacy-focused note taking app with sync capabilities for Windows, macOS, Linux, Android and iOS.", - "install_methods": [ - { - "type": "default", - "script": "ct/joplin-server.sh", - "resources": { - "cpu": 2, - "ram": 6144, - "hdd": 20, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin@localhost", - "password": "admin" - }, - "notes": [ - { - "text": "Application can take some time to build, depending on your host speed. Please be patient.", - "type": "info" - }, - { - "text": "Default RAM size for LXC is set to 6GB because of Node.js building process. 
You can lower it after application installs", - "type": "info" - } - ] -} diff --git a/frontend/public/json/jotty.json b/frontend/public/json/jotty.json deleted file mode 100644 index c04487c94..000000000 --- a/frontend/public/json/jotty.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "jotty", - "slug": "jotty", - "categories": [ - 12 - ], - "date_created": "2025-10-21", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://github.com/fccview/jotty/blob/main/README.md", - "website": "https://github.com/fccview/jotty", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/jotty.webp", - "config_path": "/opt/jotty/.env", - "description": "A simple, self-hosted app for your checklists and notes. Tired of bloated, cloud-based to-do apps? jotty is a lightweight alternative for managing your personal checklists and notes. It's built with Next.js 14, is easy to deploy, and keeps all your data on your own server.", - "install_methods": [ - { - "type": "default", - "script": "ct/jotty.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 5, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/jupyternotebook.json b/frontend/public/json/jupyternotebook.json deleted file mode 100644 index c33f0fa6f..000000000 --- a/frontend/public/json/jupyternotebook.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Jupyter Notebook", - "slug": "jupyternotebook", - "categories": [ - 20 - ], - "date_created": "2025-02-24", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8888, - "documentation": "https://jupyter-notebook.readthedocs.io/en/stable/", - "website": "https://jupyter.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/jupyter.webp", - "config_path": "", - "description": "The Jupyter Notebook is an open-source web application that allows you to 
create and share documents that contain live code, equations, visualizations and narrative text. Uses include: data cleaning and transformation, numerical simulation, statistical modeling, data visualization, machine learning, and much more.", - "install_methods": [ - { - "type": "default", - "script": "ct/jupyternotebook.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "ubuntu", - "version": "24.04" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "To get the token to access the Jupyter Notebook, run the following command: jupyter notebook list.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/kapowarr.json b/frontend/public/json/kapowarr.json deleted file mode 100644 index 348f28f2e..000000000 --- a/frontend/public/json/kapowarr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Kapowarr", - "slug": "kapowarr", - "categories": [ - 14 - ], - "date_created": "2025-06-30", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5656, - "documentation": "https://casvt.github.io/Kapowarr/general_info/workings/", - "website": "https://casvt.github.io/Kapowarr/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/kapowarr.webp", - "config_path": "", - "description": "Kapowarr allows you to build a digital library of comics. You can add volumes, map them to a folder and start managing! Download, rename, move and convert issues of the volume (including TPB's, One Shots, Hard Covers, and more). 
The whole process is automated and can be customised in the settings.", - "install_methods": [ - { - "type": "default", - "script": "ct/kapowarr.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/karakeep.json b/frontend/public/json/karakeep.json deleted file mode 100644 index e5ecffe95..000000000 --- a/frontend/public/json/karakeep.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "karakeep", - "slug": "karakeep", - "categories": [ - 12 - ], - "date_created": "2025-04-07", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://docs.karakeep.app/", - "website": "https://karakeep.app/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/karakeep.webp", - "config_path": "/etc/karakeep/karakeep.env", - "description": "karakeep (formerly: hoarder) is an AI-powered bookmarking tool that helps you save and organize your digital content. It automatically tags your links, notes, and images, making them easy to find later. 
With features like auto-fetching, lists, and full-text search, karakeep is the perfect tool for anyone who wants to keep track of their digital life.", - "install_methods": [ - { - "type": "default", - "script": "ct/karakeep.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 10, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/kasm.json b/frontend/public/json/kasm.json deleted file mode 100644 index 094289129..000000000 --- a/frontend/public/json/kasm.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "Kasm", - "slug": "kasm", - "categories": [ - 9 - ], - "date_created": "2025-05-26", - "type": "ct", - "updateable": true, - "privileged": true, - "config_path": "", - "interface_port": 443, - "documentation": "https://www.kasmweb.com/docs/", - "website": "https://www.kasmweb.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/kasm-workspaces.webp", - "description": "Kasm Workspaces is a container streaming platform that delivers browser-based access to desktops, applications, and web services with enhanced security and scalability.", - "install_methods": [ - { - "type": "default", - "script": "ct/kasm.sh", - "resources": { - "cpu": 2, - "ram": 8192, - "hdd": 50, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "WARNING: Installation sources scripts outside of Community Scripts repo. 
Please check the source before installing.", - "type": "warning" - }, - { - "text": "Kasm needs swap (on Proxmox host) and activated FUSE to be installed successfully!", - "type": "warning" - }, - { - "text": "Show credentials: `cat ~/kasm.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/kavita.json b/frontend/public/json/kavita.json deleted file mode 100644 index 9c6dff4c1..000000000 --- a/frontend/public/json/kavita.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Kavita", - "slug": "kavita", - "categories": [ - 13 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5000, - "documentation": "https://wiki.kavitareader.com/getting-started/", - "website": "https://www.kavitareader.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/kavita.webp", - "config_path": "", - "description": "Kavita is a fast, feature rich, cross platform reading server. Built with a focus for manga, and the goal of being a full solution for all your reading needs.", - "install_methods": [ - { - "type": "default", - "script": "ct/kavita.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "To enable folder adding append your lxc.conf on your host with 'lxc.environment: DOTNET_SYSTEM_GLOBALIZATION_INVARIANT=1'", - "type": "info" - } - ] -} diff --git a/frontend/public/json/kernel-clean.json b/frontend/public/json/kernel-clean.json deleted file mode 100644 index 14a7b271d..000000000 --- a/frontend/public/json/kernel-clean.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "PVE Kernel Clean", - "slug": "kernel-clean", - "categories": [ - 1 - ], - "date_created": "2024-04-29", - "type": "pve", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": null, - "logo": 
"https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "", - "description": "Cleaning unused kernel images is beneficial for reducing the length of the GRUB menu and freeing up disk space. By removing old, unused kernels, the system is able to conserve disk space and streamline the boot process.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/kernel-clean.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within the Proxmox shell", - "type": "info" - } - ] -} diff --git a/frontend/public/json/kernel-pin.json b/frontend/public/json/kernel-pin.json deleted file mode 100644 index 3f53700c4..000000000 --- a/frontend/public/json/kernel-pin.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "PVE Kernel Pin", - "slug": "kernel-pin", - "categories": [ - 1 - ], - "date_created": "2024-05-08", - "type": "pve", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "", - "description": "Kernel Pin is an essential tool for effortlessly managing kernel pinning and unpinning.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/kernel-pin.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within the Proxmox shell", - "type": "info" - } - ] -} diff --git a/frontend/public/json/keycloak.json b/frontend/public/json/keycloak.json deleted file mode 100644 index a52bad85a..000000000 --- a/frontend/public/json/keycloak.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Keycloak", - "slug": "keycloak", - "categories": [ - 6 - ], - 
"date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://www.keycloak.org/documentation", - "website": "https://www.keycloak.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/keycloak.webp", - "config_path": "/opt/keycloak/conf/keycloak.conf", - "description": "Keycloak is an open-source identity and access management solution that provides centralized authentication and authorization for modern applications and services. It enables organizations to secure their applications and services with a single sign-on (SSO) solution, reducing the need for users to remember multiple login credentials. Keycloak supports various authentication protocols, including SAML, OAuth, and OpenID Connect, and integrates with a wide range of applications and services. With Keycloak, administrators can manage user identities, define security policies, and monitor access to their applications and services. 
The software is designed to be scalable, flexible, and easy to use, making it a valuable tool for enhancing the security and usability of modern applications and services.", - "install_methods": [ - { - "type": "default", - "script": "ct/keycloak.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "tmpadm", - "password": "admin123" - }, - "notes": [ - { - "text": "First start can take a few minutes", - "type": "warning" - }, - { - "text": "When updating, if you had modified cache-ispn.xml: Re-apply your changes to the new file, otherwise leave it unchanged.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/kima-hub.json b/frontend/public/json/kima-hub.json deleted file mode 100644 index c5cec4084..000000000 --- a/frontend/public/json/kima-hub.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "Kima-Hub", - "slug": "kima-hub", - "categories": [ - 13 - ], - "date_created": "2026-02-26", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3030, - "documentation": "https://github.com/Chevron7Locked/kima-hub#readme", - "website": "https://github.com/Chevron7Locked/kima-hub", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/kima-hub.webp", - "config_path": "/opt/kima-hub/backend/.env", - "description": "Self-hosted, on-demand audio streaming platform with AI-powered vibe matching, mood detection, smart playlists, and Lidarr/Audiobookshelf integration.", - "install_methods": [ - { - "type": "default", - "script": "ct/kima-hub.sh", - "resources": { - "cpu": 4, - "ram": 8192, - "hdd": 20, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "First user to register becomes the administrator.", - "type": "info" - }, - { - "text": "Mount your music library to /music in the container.", - "type": "warning" - }, - { - "text": "Audio 
analysis (mood/vibe detection) requires significant RAM (2-4GB per worker).", - "type": "info" - } - ] -} diff --git a/frontend/public/json/kimai.json b/frontend/public/json/kimai.json deleted file mode 100644 index 8d43be7a1..000000000 --- a/frontend/public/json/kimai.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Kimai", - "slug": "kimai", - "categories": [ - 25 - ], - "date_created": "2024-11-20", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://www.kimai.org/documentation/", - "website": "https://www.kimai.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/kimai.webp", - "config_path": "/opt/kimai/.env", - "description": "Kimai is an open-source time-tracking software designed for freelancers, small teams, and businesses to efficiently track, manage, and analyze work hours. This web-based tool enables users to log their work time with ease, associating entries with specific clients, projects, and tasks.", - "install_methods": [ - { - "type": "default", - "script": "ct/kimai.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 7, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin@helper-scripts.com", - "password": "helper-scripts.com" - }, - "notes": [] -} diff --git a/frontend/public/json/kitchenowl.json b/frontend/public/json/kitchenowl.json deleted file mode 100644 index f1e4a65e8..000000000 --- a/frontend/public/json/kitchenowl.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "KitchenOwl", - "slug": "kitchenowl", - "categories": [ - 13 - ], - "date_created": "2026-02-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://docs.kitchenowl.org/", - "website": "https://kitchenowl.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/kitchenowl.webp", - "config_path": "/opt/kitchenowl/kitchenowl.env", - "description": "KitchenOwl is a smart 
self-hosted grocery list and recipe manager with real-time synchronization, recipe management, meal planning, and expense tracking.", - "install_methods": [ - { - "type": "default", - "script": "ct/kitchenowl.sh", - "resources": { - "cpu": 1, - "ram": 2048, - "hdd": 6, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/koel.json b/frontend/public/json/koel.json deleted file mode 100644 index b5aab6bf0..000000000 --- a/frontend/public/json/koel.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "Koel", - "slug": "koel", - "categories": [ - 13 - ], - "date_created": "2025-12-15", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://docs.koel.dev/", - "config_path": "/opt/koel/.env", - "website": "https://koel.dev/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/koel-light.webp", - "description": "Koel is a simple web-based personal audio streaming service written in Vue and Laravel. 
It supports multiple users, audio visualization, smart playlists, YouTube integration, and Last.fm scrobbling.", - "install_methods": [ - { - "type": "default", - "script": "ct/koel.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin@koel.dev", - "password": "KoelIsCool" - }, - "notes": [ - { - "text": "Media files should be placed in /opt/koel_media", - "type": "info" - }, - { - "text": "Database credentials are stored in ~/koel.creds", - "type": "info" - }, - { - "text": "Music library is scanned hourly via cron job", - "type": "info" - } - ] -} diff --git a/frontend/public/json/koillection.json b/frontend/public/json/koillection.json deleted file mode 100644 index e92b1a3d2..000000000 --- a/frontend/public/json/koillection.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Koillection", - "slug": "koillection", - "categories": [ - 24 - ], - "date_created": "2025-02-04", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://github.com/benjaminjonard/koillection/wiki", - "website": "https://koillection.github.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/koillection.webp", - "config_path": "ct/koillection.sh", - "description": "Koillection is a self-hosted collection manager created to keep track of physical (mostly) collections of any kind like books, DVDs, stamps, games... Koillection is meant to be used for any kind of collections and doesn't come with pre-built metadata download. 
But you can tailor your own HTML scraper, or you can add your own metadata freely.", - "install_methods": [ - { - "type": "default", - "script": "ct/koillection.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/kometa.json b/frontend/public/json/kometa.json deleted file mode 100644 index 2ecf4392d..000000000 --- a/frontend/public/json/kometa.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Kometa", - "slug": "kometa", - "categories": [ - 13 - ], - "date_created": "2025-02-12", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://kometa.wiki/en/latest/", - "website": "https://github.com/Kometa-Team/Kometa", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/kometa.webp", - "config_path": "/opt/kometa/config/config.yml", - "description": "Kometa (formerly known as Plex Meta Manager) is a powerful tool designed to give you complete control over your media libraries", - "install_methods": [ - { - "type": "default", - "script": "ct/kometa.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "During installation you will be prompted to input your TMDb key, Plex URL and Plex token. 
Make sure you have them ready.", - "type": "info" - }, - { - "text": "Configuration file is at `/opt/kometa/config/config.yml`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/komga.json b/frontend/public/json/komga.json deleted file mode 100644 index 0731ec031..000000000 --- a/frontend/public/json/komga.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Komga", - "slug": "komga", - "categories": [ - 13 - ], - "date_created": "2024-11-15", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 25600, - "documentation": "https://komga.org/docs/introduction", - "website": "https://komga.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/komga.webp", - "config_path": "~/.komga/application.yml", - "description": "A media server for your comics, mangas, BDs, magazines and eBooks. Organize your CBZ, CBR, PDF and EPUB files in different libraries, collections or reading lists. Use the integrated Webreader, the Mihon extension, any OPDS reader, or other integrations. 
Edit metadata for your series and books.", - "install_methods": [ - { - "type": "default", - "script": "ct/komga.sh", - "resources": { - "cpu": 1, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Starting Komga (Web UI) may take up to 2 minutes after a restart or fresh installation.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/komodo.json b/frontend/public/json/komodo.json deleted file mode 100644 index e594ffc60..000000000 --- a/frontend/public/json/komodo.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "name": "Komodo", - "slug": "komodo", - "categories": [ - 3 - ], - "date_created": "2025-01-01", - "type": "addon", - "updateable": true, - "privileged": false, - "interface_port": 9120, - "documentation": "https://komo.do/docs/intro", - "website": "https://komo.do", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/komodo.webp", - "config_path": "/opt/komodo/compose.env", - "description": "Komodo is a build and deployment system that automates the creation of versioned Docker images from Git repositories and facilitates the deployment of Docker containers and Docker Compose setups. It provides features such as build automation triggered by Git pushes, deployment management, and monitoring of uptime and logs across multiple servers. 
The core API and associated agent are developed in Rust.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/komodo.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "This is an addon script intended to be used on top of an existing Docker container.", - "type": "info" - }, - { - "text": "Execute within an existing LXC console (Debian / Ubuntu / Alpine supported)", - "type": "info" - }, - { - "text": "For admin username and password, run: cat ~/komodo.creds", - "type": "info" - }, - { - "text": "To update, run the addon script again and select Update, or use: bash <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/tools/addon/komodo.sh)", - "type": "info" - } - ] -} diff --git a/frontend/public/json/kubo.json b/frontend/public/json/kubo.json deleted file mode 100644 index 84781ed98..000000000 --- a/frontend/public/json/kubo.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Kubo", - "slug": "kubo", - "categories": [ - 4 - ], - "date_created": "2024-06-27", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5001, - "documentation": "https://docs.ipfs.tech/how-to/command-line-quick-start/", - "website": "https://github.com/ipfs/kubo", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ipfs.webp", - "config_path": "~/.ipfs/config", - "description": "Kubo, developed by IPFS, is a decentralized file storage and sharing protocol. It implements the IPFS protocol, allowing users to manage files across a distributed network, ensuring data integrity and availability. 
Kubo supports file versioning, pinning, provides APIs and CLI tools for developers, and allows customizable node configurations for enhanced privacy and control.", - "install_methods": [ - { - "type": "default", - "script": "ct/kubo.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/kutt.json b/frontend/public/json/kutt.json deleted file mode 100644 index 53ed7e74b..000000000 --- a/frontend/public/json/kutt.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Kutt", - "slug": "kutt", - "categories": [ - 21 - ], - "date_created": "2026-01-14", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 443, - "documentation": "https://github.com/thedevs-network/kutt/", - "config_path": "/opt/kutt/.env", - "website": "https://kutt.it", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/kutt.webp", - "description": "Kutt is a modern URL shortener with support for custom domains. Create and edit links, view statistics, manage users, and more.", - "install_methods": [ - { - "type": "default", - "script": "ct/kutt.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 2, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Kutt needs so be served with an SSL certificate for its login to work. 
During install, you will be prompted to choose if you want to have Caddy installed for SSL termination or if you want to use your own reverse proxy (in that case point your reverse proxy to port 3000).", - "type": "info" - } - ] -} diff --git a/frontend/public/json/languagetool.json b/frontend/public/json/languagetool.json deleted file mode 100644 index db1af3640..000000000 --- a/frontend/public/json/languagetool.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "LanguageTool", - "slug": "languagetool", - "categories": [ - 0 - ], - "date_created": "2026-01-30", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8081, - "documentation": "https://dev.languagetool.org/", - "config_path": "/opt/LanguageTool/server.properties", - "website": "https://languagetool.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/languagetool.webp", - "description": "LanguageTool is an Open Source proofreading software for English, Spanish, French, German, Portuguese, Polish, Dutch, and more than 20 other languages. 
It finds many errors that a simple spell checker cannot detect.", - "install_methods": [ - { - "type": "default", - "script": "ct/languagetool.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 16, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "API is available at `http://:8081/v2`.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/lazylibrarian.json b/frontend/public/json/lazylibrarian.json deleted file mode 100644 index a2b06caf7..000000000 --- a/frontend/public/json/lazylibrarian.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "LazyLibrarian", - "slug": "lazylibrarian", - "categories": [ - 13 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5299, - "documentation": "https://lazylibrarian.gitlab.io/lazylibrarian.gitlab.io/", - "website": "https://gitlab.com/LazyLibrarian/LazyLibrarian", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/lazylibrarian.webp", - "config_path": "/opt/LazyLibrarian/config.ini", - "description": "LazyLibrarian is a SickBeard, CouchPotato, Headphones-like application for ebooks, audiobooks and magazines.", - "install_methods": [ - { - "type": "default", - "script": "ct/lazylibrarian.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/leantime.json b/frontend/public/json/leantime.json deleted file mode 100644 index e103de7f0..000000000 --- a/frontend/public/json/leantime.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Leantime", - "slug": "leantime", - "categories": [ - 12 - ], - "date_created": "2025-09-06", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://docs.leantime.io/", - "config_path": 
"/opt/Leantime/config/.env", - "website": "https://leantime.io", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/leantime.webp", - "description": "Leantime is a goals focused project management system for non-project managers. Building with ADHD, Autism, and dyslexia in mind. ", - "install_methods": [ - { - "type": "default", - "script": "ct/leantime.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 20, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/librenms.json b/frontend/public/json/librenms.json deleted file mode 100644 index 7058a7988..000000000 --- a/frontend/public/json/librenms.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "LibreNMS", - "slug": "librenms", - "categories": [ - 9 - ], - "date_created": "2025-11-14", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://docs.librenms.org/", - "website": "https://librenms.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/librenms.webp", - "config_path": "/opt/librenms/config.php and /opt/librenms/.env", - "description": "LibreNMS is an open-source, community-driven network monitoring system that provides automatic discovery, alerting, and performance tracking for network devices. 
It supports a wide range of hardware and integrates with various notification and logging platforms.", - "install_methods": [ - { - "type": "default", - "script": "ct/librenms.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "After installation, the admin user credentials are saved in the file ~/librenms.creds inside the container.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/librespeed-rust.json b/frontend/public/json/librespeed-rust.json deleted file mode 100644 index 66cd1ef0d..000000000 --- a/frontend/public/json/librespeed-rust.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Librespeed Rust", - "slug": "librespeed-rust", - "categories": [ - 4 - ], - "date_created": "2025-07-01", - "type": "ct", - "updateable": true, - "privileged": false, - "config_path": "/var/lib/librespeed-rs/configs.toml", - "interface_port": 8080, - "documentation": "https://github.com/librespeed/speedtest-rust", - "website": "https://github.com/librespeed/speedtest-rust", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/librespeed.webp", - "description": "Librespeed is a no flash, no java, no websocket speedtest server. 
This community script deploys the rust version for simplicity and low resource usage.", - "install_methods": [ - { - "type": "default", - "script": "ct/librespeed-rust.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/libretranslate.json b/frontend/public/json/libretranslate.json deleted file mode 100644 index e587f132a..000000000 --- a/frontend/public/json/libretranslate.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "LibreTranslate", - "slug": "libretranslate", - "categories": [ - 0 - ], - "date_created": "2025-06-15", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5000, - "documentation": "https://github.com/LibreTranslate/LibreTranslate?tab=readme-ov-file#settings--flags", - "website": "https://libretranslate.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/libretranslate.webp", - "config_path": "/opt/libretranslate/.env", - "description": "Free and Open Source Machine Translation API, entirely self-hosted. Unlike other APIs, it doesn't rely on proprietary providers such as Google or Azure to perform translations. Instead, its translation engine is powered by the open source Argos Translate library.", - "install_methods": [ - { - "type": "default", - "script": "ct/libretranslate.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 20, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "During the installation, application will download language models used for translation. Depending on how fast your internet/host is, this can take 5-10 minutes.", - "type": "info" - }, - { - "text": "At every boot of LXC, application will look for updates for language models installed. 
This can prolong the startup of the LXC.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/lidarr.json b/frontend/public/json/lidarr.json deleted file mode 100644 index efc852ea7..000000000 --- a/frontend/public/json/lidarr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Lidarr", - "slug": "lidarr", - "categories": [ - 14 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8686, - "documentation": "https://wiki.servarr.com/en/lidarr", - "website": "https://lidarr.audio/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/lidarr.webp", - "config_path": "/var/lib/lidarr/config.xml", - "description": "Lidarr is a music management tool designed for Usenet and BitTorrent users. It allows users to manage and organize their music collection with ease. Lidarr integrates with popular Usenet and BitTorrent clients, such as Sonarr and Radarr, to automate the downloading and organizing of music files. The software provides a web-based interface for managing and organizing music, making it easy to search and find songs, albums, and artists. Lidarr also supports metadata management, including album art, artist information, and lyrics, making it easy for users to keep their music collection organized and up-to-date. The software is designed to be easy to use and provides a simple and intuitive interface for managing and organizing music collections, making it a valuable tool for music lovers who want to keep their collection organized and up-to-date. 
With Lidarr, users can enjoy their music collection from anywhere, making it a powerful tool for managing and sharing music files.", - "install_methods": [ - { - "type": "default", - "script": "ct/lidarr.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/limesurvey.json b/frontend/public/json/limesurvey.json deleted file mode 100644 index 1811c1db9..000000000 --- a/frontend/public/json/limesurvey.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "LimeSurvey", - "slug": "limesurvey", - "categories": [ - 25 - ], - "date_created": "2025-10-15", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://www.limesurvey.org/manual/LimeSurvey_Manual", - "config_path": "", - "website": "https://community.limesurvey.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/limesurvey.webp", - "description": "LimeSurvey is the simple, quick and anonymous online survey tool that's bursting with juicy insights. Calling students, professionals and enterprises: design a survey and get the best insights, it’s free and as easy as squeezing a lime. Make a free online survey now!", - "install_methods": [ - { - "type": "default", - "script": "ct/limesurvey.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "You will need to input database credentials into LimeSurvey installer. 
Use `cat ~/limesurvey.creds` inside LXC.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/linkding.json b/frontend/public/json/linkding.json deleted file mode 100644 index 366832701..000000000 --- a/frontend/public/json/linkding.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "linkding", - "slug": "linkding", - "categories": [ - 12 - ], - "date_created": "2026-02-16", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9090, - "documentation": "https://linkding.link/", - "website": "https://linkding.link/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/linkding.webp", - "config_path": "/opt/linkding/.env", - "description": "linkding is a self-hosted bookmark manager that is designed to be minimal, fast, and easy to set up. It features a clean UI, tag-based organization, bulk editing, Markdown notes, read it later functionality, sharing, REST API, and browser extensions for Firefox and Chrome.", - "install_methods": [ - { - "type": "default", - "script": "ct/linkding.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": null - }, - "notes": [ - { - "text": "Admin credentials are stored in /opt/linkding/.env", - "type": "info" - } - ] -} diff --git a/frontend/public/json/linkstack.json b/frontend/public/json/linkstack.json deleted file mode 100644 index 9f95ac5df..000000000 --- a/frontend/public/json/linkstack.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "LinkStack", - "slug": "linkstack", - "categories": [ - 9 - ], - "date_created": "2025-07-22", - "type": "ct", - "updateable": true, - "privileged": false, - "config_path": "/var/www/html/linkstack/.env", - "interface_port": 80, - "documentation": "https://docs.linkstack.org/", - "website": "https://linkstack.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/linkstack.webp", - "description": "LinkStack is an 
open-source, self-hosted alternative to Linktree, allowing users to create a customizable profile page to share multiple links, hosted on their own server.", - "install_methods": [ - { - "type": "default", - "script": "ct/linkstack.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 5, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "LinkStack can be updated via the user interface.", - "type": "info" - }, - { - "text": "Complete setup via the web interface at http:///. Check installation logs: `cat ~/linkstack-install.log`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/linkwarden.json b/frontend/public/json/linkwarden.json deleted file mode 100644 index 960fb808c..000000000 --- a/frontend/public/json/linkwarden.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Linkwarden", - "slug": "linkwarden", - "categories": [ - 12 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://docs.linkwarden.app/", - "website": "https://linkwarden.app/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/linkwarden.webp", - "config_path": "/opt/linkwarden/.env", - "description": "Linkwarden is a fully self-hostable, open-source collaborative bookmark manager to collect, organize and archive webpages.", - "install_methods": [ - { - "type": "default", - "script": "ct/linkwarden.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 12, - "os": "ubuntu", - "version": "24.04" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/listmonk.json b/frontend/public/json/listmonk.json deleted file mode 100644 index 63728d172..000000000 --- a/frontend/public/json/listmonk.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "listmonk", - "slug": "listmonk", - "categories": [ - 0 - ], - 
"date_created": "2024-11-22", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9000, - "documentation": "https://listmonk.app/docs/", - "website": "https://listmonk.app/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/listmonk.webp", - "config_path": "/opt/listmonk/config.toml", - "description": "High performance, self-hosted, newsletter and mailing list manager with a modern dashboard.", - "install_methods": [ - { - "type": "default", - "script": "ct/listmonk.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/litellm.json b/frontend/public/json/litellm.json deleted file mode 100644 index 02d23bc3e..000000000 --- a/frontend/public/json/litellm.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "LiteLLM", - "slug": "litellm", - "categories": [ - 20 - ], - "date_created": "2025-08-21", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 4000, - "documentation": "https://docs.litellm.ai/", - "config_path": "/opt/litellm/litellm.yaml", - "website": "https://www.litellm.ai/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/litellm-light.webp", - "description": "LLM proxy to call 100+ LLMs in a unified interface & track spend, set budgets per virtual key/user", - "install_methods": [ - { - "type": "default", - "script": "ct/litellm.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "sk-1234" - }, - "notes": [ - { - "text": "Update master key in the config file", - "type": "info" - } - ] -} diff --git a/frontend/public/json/livebook.json b/frontend/public/json/livebook.json deleted file mode 100644 index af8d42716..000000000 --- a/frontend/public/json/livebook.json +++ /dev/null @@ 
-1,40 +0,0 @@ -{ - "name": "Livebook", - "slug": "livebook", - "categories": [ - 20 - ], - "date_created": "2025-10-30", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://hexdocs.pm/livebook/readme.html", - "config_path": null, - "website": "https://livebook.dev", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/livebook.webp", - "description": "Elixir Livebook is an interactive, web-based notebook platform for Elixir that combines code, documentation, and visualizations in a single document. Similar to Jupyter notebooks, it allows developers to write and execute Elixir code in real-time, making it ideal for data exploration, prototyping, learning, and collaborative development. Livebook features rich markdown support, built-in charting capabilities, and seamless integration with the Elixir ecosystem.", - "install_methods": [ - { - "type": "default", - "script": "ct/livebook.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "Ubuntu", - "version": "24.04" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Show initial Livebook password: `cat /opt/livebook/livebook.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/lldap.json b/frontend/public/json/lldap.json deleted file mode 100644 index de83e3bd9..000000000 --- a/frontend/public/json/lldap.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "lldap", - "slug": "lldap", - "categories": [ - 6 - ], - "date_created": "2024-08-06", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 17170, - "documentation": "https://github.com/lldap/lldap/blob/main/README.md", - "website": "https://github.com/lldap/lldap", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/lldap.webp", - "config_path": "", - "description": "LLDAP is a lightweight LDAP server designed for simplicity and ease of use. 
It provides secure user authentication and authorization management through LDAP over TLS. Ideal for small to medium-sized environments, It aims to streamline identity management tasks with a minimalistic and straightforward setup.", - "install_methods": [ - { - "type": "default", - "script": "ct/lldap.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "password" - }, - "notes": [] -} diff --git a/frontend/public/json/loki.json b/frontend/public/json/loki.json deleted file mode 100644 index c48504fb5..000000000 --- a/frontend/public/json/loki.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "name": "Loki", - "slug": "loki", - "categories": [ - 9 - ], - "date_created": "2026-01-22", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3100, - "documentation": "https://grafana.com/docs/loki/latest/", - "website": "https://github.com/grafana/loki", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/loki.webp", - "config_path": "Debian: /etc/loki/config.yml | Alpine: /etc/loki/loki-local-config.yaml", - "description": "Grafana Loki is a set of open source components that can be composed into a fully featured logging stack. 
A small index and highly compressed chunks simplifies the operation and significantly lowers the cost of Loki.", - "install_methods": [ - { - "type": "default", - "script": "ct/loki.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-loki.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 1, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Promtail can be optionally installed during setup to collect and ship logs to Loki.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/lubelogger.json b/frontend/public/json/lubelogger.json deleted file mode 100644 index 23091ea29..000000000 --- a/frontend/public/json/lubelogger.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "LubeLogger", - "slug": "lubelogger", - "categories": [ - 24 - ], - "date_created": "2024-11-29", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5000, - "documentation": "https://docs.lubelogger.com/", - "website": "https://lubelogger.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/lubelogger.webp", - "config_path": "/opt/lubelogger/appsettings.json", - "description": "Web-Based Vehicle Maintenance and Fuel Mileage Tracker", - "install_methods": [ - { - "type": "default", - "script": "ct/lubelogger.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/lxc-delete.json b/frontend/public/json/lxc-delete.json deleted file mode 100644 index eb9f87c11..000000000 --- a/frontend/public/json/lxc-delete.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "PVE LXC Deletion", - "slug": "lxc-delete", - "categories": [ - 1 - ], - "date_created": "2025-01-21", - "type": "pve", - 
"updateable": false, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/linuxcontainers.webp", - "config_path": "", - "description": "This script helps manage and delete LXC containers on a Proxmox VE server. It lists all available containers, allowing the user to select one or more for deletion through an interactive menu. Running containers are automatically stopped before deletion, and the user is asked to confirm each action. The script ensures a controlled and efficient container management process.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/lxc-delete.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within the Proxmox shell", - "type": "info" - } - ] -} diff --git a/frontend/public/json/lyrionmusicserver.json b/frontend/public/json/lyrionmusicserver.json deleted file mode 100644 index a9fc01f1d..000000000 --- a/frontend/public/json/lyrionmusicserver.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Lyrion Music Server", - "slug": "lyrionmusicserver", - "categories": [ - 13 - ], - "date_created": "2025-06-05", - "type": "ct", - "updateable": true, - "privileged": false, - "config_path": "/etc/default/lyrionmusicserver", - "interface_port": 9000, - "documentation": "https://lyrion.org/", - "website": "https://lyrion.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/lyrion-music-server.webp", - "description": "Lyrion Music Server is an open-source server software to stream local music collections, internet radio, and music services to Squeezebox and compatible audio players.", - "install_methods": [ - { - "type": "default", - "script": "ct/lyrionmusicserver.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 3, - "os": "Debian", - 
"version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/mafl.json b/frontend/public/json/mafl.json deleted file mode 100644 index cbedfefd3..000000000 --- a/frontend/public/json/mafl.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Mafl", - "slug": "mafl", - "categories": [ - 10 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://mafl.hywax.space/reference/configuration.html", - "website": "https://mafl.hywax.space/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/mafl.webp", - "config_path": "/opt/mafl/data/config.yml", - "description": "Mafl is an intuitive service for organizing your homepage. Customize Mafl to your individual needs and work even more efficiently!", - "install_methods": [ - { - "type": "default", - "script": "ct/mafl.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 6, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Configuration Path: `/opt/mafl/data/config.yml`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/magicmirror.json b/frontend/public/json/magicmirror.json deleted file mode 100644 index 319ff0968..000000000 --- a/frontend/public/json/magicmirror.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "MagicMirror Server", - "slug": "magicmirror", - "categories": [ - 24 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://docs.magicmirror.builders/configuration/introduction.html#configuring-your-magicmirror", - "website": "https://docs.magicmirror.builders/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/magicmirror2.webp", - "config_path": "/opt/magicmirror/config/config.js", - "description": 
"MagicMirror\u00b2 is a smart mirror software that allows you to build your own personal smart mirror. It uses modular components that you can customize to display information such as the weather, news, calendar, to-do list, and more. The platform is open source, allowing for community contributions and customization.", - "install_methods": [ - { - "type": "default", - "script": "ct/magicmirror.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 3, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Configuration Path: `/opt/magicmirror/config/config.js`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/mail-archiver.json b/frontend/public/json/mail-archiver.json deleted file mode 100644 index d788d6be3..000000000 --- a/frontend/public/json/mail-archiver.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Mail-Archiver", - "slug": "mail-archiver", - "categories": [ - 7 - ], - "date_created": "2025-12-28", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5000, - "documentation": "https://github.com/s1t5/mail-archiver/blob/main/doc/Index.md", - "config_path": "/opt/mail-archiver/.env, /opt/mail-archiver/appsettings.json", - "website": "https://github.com/s1t5/mail-archiver", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/mail-archiver.webp", - "description": "Mail-Archiver is a web application for archiving, searching, and exporting emails from multiple accounts. Featuring folder sync, attachment support, mailbox migration and a dashboard.", - "install_methods": [ - { - "type": "default", - "script": "ct/mail-archiver.sh", - "resources": { - "cpu": 1, - "ram": 2048, - "hdd": 8, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "secure123!" 
- }, - "notes": [] -} diff --git a/frontend/public/json/managemydamnlife.json b/frontend/public/json/managemydamnlife.json deleted file mode 100644 index 15fe90f34..000000000 --- a/frontend/public/json/managemydamnlife.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Manage My Damn Life", - "slug": "managemydamnlife", - "categories": [ - 0 - ], - "date_created": "2025-06-12", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://manage-my-damn-life-nextjs.readthedocs.io/en/latest/", - "config_path": "/opt/mmdl/.env", - "website": "https://github.com/intri-in/manage-my-damn-life-nextjs", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/manage-my-damn-life.webp", - "description": "Manage My Damn Life (MMDL) is a self-hosted front end for managing your CalDAV tasks and calendars.", - "install_methods": [ - { - "type": "default", - "script": "ct/managemydamnlife.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 6, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/manyfold.json b/frontend/public/json/manyfold.json deleted file mode 100644 index 0394bc829..000000000 --- a/frontend/public/json/manyfold.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Manyfold", - "slug": "manyfold", - "categories": [ - 24 - ], - "date_created": "2026-01-24", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://manyfold.app/sysadmin/", - "website": "https://manyfold.app/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/manyfold.webp", - "config_path": "/opt/manyfold/.env", - "description": "Manyfold is an open source, self-hosted web application for managing a collection of 3d models, particularly focused on 3d printing.", - "install_methods": [ - { - "type": "default", - "script": "ct/manyfold.sh", - 
"resources": { - "cpu": 4, - "ram": 4096, - "hdd": 15, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Setup library on first connection in /opt/manyfold/data", - "type": "info" - } - ] -} diff --git a/frontend/public/json/mariadb.json b/frontend/public/json/mariadb.json deleted file mode 100644 index 2fd02b377..000000000 --- a/frontend/public/json/mariadb.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "name": "Mariadb", - "slug": "mariadb", - "categories": [ - 8 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3306, - "documentation": "https://github.com/community-scripts/ProxmoxVE/discussions/192", - "website": "https://mariadb.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/mariadb.webp", - "config_path": "", - "description": "MariaDB is a fork of the popular MySQL database management system that is developed and maintained by the open-source community. It is also commercially supported, offering enterprise-level features and support for organizations that require them. 
MariaDB aims to maintain high compatibility with MySQL, ensuring a drop-in replacement capability.", - "install_methods": [ - { - "type": "default", - "script": "ct/mariadb.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-mariadb.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 1, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "This script requires some extra steps after the installation, Please checkout the 'documentation'", - "type": "info" - }, - { - "text": "Access Adminer Web UI at `http:///adminer.php`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/matterbridge.json b/frontend/public/json/matterbridge.json deleted file mode 100644 index e3a1f297d..000000000 --- a/frontend/public/json/matterbridge.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Matterbridge", - "slug": "matterbridge", - "categories": [ - 17 - ], - "date_created": "2024-06-12", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8283, - "documentation": "https://github.com/Luligu/matterbridge/blob/main/README.md", - "website": "https://github.com/Luligu/matterbridge", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/matterbridge.webp", - "config_path": "", - "description": "Matterbridge allows you to have all your Matter devices up and running in a couple of minutes without having to deal with the pairing process of each single device.", - "install_methods": [ - { - "type": "default", - "script": "ct/matterbridge.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "If the LXC is created Privileged, the script will automatically set up USB passthrough.", - "type": 
"warning" - }, - { - "text": "Updatable via the Matterbridge WebUI", - "type": "info" - } - ] -} diff --git a/frontend/public/json/mattermost.json b/frontend/public/json/mattermost.json deleted file mode 100644 index 5a530221f..000000000 --- a/frontend/public/json/mattermost.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Mattermost", - "slug": "mattermost", - "categories": [ - 25 - ], - "date_created": "2025-01-30", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8065, - "documentation": "https://docs.mattermost.com/", - "website": "https://mattermost.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/mattermost.webp", - "config_path": "/opt/mattermost/config/config.json", - "description": "Mattermost is an open source platform for secure collaboration across the entire software development lifecycle. It's written in Go and React and runs as a single Linux binary with MySQL or PostgreSQL. It has a slimilar interface and features to Slack or Discord.", - "install_methods": [ - { - "type": "default", - "script": "ct/mattermost.sh", - "resources": { - "cpu": 1, - "ram": 2048, - "hdd": 8, - "os": "ubuntu", - "version": "24.04" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "WARNING: Installation sources scripts outside of Community Scripts repo. 
Please check the source before installing.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/mealie.json b/frontend/public/json/mealie.json deleted file mode 100644 index 43f2bd26b..000000000 --- a/frontend/public/json/mealie.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Mealie", - "slug": "mealie", - "categories": [ - 13 - ], - "date_created": "2025-07-14", - "type": "ct", - "updateable": true, - "privileged": false, - "config_path": "/opt/mealie/mealie.env", - "interface_port": 9000, - "documentation": "https://mealie.io/", - "website": "https://mealie.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/mealie.webp", - "description": "Mealie is a self hosted recipe manager, meal planner and shopping list with a RestAPI backend and a reactive frontend built in Vue for a pleasant user experience for the whole family. Easily add recipes into your database by providing the URL and Mealie will automatically import the relevant data, or add a family recipe with the UI editor. 
Mealie also provides an API for interactions from 3rd party applications.", - "install_methods": [ - { - "type": "default", - "script": "ct/mealie.sh", - "resources": { - "cpu": 2, - "ram": 3072, - "hdd": 10, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/mediamanager.json b/frontend/public/json/mediamanager.json deleted file mode 100644 index 87db184f2..000000000 --- a/frontend/public/json/mediamanager.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "name": "MediaManager", - "slug": "mediamanager", - "categories": [ - 14, - 13 - ], - "date_created": "2025-08-28", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8000, - "documentation": "https://maxdorninger.github.io/MediaManager/latest/", - "config_path": "/opt/mm/config/config.toml", - "website": "https://github.com/maxdorninger/MediaManager", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/mediamanager.webp", - "description": "A modern selfhosted media management system for your media library", - "install_methods": [ - { - "type": "default", - "script": "ct/mediamanager.sh", - "resources": { - "cpu": 2, - "ram": 3072, - "hdd": 4, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "", - "password": "admin" - }, - "notes": [ - { - "text": "During the installation, provide the email address of the first admin user", - "type": "info" - }, - { - "text": "You're probably going to want to use a bind mount for the media directories", - "type": "info" - } - ] -} diff --git a/frontend/public/json/mediamtx.json b/frontend/public/json/mediamtx.json deleted file mode 100644 index 3e17ec818..000000000 --- a/frontend/public/json/mediamtx.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "MediaMTX", - "slug": "mediamtx", - "categories": [ - 13 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - 
"privileged": false, - "interface_port": null, - "documentation": "https://github.com/bluenviron/mediamtx/blob/main/README.md", - "website": "https://github.com/bluenviron/mediamtx", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/mediamtx.webp", - "config_path": "/opt/mediamtx/mediamtx.yml", - "description": "MediaMTX is a ready-to-use SRT / WebRTC / RTSP / RTMP / LL-HLS media server and media proxy that allows you to read, publish, proxy, record and playback video and audio streams.", - "install_methods": [ - { - "type": "default", - "script": "ct/mediamtx.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/medusa.json b/frontend/public/json/medusa.json deleted file mode 100644 index b3c6586b3..000000000 --- a/frontend/public/json/medusa.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Medusa", - "slug": "medusa", - "categories": [ - 13 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8081, - "documentation": "https://github.com/pymedusa/Medusa/wiki", - "website": "https://pymedusa.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/py-medusa.webp", - "config_path": "", - "description": "Medusa is an automatic Video Library Manager for TV Shows. 
It watches for new episodes of your favorite shows, and when they are posted it does its magic: automatic torrent/nzb searching, downloading, and processing at the qualities you want.", - "install_methods": [ - { - "type": "default", - "script": "ct/medusa.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 6, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/meilisearch.json b/frontend/public/json/meilisearch.json deleted file mode 100644 index daa2d5705..000000000 --- a/frontend/public/json/meilisearch.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Meilisearch", - "slug": "meilisearch", - "categories": [ - 8 - ], - "date_created": "2025-04-04", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 7700, - "documentation": "https://www.meilisearch.com/docs", - "website": "https://www.meilisearch.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/meilisearch.webp", - "config_path": "/etc/meilisearch.toml", - "description": "Meilisearch is a fast, open-source search engine designed for instant, full-text search with typo tolerance. It provides an API that allows developers to integrate powerful search features into applications. 
Meilisearch-UI is an optional web-based interface that provides a simple way to interact with Meilisearch, visualize indexed data, and test queries without needing to use the API directly.", - "install_methods": [ - { - "type": "default", - "script": "ct/meilisearch.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 7, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Meilisearch-UI can optionally be installed for a web-based search interface", - "type": "info" - }, - { - "text": "Meilisearch-UI has early development status and can cause performance issues", - "type": "warn" - } - ] -} diff --git a/frontend/public/json/memos.json b/frontend/public/json/memos.json deleted file mode 100644 index 176a00268..000000000 --- a/frontend/public/json/memos.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Memos", - "slug": "memos", - "categories": [ - 12 - ], - "date_created": "2024-10-31", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9030, - "documentation": "https://www.usememos.com/docs", - "website": "https://www.usememos.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/memos.webp", - "config_path": "", - "description": "Memos is an open-source, self-hosted platform designed for fast, privacy-focused note-taking. Users can create, organize, and format notes with Markdown, which are securely stored in a local database. 
It\u2019s lightweight and customizable, built for quick access and adaptability to individual or team needs.", - "install_methods": [ - { - "type": "default", - "script": "ct/memos.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 3, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/meshcentral.json b/frontend/public/json/meshcentral.json deleted file mode 100644 index 68fe6de65..000000000 --- a/frontend/public/json/meshcentral.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "MeshCentral", - "slug": "meshcentral", - "categories": [ - 4 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://ylianst.github.io/MeshCentral/", - "website": "https://meshcentral.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/meshcentral.webp", - "config_path": "/opt/meshcentral/meshcentral-data/config.json", - "description": "MeshCentral is a web-based computer management platform that provides remote control and management capabilities for computers. It allows administrators to manage and control computers over a local network or the internet through a single, centralized web-based interface. With MeshCentral, users can monitor the status of computers, perform remote administration tasks, and control the power state of machines. The software supports various operating systems and provides real-time updates and alerts to keep administrators informed of the status of their systems. 
MeshCentral is designed to provide an easy-to-use, scalable, and secure solution for remote computer management, making it a valuable tool for IT administrators, helpdesk support, and remote workers.", - "install_methods": [ - { - "type": "default", - "script": "ct/meshcentral.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/metabase.json b/frontend/public/json/metabase.json deleted file mode 100644 index bb65c03f9..000000000 --- a/frontend/public/json/metabase.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Metabase", - "slug": "metabase", - "categories": [ - 9 - ], - "date_created": "2025-11-16", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://www.metabase.com/docs/latest/", - "config_path": "/opt/metabase/.env", - "website": "https://www.metabase.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/metabase.webp", - "description": "Metabase is an open-source business intelligence platform. 
You can use Metabase to ask questions about your data, or embed Metabase in your app to let your customers explore their data on their own.", - "install_methods": [ - { - "type": "default", - "script": "ct/metabase.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 6, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/metadata.json b/frontend/public/json/metadata.json deleted file mode 100644 index 4973b8db2..000000000 --- a/frontend/public/json/metadata.json +++ /dev/null @@ -1,186 +0,0 @@ -{ - "categories": [ - { - "name": "Proxmox & Virtualization", - "id": 1, - "sort_order": 1.0, - "description": "Tools and scripts to manage Proxmox VE and virtualization platforms effectively.", - "icon": "server" - }, - { - "name": "Operating Systems", - "id": 2, - "sort_order": 2.0, - "description": "Scripts for deploying and managing various operating systems.", - "icon": "monitor" - }, - { - "name": "Containers & Docker", - "id": 3, - "sort_order": 3.0, - "description": "Solutions for containerization using Docker and related technologies.", - "icon": "box" - }, - { - "name": "Network & Firewall", - "id": 4, - "sort_order": 4.0, - "description": "Enhance network security and configure firewalls with ease.", - "icon": "shield" - }, - { - "name": "Adblock & DNS", - "id": 5, - "sort_order": 5.0, - "description": "Optimize your network with DNS and ad-blocking solutions.", - "icon": "ban" - }, - { - "name": "Authentication & Security", - "id": 6, - "sort_order": 6.0, - "description": "Secure your infrastructure with authentication and security tools.", - "icon": "lock" - }, - { - "name": "Backup & Recovery", - "id": 7, - "sort_order": 7.0, - "description": "Reliable backup and recovery scripts to protect your data.", - "icon": "archive" - }, - { - "name": "Databases", - "id": 8, - "sort_order": 8.0, - "description": "Deploy and manage robust database 
systems with ease.", - "icon": "database" - }, - { - "name": "Monitoring & Analytics", - "id": 9, - "sort_order": 9.0, - "description": "Monitor system performance and analyze data seamlessly.", - "icon": "bar-chart" - }, - { - "name": "Dashboards & Frontends", - "id": 10, - "sort_order": 10.0, - "description": "Create interactive dashboards and user-friendly frontends.", - "icon": "layout" - }, - { - "name": "Files & Downloads", - "id": 11, - "sort_order": 11.0, - "description": "Manage file sharing and downloading solutions efficiently.", - "icon": "download" - }, - { - "name": "Documents & Notes", - "id": 12, - "sort_order": 12.0, - "description": "Organize and manage documents and note-taking tools.", - "icon": "file-text" - }, - { - "name": "Media & Streaming", - "id": 13, - "sort_order": 13.0, - "description": "Stream and manage media effortlessly across devices.", - "icon": "play" - }, - { - "name": "*Arr Suite", - "id": 14, - "sort_order": 14.0, - "description": "Automated media management with the popular *Arr suite tools.", - "icon": "tv" - }, - { - "name": "NVR & Cameras", - "id": 15, - "sort_order": 15.0, - "description": "Manage network video recorders and camera setups.", - "icon": "camera" - }, - { - "name": "IoT & Smart Home", - "id": 16, - "sort_order": 16.0, - "description": "Control and automate IoT devices and smart home systems.", - "icon": "home" - }, - { - "name": "ZigBee, Z-Wave & Matter", - "id": 17, - "sort_order": 17.0, - "description": "Solutions for ZigBee, Z-Wave, and Matter-based device management.", - "icon": "radio" - }, - { - "name": "MQTT & Messaging", - "id": 18, - "sort_order": 18.0, - "description": "Set up reliable messaging and MQTT-based communication systems.", - "icon": "message-circle" - }, - { - "name": "Automation & Scheduling", - "id": 19, - "sort_order": 19.0, - "description": "Automate tasks and manage scheduling with powerful tools.", - "icon": "clock" - }, - { - "name": "AI / Coding & Dev-Tools", - "id": 20, - 
"sort_order": 20.0, - "description": "Leverage AI and developer tools for smarter coding workflows.", - "icon": "code" - }, - { - "name": "Webservers & Proxies", - "id": 21, - "sort_order": 21.0, - "description": "Deploy and configure web servers and proxy solutions.", - "icon": "globe" - }, - { - "name": "Bots & ChatOps", - "id": 22, - "sort_order": 22.0, - "description": "Enhance collaboration with bots and ChatOps integrations.", - "icon": "bot" - }, - { - "name": "Finance & Budgeting", - "id": 23, - "sort_order": 23.0, - "description": "Track expenses and manage budgets efficiently.", - "icon": "dollar-sign" - }, - { - "name": "Gaming & Leisure", - "id": 24, - "sort_order": 24.0, - "description": "Scripts for gaming servers and leisure-related tools.", - "icon": "gamepad-2" - }, - { - "name": "Business & ERP", - "id": 25, - "sort_order": 25.0, - "description": "Streamline business operations with ERP and management tools.", - "icon": "building" - }, - { - "name": "Miscellaneous", - "id": 0, - "sort_order": 99.0, - "description": "General scripts and tools that don't fit into other categories.", - "icon": "more-horizontal" - } - ] -} diff --git a/frontend/public/json/metube.json b/frontend/public/json/metube.json deleted file mode 100644 index 05534c753..000000000 --- a/frontend/public/json/metube.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "MeTube", - "slug": "metube", - "categories": [ - 11 - ], - "date_created": "2025-12-05", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8081, - "documentation": "https://github.com/alexta69/metube/blob/master/README.md", - "website": "https://github.com/alexta69/metube", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/metube.webp", - "config_path": "/opt/metube/.env", - "description": "MeTube allows you to download videos from YouTube and dozens of other sites.", - "install_methods": [ - { - "type": "default", - "script": "ct/metube.sh", - "resources": { - "cpu": 1, - 
"ram": 2048, - "hdd": 10, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/microcode.json b/frontend/public/json/microcode.json deleted file mode 100644 index 149039c3a..000000000 --- a/frontend/public/json/microcode.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "PVE Processor Microcode", - "slug": "microcode", - "categories": [ - 1 - ], - "date_created": "2024-04-29", - "type": "pve", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "", - "description": "Processor Microcode is a layer of low-level software that runs on the processor and provides patches or updates to its firmware. Microcode updates can fix hardware bugs, improve performance, and enhance security features of the processor.\r\n\r\nIt's important to note that the availability of firmware update mechanisms, such as Intel's Management Engine (ME) or AMD's Platform Security Processor (PSP), may vary depending on the processor and its specific implementation. Therefore, it's recommended to consult the documentation for your processor to confirm whether firmware updates can be applied through the operating system.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/microcode.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within the Proxmox shell", - "type": "info" - }, - { - "text": "After a reboot, you can check whether any microcode updates are currently in effect by running the following command. 
`journalctl -k | grep -E \"microcode\" | head -n 1`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/mikrotik-routeros.json b/frontend/public/json/mikrotik-routeros.json deleted file mode 100644 index 105566778..000000000 --- a/frontend/public/json/mikrotik-routeros.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "name": "Mikrotik RouterOS CHR", - "slug": "mikrotik-routeros", - "categories": [ - 2, - 4 - ], - "date_created": "2024-05-02", - "type": "vm", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://help.mikrotik.com/docs/spaces/ROS/pages/18350234/Cloud+Hosted+Router+CHR", - "website": "https://mikrotik.com", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/mikrotik.webp", - "config_path": "", - "description": "Mikrotik RouterOS CHR is a Linux-based operating system that transforms a computer into a router. It provides a wide range of features for network routing, firewall, bandwidth management, wireless access point, backhaul link, hotspot gateway, VPN server, and many others. RouterOS is a versatile solution that supports various network configurations, including those with multiple WAN links, hotspots, and VPNs. It is highly customizable, allowing administrators to configure and manage their networks according to their specific requirements. With RouterOS, network administrators can monitor and control the performance and security of their networks, ensuring reliable and secure communication for their users. 
The software is designed to be easy to use and provides a wide range of tools for network management, making it a valuable solution for small and large networks alike.", - "install_methods": [ - { - "type": "default", - "script": "vm/mikrotik-routeros.sh", - "resources": { - "cpu": 2, - "ram": 512, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": "admin", - "password": "none" - }, - "notes": [] -} diff --git a/frontend/public/json/minarca.json b/frontend/public/json/minarca.json deleted file mode 100644 index 870374104..000000000 --- a/frontend/public/json/minarca.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Minarca", - "slug": "minarca", - "categories": [ - 7 - ], - "date_created": "2025-06-08", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://nexus.ikus-soft.com/repository/archive/minarca/6.0.3/doc/index.html", - "config_path": "/etc/minarca/minarca-server.conf", - "website": "https://minarca.org/en_CA", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/minarca.webp", - "description": "Minarca is a self-hosted open source data backup software that allows you to manage your computer and server backups for free from a direct online accessible centralized view of your data with easy retrieval.", - "install_methods": [ - { - "type": "default", - "script": "ct/minarca.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 10, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "admin123" - }, - "notes": [] -} diff --git a/frontend/public/json/miniflux.json b/frontend/public/json/miniflux.json deleted file mode 100644 index 64bc502a2..000000000 --- a/frontend/public/json/miniflux.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Miniflux", - "slug": "miniflux", - "categories": [ - 13 - ], - "date_created": "2025-11-12", - "type": "ct", - "updateable": true, - "privileged": 
false, - "config_path": "/etc/miniflux.conf", - "interface_port": 8080, - "documentation": "https://miniflux.app/docs/index.html", - "website": "https://miniflux.app/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/miniflux-light.webp", - "description": "Miniflux is a minimalist and opinionated feed reader.", - "install_methods": [ - { - "type": "default", - "script": "ct/miniflux.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "randomly generated during installation process" - }, - "notes": [ - { - "text": "Admin password available as `ADMIN_PASSWORD` in `~/miniflux.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/minio.json b/frontend/public/json/minio.json deleted file mode 100644 index 6d0a45bce..000000000 --- a/frontend/public/json/minio.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "MinIO", - "slug": "minio", - "categories": [ - 8 - ], - "date_created": "2025-02-13", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9001, - "documentation": "https://min.io/docs/minio/linux/index.html", - "website": "https://min.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/minio.webp", - "config_path": "/etc/default/minio", - "description": "MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. 
Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.", - "install_methods": [ - { - "type": "default", - "script": "ct/minio.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 5, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "minioadmin", - "password": null - }, - "notes": [ - { - "text": "Application credentials: `cat ~/minio.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/mongodb.json b/frontend/public/json/mongodb.json deleted file mode 100644 index f380953b4..000000000 --- a/frontend/public/json/mongodb.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "MongoDB", - "slug": "mongodb", - "categories": [ - 8 - ], - "date_created": "2024-05-18", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 27017, - "documentation": "https://www.mongodb.com/docs/manual/", - "website": "https://www.mongodb.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/mongodb.webp", - "config_path": "", - "description": "MongoDB is a NoSQL database that uses a document-oriented data model, storing data in JSON-like documents with dynamic schemas. This design offers flexibility and scalability, making it ideal for handling large volumes of data. 
MongoDB supports indexing, replication, and load balancing, ensuring high performance and availability, and can distribute data across multiple servers, making it well-suited for big data applications.", - "install_methods": [ - { - "type": "default", - "script": "ct/mongodb.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/monica.json b/frontend/public/json/monica.json deleted file mode 100644 index 63003dfec..000000000 --- a/frontend/public/json/monica.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Monica", - "slug": "monica", - "categories": [ - 24 - ], - "date_created": "2025-01-28", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://github.com/monicahq/monica/tree/4.x/docs", - "website": "https://www.monicahq.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/monica.webp", - "config_path": "/opt/monica/.env", - "description": "Monica is an open-source personal CRM designed to help you manage and strengthen your relationships. It allows you to store important details about your contacts, track interactions, set reminders for special dates, and log activities—all in one secure, private place. Perfect for busy individuals, Monica helps you stay organized, remember meaningful moments, and nurture your connections without ads or data mining. 
Install it on your own server for full control!", - "install_methods": [ - { - "type": "default", - "script": "ct/monica.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin@helper-scripts.com", - "password": "helper-scripts.com" - }, - "notes": [] -} diff --git a/frontend/public/json/monitor-all.json b/frontend/public/json/monitor-all.json deleted file mode 100644 index 88fb173da..000000000 --- a/frontend/public/json/monitor-all.json +++ /dev/null @@ -1,68 +0,0 @@ -{ - "name": "PVE Monitor-All", - "slug": "monitor-all", - "categories": [ - 1 - ], - "date_created": "2024-04-29", - "type": "pve", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "", - "description": "This script will add Monitor-All to Proxmox VE, which will monitor the status of all your instances, both containers and virtual machines, excluding templates and user-defined ones, and automatically restart or reset them if they become unresponsive. This is particularly useful if you're experiencing problems with Home Assistant becoming non-responsive every few days/weeks. 
Monitor-All also maintains a log of the entire process, which can be helpful for troubleshooting and monitoring purposes.\r\n\r\n\ud83d\udec8 Virtual machines without the QEMU guest agent installed must be excluded.\r\n\ud83d\udec8 Prior to generating any new CT/VM not found in this repository, it's necessary to halt Proxmox VE Monitor-All by running systemctl stop ping-instances.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/monitor-all.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within the Proxmox shell", - "type": "info" - }, - { - "text": "Virtual machines without the QEMU guest agent installed must be excluded.", - "type": "warning" - }, - { - "text": "Prior to generating any new CT/VM not found in this repository, it's necessary to halt Proxmox VE Monitor-All by running `systemctl stop ping-instances`.", - "type": "warning" - }, - { - "text": "To make setup changes, first stop the service: `systemctl stop ping-instances`", - "type": "info" - }, - { - "text": "To edit pause time: `nano /usr/local/bin/ping-instances.sh`", - "type": "info" - }, - { - "text": "To add excluded instances: `nano /etc/systemd/system/ping-instances.service`", - "type": "info" - }, - { - "text": "After changes have been saved, `systemctl daemon-reload` and start the service: `systemctl start ping-instances`", - "type": "info" - }, - { - "text": "Monitor-All logs: `cat /var/log/ping-instances.log`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/motioneye.json b/frontend/public/json/motioneye.json deleted file mode 100644 index f09b8d324..000000000 --- a/frontend/public/json/motioneye.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "MotionEye NVR", - "slug": "motioneye", - "categories": [ - 15 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - 
"privileged": false, - "interface_port": 8765, - "documentation": "https://github.com/motioneye-project/motioneye/wiki", - "website": "https://github.com/motioneye-project/motioneye", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/motioneye.webp", - "config_path": "/etc/motioneye/motioneye.conf", - "description": "MotionEye is an open-source, self-hosted network video recording (NVR) software designed to manage and monitor IP cameras. It runs on various platforms such as Linux, Raspberry Pi, and Docker, and offers features such as real-time video streaming, motion detection, and customizable camera views.", - "install_methods": [ - { - "type": "default", - "script": "ct/motioneye.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "none" - }, - "notes": [] -} diff --git a/frontend/public/json/mqtt.json b/frontend/public/json/mqtt.json deleted file mode 100644 index 2459cec80..000000000 --- a/frontend/public/json/mqtt.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "MQTT", - "slug": "mqtt", - "categories": [ - 18 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://mosquitto.org/documentation/", - "website": "https://mosquitto.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/mqtt.webp", - "config_path": "/etc/mosquitto/conf.d/default.conf", - "description": "Eclipse Mosquitto is an open-source message broker that implements the MQTT (Message Queuing Telemetry Transport) protocol. It is a lightweight and simple-to-use message broker that allows IoT devices and applications to communicate with each other by exchanging messages in real-time. 
Mosquitto is widely used in IoT applications, due to its low resource requirements and its compatibility with a wide range of devices and platforms", - "install_methods": [ - { - "type": "default", - "script": "ct/mqtt.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "You can find post-install info here: `https://github.com/community-scripts/ProxmoxVE/discussions/782`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/myip.json b/frontend/public/json/myip.json deleted file mode 100644 index 6212c9c7b..000000000 --- a/frontend/public/json/myip.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "MyIP", - "slug": "myip", - "categories": [ - 4 - ], - "date_created": "2025-09-29", - "type": "ct", - "updateable": true, - "privileged": false, - "config_path": "/opt/myip/.env", - "interface_port": 18966, - "documentation": "https://github.com/jason5ng32/MyIP#-environment-variable", - "website": "https://ipcheck.ing/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/myip.webp", - "description": "The best IP Toolbox. 
Easy to check what's your IPs, IP geolocation, check for DNS leaks, examine WebRTC connections, speed test, ping test, MTR test, check website availability, whois search and more!", - "install_methods": [ - { - "type": "default", - "script": "ct/myip.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 2, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/mylar3.json b/frontend/public/json/mylar3.json deleted file mode 100644 index 8c2f7b3d4..000000000 --- a/frontend/public/json/mylar3.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Mylar3", - "slug": "mylar3", - "categories": [ - 14 - ], - "date_created": "2024-12-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8090, - "documentation": "https://mylarcomics.com/docs/introduction/", - "website": "https://mylarcomics.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/mylar3.webp", - "config_path": "", - "description": "Mylar is an automated Comic Book (cbr/cbz) downloader program for use with NZB and torrents.\r\nMylar allows you to create a watchlist of series that it monitors for various things (new issues, updated information, etc). It will grab, sort, and rename downloaded issues. 
It will also allow you to monitor weekly pull-lists for items belonging to said watchlisted series to download, as well as being able to monitor and maintain story-arcs.", - "install_methods": [ - { - "type": "default", - "script": "ct/mylar3.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/myspeed.json b/frontend/public/json/myspeed.json deleted file mode 100644 index 810d7fcd7..000000000 --- a/frontend/public/json/myspeed.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "MySpeed", - "slug": "myspeed", - "categories": [ - 4 - ], - "date_created": "2024-06-14", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5216, - "documentation": "https://docs.myspeed.dev/", - "website": "https://myspeed.dev/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/myspeed.webp", - "config_path": "", - "description": "MySpeed is a speed test analysis tool that records and displays internet speed metrics for up to 30 days. It offers automated tests using Cron expressions and supports multiple speed test servers (Ookla, LibreSpeed, Cloudflare). 
MySpeed provides detailed statistics, health check notifications via email or messaging apps, and integrates with Prometheus and Grafana for advanced monitoring.", - "install_methods": [ - { - "type": "default", - "script": "ct/myspeed.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/mysql.json.bak b/frontend/public/json/mysql.json.bak deleted file mode 100644 index d6d8ff440..000000000 --- a/frontend/public/json/mysql.json.bak +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "MySQL", - "slug": "mysql", - "categories": [ - 8 - ], - "date_created": "2024-10-10", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://dev.mysql.com/doc/", - "website": "https://www.mysql.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/mysql.webp", - "config_path": "", - "description": "MySQL is an open-source relational database management system (RDBMS) that uses SQL for managing and manipulating data. It is known for its scalability, reliability, and high performance, making it suitable for small to large-scale applications. 
Key features include support for ACID transactions, data replication for high availability, and compatibility with various programming languages like Python, PHP, and Java.", - "install_methods": [ - { - "type": "default", - "script": "ct/mysql.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "12" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Database credentials: `cat mysql.creds`", - "type": "info" - }, - { - "text": "With an option to install the MySQL 8.4 LTS release instead of MySQL 8.0", - "type": "info" - }, - { - "text": "If installed, access phpMyAdmin at `http:///phpMyAdmin`, case sensitive.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/n8n.json b/frontend/public/json/n8n.json deleted file mode 100644 index e0e827f86..000000000 --- a/frontend/public/json/n8n.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "n8n", - "slug": "n8n", - "categories": [ - 16 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5678, - "documentation": "https://docs.n8n.io/", - "website": "https://n8n.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/n8n.webp", - "config_path": "/opt/n8n.env", - "description": "n8n is a workflow automation tool that enables users to automate various tasks and processes by connecting various data sources, systems, and services. It provides a visual interface for building workflows, allowing users to easily define and automate complex sequences of actions, such as data processing, conditional branching, and API calls. n8n supports a wide range of integrations, making it a versatile tool for automating a variety of use cases, from simple data processing workflows to complex business processes. 
With its extendable architecture, n8n is designed to be easily customizable and can be adapted to meet the specific needs of different users and industries.", - "install_methods": [ - { - "type": "default", - "script": "ct/n8n.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 10, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "You may need to configure the `WEBHOOK_URL` in the config file when using a domain.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/navidrome.json b/frontend/public/json/navidrome.json deleted file mode 100644 index cf712114c..000000000 --- a/frontend/public/json/navidrome.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Navidrome", - "slug": "navidrome", - "categories": [ - 13 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 4533, - "documentation": "https://www.navidrome.org/docs/", - "website": "https://www.navidrome.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/navidrome.webp", - "config_path": "/etc/navidrome/navidrome.toml", - "description": "Navidrome is a music server solution that makes your music collection accessible from anywhere. It provides a modern web-based user interface and compatibility with a range of third-party mobile apps for both iOS and Android devices. With Navidrome, users can access their music collection from anywhere, whether at home or on the go. The software supports a variety of music formats, making it easy for users to play their favorite songs and albums. Navidrome provides a simple and user-friendly interface for managing and organizing music collections, making it a valuable tool for music lovers who want to access their music from anywhere. 
The software is designed to be easy to set up and use, making it a popular choice for those who want to host their own music server and enjoy their music collection from anywhere.", - "install_methods": [ - { - "type": "default", - "script": "ct/navidrome.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "To change Navidrome music folder path, `nano /etc/navidrome/navidrome.toml`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/neo4j.json b/frontend/public/json/neo4j.json deleted file mode 100644 index 48effac69..000000000 --- a/frontend/public/json/neo4j.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Neo4j", - "slug": "neo4j", - "categories": [ - 8 - ], - "date_created": "2024-10-20", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 7474, - "documentation": "https://neo4j.com/docs/", - "website": "https://neo4j.com/product/neo4j-graph-database/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/neo4j.webp", - "config_path": "/etc/neo4j/neo4j.conf", - "description": "Neo4j is a graph database designed to manage complex data relationships. It uses nodes, relationships, and properties to store and analyze connected data, making it ideal for applications like recommendation engines, fraud detection, and network analysis. 
Its structure allows for fast querying and deep data insights through native graph storage.", - "install_methods": [ - { - "type": "default", - "script": "ct/neo4j.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "neo4j", - "password": "neo4j" - }, - "notes": [] -} diff --git a/frontend/public/json/netbird.json b/frontend/public/json/netbird.json deleted file mode 100644 index 1b23d7ce0..000000000 --- a/frontend/public/json/netbird.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "name": "NetBird", - "slug": "netbird", - "categories": [4], - "date_created": "2026-03-05", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://docs.netbird.io/", - "website": "https://netbird.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/netbird.webp", - "config_path": "/etc/netbird/config.json", - "description": "NetBird is an open source VPN management platform that creates secure peer-to-peer networks using WireGuard. It enables secure connectivity between devices anywhere in the world without complex firewall configurations or port forwarding. NetBird offers features like zero-configuration networking, SSO integration, access control policies, and a centralized management dashboard. It's designed to be simple to deploy and manage, making it ideal for connecting remote teams, securing IoT devices, or building secure infrastructure networks.", - "install_methods": [ - { - "type": "default", - "script": "ct/netbird.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "The NetBird client (agent) allows a peer to join a pre-existing NetBird deployment. 
If a NetBird deployment is not yet available, there are both managed and self-hosted options available.", - "type": "info" - }, - { - "text": "After installation, enter the container and run `netbird` to view the commands.", - "type": "info" - }, - { - "text": "Use a Setup Key from your NetBird dashboard or SSO login to authenticate during setup or in the container.", - "type": "info" - }, - { - "text": "Check connection status with `netbird status`.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/netbox.json b/frontend/public/json/netbox.json deleted file mode 100644 index 4fbe4839b..000000000 --- a/frontend/public/json/netbox.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "NetBox", - "slug": "netbox", - "categories": [ - 4 - ], - "date_created": "2024-11-17", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 443, - "documentation": "https://netboxlabs.com/docs/netbox/en/stable/", - "website": "https://netboxlabs.com/products/netbox/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/netbox.webp", - "config_path": "/opt/netbox/netbox/netbox/configuration.py", - "description": "NetBox is the source of truth for everything on your network, from physical components like power systems and cabling to virtual assets like IP addresses and VLANs. 
Network automation and observability tools depend on NetBox’s authoritative data to roll out configurations, monitor changes, and accelerate operations across the enterprise", - "install_methods": [ - { - "type": "default", - "script": "ct/netbox.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Show login and database credentials: `cat netbox.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/netdata.json b/frontend/public/json/netdata.json deleted file mode 100644 index 81584b995..000000000 --- a/frontend/public/json/netdata.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "PVE Netdata", - "slug": "netdata", - "categories": [ - 1 - ], - "date_created": "2024-04-29", - "type": "addon", - "updateable": false, - "privileged": false, - "interface_port": 19999, - "documentation": "https://learn.netdata.cloud/", - "website": "https://www.netdata.cloud/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/netdata.webp", - "config_path": "/etc/netdata/netdata.conf", - "description": "Netdata is an open-source, real-time performance monitoring tool designed to provide insights into the performance and health of systems and applications. 
It is often used by system administrators, DevOps professionals, and developers to monitor and troubleshoot issues on servers and other devices.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/netdata.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within the Proxmox shell", - "type": "info" - } - ] -} diff --git a/frontend/public/json/nextcloud-exporter.json b/frontend/public/json/nextcloud-exporter.json deleted file mode 100644 index 722810072..000000000 --- a/frontend/public/json/nextcloud-exporter.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Nextcloud Exporter", - "slug": "nextcloud-exporter", - "categories": [ - 9 - ], - "date_created": "2025-12-27", - "type": "addon", - "updateable": true, - "privileged": false, - "interface_port": 9205, - "documentation": "https://github.com/xperimental/nextcloud-exporter", - "website": "https://github.com/xperimental/nextcloud-exporter", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/nextcloud.webp", - "config_path": "/etc/nextcloud-exporter.env", - "description": "Prometheus exporter for Nextcloud servers. 
", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/nextcloud-exporter.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/nextcloud-vm.json b/frontend/public/json/nextcloud-vm.json deleted file mode 100644 index 2290d1760..000000000 --- a/frontend/public/json/nextcloud-vm.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Nextcloud", - "slug": "nextcloud-vm", - "categories": [ - 2 - ], - "date_created": "2023-11-14", - "type": "vm", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://docs.nextcloud.com/server/21/admin_manual/contents.html", - "website": "https://www.turnkeylinux.org/nextcloud", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/nextcloud.webp", - "config_path": "", - "description": "TurnKey Nextcloud is an open-source file sharing server and collaboration platform that can store your personal content, like documents and pictures, in a centralized location.", - "install_methods": [ - { - "type": "default", - "script": "vm/nextcloud-vm.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 12, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": null - }, - "notes": [ - { - "text": "This VM requires extra installation steps, see install guide at `https://github.com/community-scripts/ProxmoxVE/discussions/144`", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/nextcloudpi.json b/frontend/public/json/nextcloudpi.json deleted file mode 100644 index fba48633e..000000000 --- a/frontend/public/json/nextcloudpi.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "name": "NextcloudPi", - "slug": "nextcloudpi", - "categories": [ - 2 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - 
"interface_port": 4443, - "documentation": "https://docs.nextcloudpi.com/", - "website": "https://github.com/nextcloud/nextcloudpi", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/nextcloud.webp", - "config_path": "", - "description": "NextCloudPi is a popular self-hosted solution for file collaboration and data storage. It is built on the NextCloud software, which is an open-source platform for data management.", - "install_methods": [ - { - "type": "default", - "script": "ct/nextcloudpi.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "12" - } - }, - { - "type": "alpine", - "script": "ct/alpine-nextcloud.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 2, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Only Alpine: To get the username and password, run the script again inside the LXC shell.", - "type": "warning" - }, - { - "text": "WARNING: Installation sources scripts outside of Community Scripts repo. Please check the source before installing.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/nextpvr.json b/frontend/public/json/nextpvr.json deleted file mode 100644 index f53e7ef3c..000000000 --- a/frontend/public/json/nextpvr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "NextPVR", - "slug": "nextpvr", - "categories": [ - 13 - ], - "date_created": "2024-11-20", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8866, - "documentation": "https://github.com/sub3/NextPVR/wiki", - "website": "https://nextpvr.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/nextpvr.webp", - "config_path": "/var/opt/nextpvr/config.xml", - "description": "NextPVR is a personal video recorder application for Microsoft Windows, Linux, Mac and Docker. 
NextPVR makes it easy to watch or record live TV, and provides great features like series recordings, web scheduling, iPhone/iPad client application, Kodi/Emby integration etc.", - "install_methods": [ - { - "type": "default", - "script": "ct/nextpvr.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 5, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "password" - }, - "notes": [] -} diff --git a/frontend/public/json/nginx-ui.json b/frontend/public/json/nginx-ui.json deleted file mode 100644 index bba6324d3..000000000 --- a/frontend/public/json/nginx-ui.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "Nginx UI", - "slug": "nginx-ui", - "categories": [ - 21 - ], - "date_created": "2026-02-05", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9000, - "documentation": "https://nginxui.com/guide/", - "website": "https://nginxui.com", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/nginx-ui.webp", - "config_path": "/usr/local/etc/nginx-ui/app.ini", - "description": "Nginx UI is a comprehensive web-based interface designed to simplify the management and configuration of Nginx servers. 
It provides features like online statistics, ChatGPT-powered config assistant, automatic Let's Encrypt certificates, and config file editing with syntax highlighting.", - "install_methods": [ - { - "type": "default", - "script": "ct/nginx-ui.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "On first visit, the setup wizard will guide you to create an admin account and configure ACME email.", - "type": "warning" - }, - { - "text": "Nginx runs on ports 80/443, Nginx UI management interface on port 9000.", - "type": "info" - }, - { - "text": "SSL certificates can be managed automatically with Let's Encrypt integration.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/nginxproxymanager.json b/frontend/public/json/nginxproxymanager.json deleted file mode 100644 index fec1d27c6..000000000 --- a/frontend/public/json/nginxproxymanager.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "name": "Nginx Proxy Manager", - "slug": "nginxproxymanager", - "categories": [ - 21 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 81, - "documentation": "https://nginxproxymanager.com/guide/", - "website": "https://nginxproxymanager.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/nginx-proxy-manager.webp", - "config_path": "", - "disable": false, - "disable_description": "This script is temporarily disabled due to an external issue with the OpenResty APT repository. The repository's GPG key uses SHA-1 signatures, which are no longer accepted by Debian as of February 1, 2026. This causes installation to fail with APT errors. The issue is tracked in openresty/openresty#1097. A workaround exists but requires manual configuration. The script will be re-enabled once OpenResty updates their repository signing key. 
For more details, see: https://github.com/community-scripts/ProxmoxVE/issues/11406", - "description": "Nginx Proxy Manager is a tool that provides a web-based interface to manage Nginx reverse proxies. It enables users to easily and securely expose their services to the internet by providing features such as HTTPS encryption, domain mapping, and access control. It eliminates the need for manual configuration of Nginx reverse proxies, making it easy for users to quickly and securely expose their services to the public.", - "install_methods": [ - { - "type": "default", - "script": "ct/nginxproxymanager.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "12" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "On first launch, a setup wizard will guide you through creating an admin account. There are no default credentials.", - "type": "info" - }, - { - "text": "You can install the specific one certbot you prefer, or you can Running /app/scripts/install-certbot-plugins within the Nginx Proxy Manager (NPM) LXC shell will install many common plugins. Important: This script does not install all Certbot plugins, as some require additional, external system dependencies (like specific packages for certain DNS providers). These external dependencies must be manually installed within the LXC container before you can successfully install and use the corresponding Certbot plugin. 
Consult the plugin's documentation for required packages.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/nic-offloading-fix.json b/frontend/public/json/nic-offloading-fix.json deleted file mode 100644 index a2cad596a..000000000 --- a/frontend/public/json/nic-offloading-fix.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Intel e1000e NIC Offloading Fix", - "slug": "nic-offloading-fix", - "categories": [ - 1 - ], - "date_created": "2025-05-25", - "type": "pve", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": null, - "config_path": "", - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "description": "This script automates the process of disabling network interface card (NIC) offloading features specifically for Intel e1000e network interfaces on Linux systems.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/nic-offloading-fix.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within the Proxmox shell", - "type": "info" - } - ] -} diff --git a/frontend/public/json/nightscout.json b/frontend/public/json/nightscout.json deleted file mode 100644 index 9776172c8..000000000 --- a/frontend/public/json/nightscout.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "name": "Nightscout", - "slug": "nightscout", - "categories": [ - 0 - ], - "date_created": "2026-02-06", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 1337, - "documentation": "https://nightscout.github.io/", - "website": "http://www.nightscout.info/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/nightscout.webp", - "config_path": "/opt/nightscout/my.env", - "description": "Nightscout is an open source, DIY project that allows real time access to a CGM data via personal 
website, smartwatch watchers, or apps and widgets available for smartphones.", - "install_methods": [ - { - "type": "default", - "script": "ct/nightscout.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 10, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Nightscout requires configuring `my.env` with your Mongo connection string. API_SECRET has been generated and saved to `~/nightscout.creds`.", - "type": "info" - }, - { - "text": "Edit `/opt/nightscout/my.env` to configure your CGM source (Dexcom/CareLink). Then run: `systemctl restart nightscout`", - "type": "info" - }, - { - "text": "Official Configuration Guide: https://nightscout.github.io/nightscout/setup_variables/", - "type": "info" - }, - { - "text": "Nightscout requires HTTPS for many features (security, tokens, PWA). Usage of a reverse proxy (e.g. Nginx Proxy Manager, Traefik, Caddy) is highly recommended.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/nocodb.json b/frontend/public/json/nocodb.json deleted file mode 100644 index 3a737c407..000000000 --- a/frontend/public/json/nocodb.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "NocoDB", - "slug": "nocodb", - "categories": [ - 25 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://docs.nocodb.com/", - "website": "https://www.nocodb.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/nocodb.webp", - "config_path": "", - "description": "NocoDB is a document-oriented database management system. It uses the NoSQL (Not Only SQL) data model, which allows for more flexible and scalable data storage than traditional relational databases. NoCoDB stores data in JSON format, making it easier to manage and query complex data structures, and supports a range of data types, including strings, numbers, arrays, and objects. 
The software provides a web-based interface for managing and querying data, and includes features such as real-time data synchronization, auto-indexing, and full-text search. NoCoDB is designed to be scalable, and can be used for a range of applications, from small projects to large enterprise systems. The software is free and open-source, and is designed to be easy to use and integrate with other applications.", - "install_methods": [ - { - "type": "default", - "script": "ct/nocodb.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/node-red.json b/frontend/public/json/node-red.json deleted file mode 100644 index e678bec78..000000000 --- a/frontend/public/json/node-red.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "name": "Node-Red", - "slug": "node-red", - "categories": [ - 16 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 1880, - "documentation": "https://nodered.org/docs/", - "website": "https://nodered.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/node-red.webp", - "config_path": "~/.node-red/settings.js", - "description": "Node-RED is a visual programming tool that allows developers and non-developers alike to easily wire together hardware devices, APIs, and online services to create custom applications. It provides a visual interface for building workflows, making it easy to create and modify complex integrations without having to write any code. 
Node-RED is used in a wide range of applications, from simple automations to complex integrations, and is known for its simplicity, versatility, and ease of use.", - "install_methods": [ - { - "type": "default", - "script": "ct/node-red.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-node-red.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 1, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "To install themes, type `update` in the LXC console. (debian/ubuntu only)", - "type": "info" - } - ] -} diff --git a/frontend/public/json/nodebb.json b/frontend/public/json/nodebb.json deleted file mode 100644 index b4d23154b..000000000 --- a/frontend/public/json/nodebb.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "name": "NodeBB", - "slug": "nodebb", - "categories": [ - 10, - 25 - ], - "date_created": "2025-01-28", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 4567, - "documentation": "https://docs.nodebb.org/", - "website": "https://nodebb.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/nodebb.webp", - "config_path": "", - "description": "NodeBB Forum Software is powered by Node.js and supports either Redis, MongoDB, or a PostgreSQL database. It utilizes web sockets for instant interactions and real-time notifications. 
NodeBB takes the best of the modern web: real-time streaming discussions, mobile responsiveness, and rich RESTful read/write APIs, while staying true to the original bulletin board/forum format → categorical hierarchies, local user accounts, and asynchronous messaging.", - "install_methods": [ - { - "type": "default", - "script": "ct/nodebb.sh", - "resources": { - "cpu": 4, - "ram": 2048, - "hdd": 10, - "os": "Ubuntu", - "version": "24.04" - } - } - ], - "default_credentials": { - "username": "helper-scripts", - "password": "helper-scripts" - }, - "notes": [ - { - "text": "Only use Ubuntu 24.04!", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/nodecast-tv.json b/frontend/public/json/nodecast-tv.json deleted file mode 100644 index dec82e69b..000000000 --- a/frontend/public/json/nodecast-tv.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "nodecast-tv", - "slug": "nodecast-tv", - "categories": [ - 13 - ], - "date_created": "2026-01-28", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://github.com/technomancer702/nodecast-tv/blob/main/README.md", - "website": "https://github.com/technomancer702/nodecast-tv", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/nodecast-tv.webp", - "config_path": "", - "description": "nodecast-tv is a modern, web-based IPTV player featuring Live TV, EPG, Movies (VOD), and Series support. 
Built with performance and user experience in mind.", - "install_methods": [ - { - "type": "default", - "script": "ct/nodecast-tv.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/notifiarr.json b/frontend/public/json/notifiarr.json deleted file mode 100644 index c8d011097..000000000 --- a/frontend/public/json/notifiarr.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Notifiarr", - "slug": "notifiarr", - "categories": [ - 14 - ], - "date_created": "2024-06-12", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5454, - "documentation": "https://notifiarr.wiki/", - "website": "https://notifiarr.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/notifiarr.webp", - "config_path": "/etc/notifiarr/notifiarr.conf", - "description": "Notifiarr is a purpose built system to bring many applications together to manage and customize notifications via Discord. 
You can monitor many aspects of your network(s), be notified of downtime, be notified of health issues, etc", - "install_methods": [ - { - "type": "default", - "script": "ct/notifiarr.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Manually edit `/etc/notifiarr/notifiarr.conf`to enter the API key from Notifiarr.com, and create a password for the UI.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/npmplus.json b/frontend/public/json/npmplus.json deleted file mode 100644 index 2e74d4e5b..000000000 --- a/frontend/public/json/npmplus.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "name": "NPMplus", - "slug": "npmplus", - "categories": [ - 21 - ], - "date_created": "2025-03-13", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 81, - "documentation": "https://github.com/ZoeyVid/NPMplus/blob/develop/README.md", - "website": "https://github.com/ZoeyVid/NPMplus", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/nginx-proxy-manager.webp", - "config_path": "/opt/compose.yaml", - "description": "NPMplus is an enhanced version of Nginx Proxy Manager. It simplifies the process of setting up reverse proxies with TLS termination through a user-friendly web interface. 
Key features include HTTP/3 support, integration with CrowdSec IPS, inclusion of GoAccess for real-time log analysis, and support for ModSecurity with the Core Rule Set.", - "install_methods": [ - { - "type": "default", - "script": "ct/npmplus.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 3, - "os": "alpine", - "version": "3.23" - } - }, - { - "type": "alpine", - "script": "ct/npmplus.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 3, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": "admin@local.com", - "password": "helper-scripts.com" - }, - "notes": [ - { - "text": "This uses Docker under the hood, as this can not easily be installed bare-metal. ", - "type": "info" - }, - { - "text": "The initial starting process can take 1-2min. ", - "type": "info" - } - ] -} diff --git a/frontend/public/json/ntfy.json b/frontend/public/json/ntfy.json deleted file mode 100644 index d35900f21..000000000 --- a/frontend/public/json/ntfy.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "ntfy", - "slug": "ntfy", - "categories": [ - 19 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://docs.ntfy.sh/", - "website": "https://ntfy.sh/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ntfy.webp", - "config_path": "/etc/ntfy/server.yml", - "description": "ntfy (pronounced notify) is a simple HTTP-based pub-sub notification service. It allows you to send notifications to your phone or desktop via scripts from any computer, and/or using a REST API. 
It's infinitely flexible, and 100% free software.", - "install_methods": [ - { - "type": "default", - "script": "ct/ntfy.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/nxwitness.json b/frontend/public/json/nxwitness.json deleted file mode 100644 index df1a6b918..000000000 --- a/frontend/public/json/nxwitness.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Nx Witness", - "slug": "nxwitness", - "categories": [ - 15 - ], - "date_created": "2025-02-13", - "type": "ct", - "updateable": true, - "privileged": true, - "interface_port": 7001, - "documentation": "https://support.networkoptix.com/hc/en-us/articles/360006863413-Access-the-Nx-Witness-User-Manual", - "website": "https://www.networkoptix.com/nx-witness", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/nx-witness.webp", - "config_path": "", - "description": "Nx Witness is a professional video management system (VMS) designed for IP cameras and surveillance systems. It provides real-time video streaming, recording, and remote access with an intuitive user interface. The software supports AI-based video analytics, integrates with third-party security systems, and offers advanced search and event management features. 
It is used for security monitoring, business intelligence, and large-scale surveillance deployments.", - "install_methods": [ - { - "type": "default", - "script": "ct/nxwitness.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "ubuntu", - "version": "24.04" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/nzbget.json b/frontend/public/json/nzbget.json deleted file mode 100644 index a23723ad8..000000000 --- a/frontend/public/json/nzbget.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "NZBGet", - "slug": "nzbget", - "categories": [ - 11 - ], - "date_created": "2024-10-31", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 6789, - "documentation": "https://nzbget.com/documentation/", - "website": "https://nzbget.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/nzbget.webp", - "config_path": "/usr/share/nzbget/nzbget.conf", - "description": "NZBGet is a Usenet downloader focused on efficiency and performance, designed to handle NZB files for downloading content from Usenet. 
It automates downloading, checking, repairing, and extracting files, optimizing resources to run well on lower-powered devices.", - "install_methods": [ - { - "type": "default", - "script": "ct/nzbget.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "nzbget", - "password": "tegbzn6789" - }, - "notes": [] -} diff --git a/frontend/public/json/oauth2-proxy.json b/frontend/public/json/oauth2-proxy.json deleted file mode 100644 index e7815308b..000000000 --- a/frontend/public/json/oauth2-proxy.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "name": "OAuth2-Proxy", - "slug": "oauth2-proxy", - "categories": [ - 4, - 6 - ], - "date_created": "2025-05-28", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://oauth2-proxy.github.io/oauth2-proxy/configuration/overview", - "website": "https://oauth2-proxy.github.io/oauth2-proxy/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/oauth2-proxy.webp", - "config_path": "/opt/oauth2-proxy/config.toml", - "description": "A reverse proxy that provides authentication with Google, Azure, OpenID Connect and many more identity providers.", - "install_methods": [ - { - "type": "default", - "script": "ct/oauth2-proxy.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 3, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "This application includes a blank configuration file by default due to the wide range of available configuration options. We recommend referring to the official documentation for guidance: `https://oauth2-proxy.github.io/oauth2-proxy/configuration/overview`. 
With this you can make your config.toml file accordingly to your needs.", - "type": "info" - }, - { - "text": "After changing the config restart OAuth2-Proxy with: `systemctl restart oauth2-proxy`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/octoprint.json b/frontend/public/json/octoprint.json deleted file mode 100644 index 2ec135255..000000000 --- a/frontend/public/json/octoprint.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "OctoPrint", - "slug": "octoprint", - "categories": [ - 24 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": true, - "interface_port": 5000, - "documentation": "https://docs.octoprint.org/en/master/", - "website": "https://octoprint.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/octoprint.webp", - "config_path": "", - "description": "OctoPrint is a free and open-source web-based 3D printer control software that allows you to remotely control and monitor your 3D printer from a web interface. 
It was designed to be compatible with a wide range of 3D printers.", - "install_methods": [ - { - "type": "default", - "script": "ct/octoprint.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/odoo.json b/frontend/public/json/odoo.json deleted file mode 100644 index dea3defab..000000000 --- a/frontend/public/json/odoo.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Odoo", - "slug": "odoo", - "categories": [ - 25 - ], - "date_created": "2025-05-14", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8069, - "documentation": "https://www.odoo.com/en_EN/page/docs", - "website": "https://www.odoo.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/odoo.webp", - "config_path": "/etc/odoo/odoo.conf", - "description": "Odoo is a comprehensive open-source business platform made up of modular apps that cover key areas such as CRM, accounting, inventory, sales, project management, HR, helpdesk, and e-commerce. All modules are tightly integrated, allowing businesses to fully digitize and automate their workflows. Its modular design makes it suitable for both small companies and large enterprises, with flexibility to adapt to different industries. 
Odoo combines user-friendliness with powerful functionality, offering a unified solution for managing a wide range of business operations.", - "install_methods": [ - { - "type": "default", - "script": "ct/odoo.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 6, - "os": "debian", - "version": "12" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "admin" - }, - "notes": [ - { - "text": "Database Credentials: `cat ~/odoo.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/olivetin.json b/frontend/public/json/olivetin.json deleted file mode 100644 index 441161830..000000000 --- a/frontend/public/json/olivetin.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "OliveTin", - "slug": "olivetin", - "categories": [ - 10 - ], - "date_created": "2024-05-02", - "type": "addon", - "updateable": false, - "privileged": false, - "interface_port": 1337, - "documentation": "https://docs.olivetin.app/", - "website": "https://www.olivetin.app/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/olivetin.webp", - "config_path": "/etc/OliveTin/config.yaml", - "description": "OliveTin provides a secure and straightforward way to execute pre-determined shell commands through a web-based interface.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/olivetin.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Configuration Path: `/etc/OliveTin/config.yaml`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/ollama.json b/frontend/public/json/ollama.json deleted file mode 100644 index 02f4fc717..000000000 --- a/frontend/public/json/ollama.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Ollama", - "slug": "ollama", - "categories": [ - 20 - ], - "date_created": "2025-04-30", - "type": "ct", - "updateable": true, - "privileged": false, 
- "interface_port": 11434, - "documentation": "https://github.com/ollama/ollama/tree/main/docs", - "config_path": "/usr/local/lib/ollama", - "website": "https://ollama.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ollama.webp", - "description": "Ollama is a tool that allows you to run large language models locally on your own computer. This means you can experiment with and use these AI models without needing an internet connection or relying on cloud-based services. It simplifies the process of managing and running these models, offering a way to keep your data private and potentially work faster. 1 You can use Ollama to create local chatbots, conduct AI research, develop privacy-focused AI applications, and integrate AI into existing systems.", - "install_methods": [ - { - "type": "default", - "script": "ct/ollama.sh", - "resources": { - "cpu": 4, - "ram": 4096, - "hdd": 40, - "os": "Ubuntu", - "version": "24.04" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/omada.json b/frontend/public/json/omada.json deleted file mode 100644 index c640abb85..000000000 --- a/frontend/public/json/omada.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Omada Controller", - "slug": "omada", - "categories": [ - 4 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8043, - "documentation": null, - "website": "https://www.tp-link.com/us/support/download/omada-software-controller/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/omada.webp", - "config_path": "", - "description": "Omada Controller is a software application used to manage TP-Link's Omada EAP (Enterprise Access Point) devices. It allows administrators to centrally manage a large number of EAPs, monitor network performance, and control user access to the network. 
The software provides an intuitive interface for network configuration, firmware upgrades, and network monitoring. By using the Omada Controller, administrators can streamline the management process, reduce manual intervention, and improve the overall security and reliability of the network.", - "install_methods": [ - { - "type": "default", - "script": "ct/omada.sh", - "resources": { - "cpu": 2, - "ram": 3072, - "hdd": 8, - "os": "debian", - "version": "12" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/ombi.json b/frontend/public/json/ombi.json deleted file mode 100644 index 4717e0bc5..000000000 --- a/frontend/public/json/ombi.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Ombi", - "slug": "ombi", - "categories": [ - 13 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5000, - "documentation": "https://docs.ombi.app/", - "website": "https://ombi.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ombi.webp", - "config_path": "", - "description": "Ombi is a self-hosted web application designed to empower shared Plex, Emby or Jellyfin users with automated content request capabilities. 
By integrating with various TV Show and Movie DVR tools, Ombi ensures a smooth and comprehensive experience for your users, allowing them to effortlessly request content on their own.", - "install_methods": [ - { - "type": "default", - "script": "ct/ombi.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/omv.json b/frontend/public/json/omv.json deleted file mode 100644 index e97ea2a47..000000000 --- a/frontend/public/json/omv.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "OpenMediaVault", - "slug": "omv", - "categories": [ - 2 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://docs.openmediavault.org/en/stable/", - "website": "https://www.openmediavault.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/openmediavault.webp", - "config_path": "", - "description": "OpenMediaVault is a next-generation network-attached storage (NAS) solution based on Debian Linux. It provides a web-based interface for managing and storing digital data, making it easy to use and set up. OpenMediaVault supports various storage protocols, including SMB/CIFS, NFS, and FTP, and provides a wide range of features for data management, such as user and group management, disk quotas, and data backup and recovery. The software is designed to be flexible and scalable, making it a valuable solution for both personal and enterprise use. OpenMediaVault provides a stable and reliable platform for managing and storing digital data, making it a popular choice for those who want to host their own data and ensure its security and privacy. 
With OpenMediaVault, users can access their data from anywhere and easily share it with others, making it a valuable tool for collaboration and data management.", - "install_methods": [ - { - "type": "default", - "script": "ct/omv.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "12" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "openmediavault" - }, - "notes": [ - { - "text": "Running OVM in a LXC container may require a complicated disk setup, consider using a VM instead. More info: `https://github.com/community-scripts/ProxmoxVE/discussions/175`", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/onedev.json b/frontend/public/json/onedev.json deleted file mode 100644 index d61f78cad..000000000 --- a/frontend/public/json/onedev.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "OneDev", - "slug": "onedev", - "categories": [ - 20 - ], - "date_created": "2024-11-30", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 6610, - "documentation": "https://docs.onedev.io/", - "website": "https://onedev.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/onedev.webp", - "config_path": "/opt/onedev/conf", - "description": "Git server with CI/CD, kanban, and packages.", - "install_methods": [ - { - "type": "default", - "script": "ct/onedev.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/onlyoffice.json b/frontend/public/json/onlyoffice.json deleted file mode 100644 index 0b7913bfe..000000000 --- a/frontend/public/json/onlyoffice.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "ONLYOFFICE Docs", - "slug": "onlyoffice", - "categories": [ - 12 - ], - "date_created": "2025-06-24", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, 
- "documentation": "https://helpcenter.onlyoffice.com/docs", - "website": "https://onlyoffice.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/onlyoffice.webp", - "config_path": "/etc/onlyoffice/documentserver/local.json", - "description": "ONLYOFFICE Docs is an open-source online office suite that provides collaborative editors for text documents, spreadsheets, and presentations, fully compatible with Microsoft Office formats (DOCX, XLSX, PPTX).", - "install_methods": [ - { - "type": "default", - "script": "ct/onlyoffice.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 10, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "admin" - }, - "notes": [ - { - "text": "Database / RabbitMQ Credentials: `cat ~/onlyoffice.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/open-archiver.json b/frontend/public/json/open-archiver.json deleted file mode 100644 index 4ca0fda25..000000000 --- a/frontend/public/json/open-archiver.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Open-Archiver", - "slug": "open-archiver", - "categories": [ - 7 - ], - "date_created": "2025-10-18", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://docs.openarchiver.com/", - "config_path": "/opt/openarchiver/.env", - "website": "https://openarchiver.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/open-archiver.webp", - "description": "Open Archiver is a secure, self-hosted email archiving solution, and it's completely open source. Get an email archiver that enables full-text search across email and attachments. 
Create a permanent, searchable, and compliant mail archive from Google Workspace, Microsoft 35, and any IMAP server.", - "install_methods": [ - { - "type": "default", - "script": "ct/open-archiver.sh", - "resources": { - "cpu": 2, - "ram": 3072, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Data directory is: `/opt/openarchiver-data`. If you have a lot of email, you might consider mounting external storage to this directory.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/opencloud.json b/frontend/public/json/opencloud.json deleted file mode 100644 index bc2909222..000000000 --- a/frontend/public/json/opencloud.json +++ /dev/null @@ -1,64 +0,0 @@ -{ - "name": "OpenCloud", - "slug": "opencloud", - "categories": [ - 11 - ], - "date_created": "2026-02-05", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 443, - "documentation": "https://docs.opencloud.eu", - "config_path": "/etc/opencloud/opencloud.env, /etc/opencloud/opencloud.yaml, /etc/opencloud/csp.yaml", - "website": "https://opencloud.eu", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/opencloud.webp", - "description": "OpenCloud is the file sharing and collaboration solution of the Heinlein Group. Through intelligent file management and a strong open source community, files become valuable resources, effectively structured and usable in the long term. 
With flexible data rooms and intelligent access rights, teams can access and work together on data anytime, anywhere without barriers, but with a lot of productivity.", - "install_methods": [ - { - "type": "default", - "script": "ct/opencloud.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 20, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "randomly generated during the installation process" - }, - "notes": [ - { - "text": "Valid TLS certificates and fully-qualified domain names behind a reverse proxy (Caddy) for 3 services - OpenCloud (port: 9200), Collabora (port: 9980), and WOPI (port: 9300) are **REQUIRED**", - "type": "warning" - }, - { - "text": "Forgot your admin password? Check `admin_password` in the 'idm' section in `/etc/opencloud/opencloud.yaml`", - "type": "info" - }, - { - "text": "**Optional External Apps**: extract zip archives from App Store to `/etc/opencloud/web/assets/apps`", - "type": "info" - }, - { - "text": "**Optional CalDAV and CardDAV**: requires separate Radicale install. Edit and rename `/etc/opencloud/proxy.yaml.bak` and change your Radicale config to use `http_x_remote_user` as the auth method", - "type": "info" - }, - { - "text": "**Optional OpenID**: Authelia and PocketID supported. Uncomment relevant lines in `/etc/opencloud/opencloud.env` and consult OpenCloud GitHub discussions for configuration tips", - "type": "info" - }, - { - "text": "**Optional Full-text Search with Apache Tika**: requires your own Tika LXC. 
See `https://community-scripts.github.io/ProxmoxVE/scripts?id=apache-tika`", - "type": "info" - }, - { - "text": "**Relevant services**: `opencloud.service`, `opencloud-wopi.service`, `coolwsd.service`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/opengist.json b/frontend/public/json/opengist.json deleted file mode 100644 index 82f60261d..000000000 --- a/frontend/public/json/opengist.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Opengist", - "slug": "opengist", - "categories": [ - 20 - ], - "date_created": "2025-01-14", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 6157, - "documentation": "https://opengist.io/docs/", - "website": "https://opengist.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/opengist.webp", - "config_path": "/opt/opengist/config.yml", - "description": "Self-hosted pastebin powered by Git, open-source alternative to Github Gist.", - "install_methods": [ - { - "type": "default", - "script": "ct/opengist.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/openhab.json b/frontend/public/json/openhab.json deleted file mode 100644 index 65e8d8fca..000000000 --- a/frontend/public/json/openhab.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "openHAB", - "slug": "openhab", - "categories": [ - 16 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8443, - "documentation": "https://www.openhab.org/docs/", - "website": "https://www.openhab.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/openhab.webp", - "config_path": "", - "description": "openHAB is a popular open-source home automation platform that provides a vendor and technology agnostic solution for integrating and automating various smart home devices and 
services. It supports a wide range of devices and protocols, making it easy to bring together different systems and devices into a unified smart home ecosystem. With its user-friendly interface and powerful automation capabilities, openHAB makes it easy to create custom automations and monitor and control your smart home devices and systems, all from a single interface.", - "install_methods": [ - { - "type": "default", - "script": "ct/openhab.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/openobserve.json b/frontend/public/json/openobserve.json deleted file mode 100644 index 90286beaa..000000000 --- a/frontend/public/json/openobserve.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "OpenObserve", - "slug": "openobserve", - "categories": [ - 9 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5080, - "documentation": "https://openobserve.ai/docs/", - "website": "https://openobserve.ai/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/openobserve.webp", - "config_path": "/opt/openobserve/data/.env", - "description": "OpenObserve is a simple yet sophisticated log search, infrastructure monitoring, and APM solution.", - "install_methods": [ - { - "type": "default", - "script": "ct/openobserve.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 3, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Show Login Credentials: `cat /opt/openobserve/data/.env`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/openproject.json b/frontend/public/json/openproject.json deleted file mode 100644 index 7e115953c..000000000 --- a/frontend/public/json/openproject.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - 
"name": "OpenProject", - "slug": "openproject", - "categories": [ - 25 - ], - "date_created": "2025-04-10", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://www.openproject.org", - "website": "https://www.openproject.org", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/openproject.webp", - "config_path": "/etc/openproject/conf.d/env", - "description": "OpenProject is a web-based project management software. Use OpenProject to manage your projects, tasks and goals. Collaborate via work packages and link them to your pull requests on Github. Read more about the OpenProject GitHub integration.", - "install_methods": [ - { - "type": "default", - "script": "ct/openproject.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 8, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "admin" - }, - "notes": [ - { - "text": "If you want to update from v15.x to v17.x, please read `https://www.openproject.org/docs/installation-and-operations/operation/upgrading/#major-upgrades` before doing so.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/openwebui.json b/frontend/public/json/openwebui.json deleted file mode 100644 index 97da18871..000000000 --- a/frontend/public/json/openwebui.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Open WebUI", - "slug": "openwebui", - "categories": [ - 20 - ], - "date_created": "2024-10-24", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://docs.openwebui.com/", - "website": "https://openwebui.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/open-webui.webp", - "config_path": "/root/.env", - "description": "OpenWebUI is a self-hosted, web-based interface that allows you to run AI models entirely offline. 
It integrates with various LLM runners, such as OpenAI and Ollama, and supports features like markdown and LaTeX rendering, model management, and voice/video calls. It also offers multilingual support and the ability to generate images using APIs like DALL-E or ComfyUI", - "install_methods": [ - { - "type": "default", - "script": "ct/openwebui.sh", - "resources": { - "cpu": 4, - "ram": 8192, - "hdd": 50, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Script contains optional installation of Ollama.", - "type": "info" - }, - { - "text": "Initial run of the application/container can take some time, depending on your host speed, as the application is installed/updated at runtime. Please be patient!", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/openwrt-vm.json b/frontend/public/json/openwrt-vm.json deleted file mode 100644 index fca6463be..000000000 --- a/frontend/public/json/openwrt-vm.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "name": "OpenWrt", - "slug": "openwrt-vm", - "categories": [ - 4, - 2 - ], - "date_created": "2024-05-02", - "type": "vm", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://openwrt.org/docs/start", - "website": "https://openwrt.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/openwrt.webp", - "config_path": "", - "description": "OpenWrt is a powerful open-source firmware that can transform a wide range of networking devices into highly customizable and feature-rich routers, providing users with greater control and flexibility over their network infrastructure.", - "install_methods": [ - { - "type": "default", - "script": "vm/openwrt-vm.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 0.5, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "If you use VLANs 
(default LAN is set to VLAN 999), make sure the Proxmox Linux Bridge is configured as VLAN-aware, otherwise the VM may fail to start.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/openziti-controller.json b/frontend/public/json/openziti-controller.json deleted file mode 100644 index 1bbda8806..000000000 --- a/frontend/public/json/openziti-controller.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "openziti-controller", - "slug": "openziti-controller", - "categories": [ - 4 - ], - "date_created": "2025-04-14", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://openziti.io/docs/reference/tunnelers/docker/", - "website": "https://www.openziti.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/openziti.webp", - "config_path": "/opt/openziti/etc/controller/bootstrap.env", - "description": "OpenZiti is an open-source, zero trust networking platform that enables secure connectivity between applications, services, and devices. 
It provides secure, encrypted connections between clients and services, and can be used to create secure, zero trust networks.", - "install_methods": [ - { - "type": "default", - "script": "ct/openziti-controller.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "The Openziti Controller installation will prompt for configuration settings during installation.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/openziti-tunnel.json b/frontend/public/json/openziti-tunnel.json deleted file mode 100644 index 6775d43be..000000000 --- a/frontend/public/json/openziti-tunnel.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "openziti-tunnel", - "slug": "openziti-tunnel", - "categories": [ - 4 - ], - "date_created": "2025-04-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://openziti.io/docs/reference/tunnelers/docker/", - "website": "https://www.openziti.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/openziti.webp", - "config_path": "", - "description": "OpenZiti is an open-source, zero trust networking platform that enables secure connectivity between applications, services, and devices. 
It provides secure, encrypted connections between clients and services, and can be used to create secure, zero trust networks.", - "install_methods": [ - { - "type": "default", - "script": "ct/openziti-tunnel.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "Ubuntu", - "version": "24.04" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "The Openziti tunnel is installed in host mode; please see documentation for more information", - "type": "info" - }, - { - "text": "Openziti tunnel prompts for identity enrollment token during installation", - "type": "info" - } - ] -} diff --git a/frontend/public/json/opnsense-vm.json b/frontend/public/json/opnsense-vm.json deleted file mode 100644 index b3e69a116..000000000 --- a/frontend/public/json/opnsense-vm.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "name": "OPNsense", - "slug": "opnsense-vm", - "categories": [ - 4, - 2 - ], - "date_created": "2025-11-23", - "type": "vm", - "updateable": true, - "privileged": false, - "interface_port": 443, - "documentation": "https://docs.opnsense.org/", - "website": "https://opnsense.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/opnsense.webp", - "config_path": "", - "description": "OPNsense is an open-source firewall and routing platform based on FreeBSD. It provides advanced security features, including intrusion detection, VPN support, traffic shaping, and web filtering, with an intuitive web interface for easy management. Known for its reliability and regular updates, OPNsense is a popular choice for both businesses and home networks.", - "disable": false, - "disable_description": "This script has been temporarily disabled due to installation failures. The OPNsense bootstrap process was not completing successfully, resulting in a plain FreeBSD VM instead of a functional OPNsense installation. The issue is being investigated and the script will be re-enabled once resolved. 
For more details, see: https://github.com/community-scripts/ProxmoxVE/issues/6183", - "install_methods": [ - { - "type": "default", - "script": "vm/opnsense-vm.sh", - "resources": { - "cpu": 4, - "ram": 8192, - "hdd": 10, - "os": "FreeBSD", - "version": "latest" - } - } - ], - "default_credentials": { - "username": "root", - "password": "opnsense" - }, - "notes": [ - { - "text": "It will fail with default settings if there is no vmbr0 and vmbr1 on your node. Use advanced settings in this case.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/ots.json b/frontend/public/json/ots.json deleted file mode 100644 index b47940ea9..000000000 --- a/frontend/public/json/ots.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "OTS", - "slug": "ots", - "categories": [ - 6 - ], - "date_created": "2025-08-04", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 443, - "documentation": "https://github.com/Luzifer/ots/wiki", - "config_path": "/opt/ots/.env", - "website": "https://github.com/Luzifer/ots", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ots.webp", - "description": "One-Time-Secret sharing platform with a symmetric 256bit AES encryption in the browser.", - "install_methods": [ - { - "type": "default", - "script": "ct/ots.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 3, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "When it is in used external please use it behind reverse proxy or create your own certificates", - "type": "info" - } - ] -} diff --git a/frontend/public/json/outline.json b/frontend/public/json/outline.json deleted file mode 100644 index 29fa23b6f..000000000 --- a/frontend/public/json/outline.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Outline", - "slug": "outline", - "categories": [ - 12 - ], - "date_created": "2025-02-26", - "type": "ct", - "updateable": true, - "privileged": 
false, - "interface_port": 3000, - "documentation": "https://docs.getoutline.com/s/hosting/", - "website": "https://www.getoutline.com", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/outline.webp", - "config_path": "/opt/outline/.env", - "description": "The fastest knowledge base for growing teams. Beautiful, realtime collaborative, feature packed, and markdown compatible. It’s time to get your team’s knowledge organized.", - "install_methods": [ - { - "type": "default", - "script": "ct/outline.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "After installation finishes, application will do a database migration so web UI might be unavailable for a minute or two. Also you need to manually add authentication and/or enable HTTPS.", - "type": "info" - }, - { - "text": "Configuration file is at: `/opt/outline/.env`. Modify to suit your environment.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/owncast.json b/frontend/public/json/owncast.json deleted file mode 100644 index 9936cf42a..000000000 --- a/frontend/public/json/owncast.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Owncast", - "slug": "owncast", - "categories": [ - 13 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://owncast.online/docs/", - "website": "https://owncast.online/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/owncast.webp", - "config_path": "", - "description": "Owncast is a free and open source live video and web chat server for use with existing popular broadcasting software.", - "install_methods": [ - { - "type": "default", - "script": "ct/owncast.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - 
"username": "admin", - "password": "abc123" - }, - "notes": [] -} diff --git a/frontend/public/json/owncloud-vm.json b/frontend/public/json/owncloud-vm.json deleted file mode 100644 index bfba4a9ca..000000000 --- a/frontend/public/json/owncloud-vm.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "ownCloud", - "slug": "owncloud-vm", - "categories": [ - 2 - ], - "date_created": "2024-05-02", - "type": "vm", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://doc.owncloud.com/", - "website": "https://www.turnkeylinux.org/owncloud", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/owncloud.webp", - "config_path": "", - "description": "TurnKey ownCloud is an open-source file sharing server and collaboration platform that can store your personal content, like documents and pictures, in a centralized location.", - "install_methods": [ - { - "type": "default", - "script": "vm/owncloud-vm.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 12, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": null - }, - "notes": [ - { - "text": "This VM requires extra installation steps, see install guide at `https://github.com/community-scripts/ProxmoxVE/discussions/144`", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/pairdrop.json b/frontend/public/json/pairdrop.json deleted file mode 100644 index a7c687e15..000000000 --- a/frontend/public/json/pairdrop.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "PairDrop", - "slug": "pairdrop", - "categories": [ - 11 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://github.com/schlagmichdoch/PairDrop/blob/master/docs/host-your-own.md", - "website": "https://github.com/schlagmichdoch/PairDrop", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/pairdrop.webp", - "config_path": "", - 
"description": "PairDrop: Local file sharing in your browser.", - "install_methods": [ - { - "type": "default", - "script": "ct/pairdrop.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/pangolin.json b/frontend/public/json/pangolin.json deleted file mode 100644 index 894fc51ae..000000000 --- a/frontend/public/json/pangolin.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "Pangolin", - "slug": "pangolin", - "categories": [ - 21 - ], - "date_created": "2025-11-01", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 443, - "documentation": "https://docs.pangolin.net/", - "config_path": "/opt/pangolin/config/config.yml", - "website": "https://pangolin.net/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/pangolin.webp", - "description": "Pangolin securely routes traffic over WireGuard tunnels to any private network. It works like a reverse proxy that spans multiple networks — no public IPs, DNS setup, or certificates required.", - "install_methods": [ - { - "type": "default", - "script": "ct/pangolin.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 10, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Type `journalctl -u pangolin | grep -oP 'Token:\\s*\\K\\w+'` into LXC console to get admin token which you will use to create admin account.", - "type": "info" - }, - { - "text": "LXC has 4GB of RAM set initially for the build stage. 
After installation finishes, you can decrease the RAM allocated to 1024MB or 512MB even.", - "type": "info" - }, - { - "text": "Make sure you edit `/opt/pangolin/config/config.yml` and change it to match your needs", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/paperless-ai.json b/frontend/public/json/paperless-ai.json deleted file mode 100644 index 65462aa3c..000000000 --- a/frontend/public/json/paperless-ai.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "PaperlessAI", - "slug": "paperless-ai", - "categories": [ - 20 - ], - "date_created": "2025-02-06", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://github.com/clusterzx/paperless-ai/wiki/1.-Home", - "website": "https://github.com/clusterzx/paperless-ai", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/paperless-ai.webp", - "config_path": "/opt/paperless-ai/data/.env", - "description": "An automated document analyzer for Paperless-ngx using OpenAI API, Ollama and all OpenAI API compatible Services to automatically analyze and tag your documents.It features: Automode, Manual Mode, Ollama and OpenAI, a Chat function to query your documents with AI, a modern and intuitive Webinterface.", - "install_methods": [ - { - "type": "default", - "script": "ct/paperless-ai.sh", - "resources": { - "cpu": 4, - "ram": 4096, - "hdd": 20, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/paperless-gpt.json b/frontend/public/json/paperless-gpt.json deleted file mode 100644 index f1089a91c..000000000 --- a/frontend/public/json/paperless-gpt.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Paperless-GPT", - "slug": "paperless-gpt", - "categories": [ - 20 - ], - "date_created": "2025-03-10", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": 
"https://github.com/icereed/paperless-gpt/blob/main/README.md", - "website": "https://github.com/icereed/paperless-gpt", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/paperless-ngx-light.webp", - "config_path": "/opt/paperless-gpt-data/.env", - "description": "Paperless-GPT seamlessly pairs with paperless-ngx to generate AI-powered document titles and tags, saving you hours of manual sorting. While other tools may offer AI chat features, paperless-gpt stands out by supercharging OCR with LLMs-ensuring high accuracy, even with tricky scans. If you’re craving next-level text extraction and effortless document organization, this is your solution.", - "install_methods": [ - { - "type": "default", - "script": "ct/paperless-gpt.sh", - "resources": { - "cpu": 3, - "ram": 2048, - "hdd": 7, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Configuration File: `/opt/paperless-gpt-data/.env`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/paperless-ngx.json b/frontend/public/json/paperless-ngx.json deleted file mode 100644 index 3fe72afe5..000000000 --- a/frontend/public/json/paperless-ngx.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "Paperless-ngx", - "slug": "paperless-ngx", - "categories": [ - 12 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8000, - "documentation": "https://docs.paperless-ngx.com/", - "website": "https://docs.paperless-ngx.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/paperless-ngx.webp", - "config_path": "/opt/paperless/paperless.conf", - "description": "Paperless-ngx is a software tool designed for digitizing and organizing paper documents. It provides a web-based interface for scanning, uploading, and organizing paper documents, making it easier to manage, search, and access important information. 
Paperless-ngx uses the OCR (Optical Character Recognition) technology to extract text from scanned images and makes it searchable, thus increasing the efficiency of document management.", - "install_methods": [ - { - "type": "default", - "script": "ct/paperless-ngx.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 12, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Show Login Credentials, type `cat ~/paperless-ngx.creds` in the LXC console", - "type": "info" - }, - { - "text": "Script installs English as default OCR language. To install additional languages, use `apt-get install tesseract-ocr-[lang]`, where [lang] is the language code (e.g. `apt-get install tesseract-ocr-deu`).", - "type": "info" - }, - { - "text": "Paperless-NGX use uv, so all calls need to start with `uv run` f.e. `uv run python3 manage.py document_exporter $path` instead `python3 manage.py`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/papra.json b/frontend/public/json/papra.json deleted file mode 100644 index 7f812520b..000000000 --- a/frontend/public/json/papra.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "name": "Papra", - "slug": "papra", - "categories": [ - 12 - ], - "date_created": "2026-03-05", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 1221, - "documentation": "https://github.com/papra-hq/papra", - "website": "https://github.com/papra-hq/papra", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/papra.webp", - "config_path": "/opt/papra/.env", - "description": "Papra is a modern, self-hosted document management system with full-text search, OCR support, and automatic document processing. 
Built with Node.js and featuring a clean web interface for organizing and managing your documents.", - "install_methods": [ - { - "type": "default", - "script": "ct/papra.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 10, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "First visit will prompt you to create an account", - "type": "info" - }, - { - "text": "Tesseract OCR is pre-installed for all languages", - "type": "info" - }, - { - "text": "Documents are stored in /opt/papra/app-data/documents", - "type": "info" - }, - { - "text": "Ingestion folder available at /opt/papra/ingestion for automatic document import", - "type": "info" - }, - { - "text": "Email functionality runs in dry-run mode by default", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/part-db.json b/frontend/public/json/part-db.json deleted file mode 100644 index e66dad94a..000000000 --- a/frontend/public/json/part-db.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Part-DB", - "slug": "part-db", - "categories": [ - 25 - ], - "date_created": "2024-12-18", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://docs.part-db.de/", - "website": "https://github.com/Part-DB/Part-DB-server", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/part-db.webp", - "config_path": "/opt/partdb/.env.local", - "description": "Part-DB is an Open source inventory management system for your electronic components", - "install_methods": [ - { - "type": "default", - "script": "ct/part-db.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Show login and database credentials: `cat partdb.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/passbolt.json 
b/frontend/public/json/passbolt.json deleted file mode 100644 index 1cc1cb575..000000000 --- a/frontend/public/json/passbolt.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Passbolt", - "slug": "passbolt", - "categories": [ - 6 - ], - "date_created": "2025-11-17", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 443, - "documentation": "https://www.passbolt.com/docs/", - "config_path": "/etc/passbolt/passbolt.php", - "website": "https://www.passbolt.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/passbolt.webp", - "description": "Passbolt is a hybrid credential platform. It is built-first for modern IT teams, yet simple enough for everyone. A sovereign, battle-tested solution that delivers for a team of 5, or an organisation of 5000.", - "install_methods": [ - { - "type": "default", - "script": "ct/passbolt.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 2, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Type `cat ~/passbolt.creds` to see MariaDB database credentials. You will need those to setup Passbolt.", - "type": "info" - }, - { - "text": "The application uses self-signed certificates. You can also use Let's Encrypt to get a valid certificate for your domain. 
Please read the documentation for more information.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/patchmon.json b/frontend/public/json/patchmon.json deleted file mode 100644 index 4cb919fa8..000000000 --- a/frontend/public/json/patchmon.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "PatchMon", - "slug": "patchmon", - "categories": [ - 9 - ], - "date_created": "2025-10-25", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://docs.patchmon.net", - "website": "https://patchmon.net", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/patchmon.webp", - "config_path": "/opt/patchmon/backend/.env, /opt/patchmon/frontend/.env", - "description": "Monitor Linux patches across all your hosts with real-time visibility, security update tracking, and comprehensive package management.", - "install_methods": [ - { - "type": "default", - "script": "ct/patchmon.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/paymenter.json b/frontend/public/json/paymenter.json deleted file mode 100644 index 53bdaf364..000000000 --- a/frontend/public/json/paymenter.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Paymenter", - "slug": "paymenter", - "categories": [ - 21 - ], - "date_created": "2025-01-28", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://paymenter.org/docs", - "website": "https://paymenter.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/paymenter.webp", - "config_path": "/opt/paymenter/.env", - "description": "Paymenter is an open source webshop solution for hosting companies. 
It's developed to provide an more easy way to manage your hosting company.", - "install_methods": [ - { - "type": "default", - "script": "ct/paymenter.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 5, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin@paymenter.org", - "password": "paymenter" - }, - "notes": [ - { - "text": "After installation, navigate to the directory with `cd /opt/paymenter` and run `php artisan app:init`. An interactive setup will prompt you to enter your company name and application URL.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/pbs-microcode.json b/frontend/public/json/pbs-microcode.json deleted file mode 100644 index 96ed3af15..000000000 --- a/frontend/public/json/pbs-microcode.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "PBS Processor Microcode", - "slug": "pbs-microcode", - "categories": [ - 1 - ], - "date_created": "2025-02-07", - "type": "pve", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "", - "description": "Processor Microcode is a layer of low-level software that runs on the processor and provides patches or updates to its firmware. Microcode updates can fix hardware bugs, improve performance, and enhance security features of the processor. This script is adapted for the Proxmox Backup Server environment and will only run on bare metal systems. If running in a virtualized environment, the script will exit. Note that firmware update mechanisms, such as Intel's Management Engine (ME) or AMD's Platform Security Processor (PSP), may vary depending on your processor and its implementation. 
Please consult your processor's documentation to verify if firmware updates can be applied through the operating system.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/pbs_microcode.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within the Proxmox Backup Server shell on a bare metal system. The script will exit if it detects that it is running in a virtualized environment.", - "type": "info" - }, - { - "text": "After a reboot, you can check whether any microcode updates are currently in effect by running the following command: `journalctl -k | grep -E \"microcode\" | head -n 1`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/pbs4-upgrade.json b/frontend/public/json/pbs4-upgrade.json deleted file mode 100644 index 56969bc01..000000000 --- a/frontend/public/json/pbs4-upgrade.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "name": "PBS 4 Upgrade", - "slug": "pbs4-upgrade", - "categories": [ - 1 - ], - "date_created": "2025-08-26", - "type": "pve", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": "https://pbs.proxmox.com/wiki/Upgrade_from_3_to_4", - "website": "https://www.proxmox.com/en/proxmox-backup-server", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "", - "description": "This script guides you through upgrading Proxmox Backup Server from version 3.x (Debian 12 Bookworm) to version 4.0 (Debian 13 Trixie). 
It adjusts the Debian base sources, configures PBS 4 repositories in deb822 format, updates enterprise/no-subscription/test repos, runs a full system upgrade, and finalizes with a reboot.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/pbs4-upgrade.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute this script directly on the PBS 3.x host as root.", - "type": "info" - }, - { - "text": "Ensure you have a verified backup of /etc/proxmox-backup before starting.", - "type": "warning" - }, - { - "text": "Do not run this on an already upgraded PBS 4.x system.", - "type": "warning" - }, - { - "text": "A reboot is strongly recommended after upgrade to activate the new kernel and services.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/peanut.json b/frontend/public/json/peanut.json deleted file mode 100644 index ca558b09b..000000000 --- a/frontend/public/json/peanut.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "PeaNUT", - "slug": "peanut", - "categories": [ - 4 - ], - "date_created": "2024-06-14", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://github.com/Brandawg93/PeaNUT/blob/main/README.md", - "website": "https://github.com/Brandawg93/PeaNUT/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/peanut.webp", - "config_path": "/etc/peanut/settings.yml", - "description": "PeaNUT is a small dashboard for Network UPS Tools (NUT). It provides a web interface to monitor and manage UPS devices. PeaNUT allows users to view device status, retrieve information, and manage UPS parameters through its API. 
It's customizable for different UPS devices and supports integration with the Homepage dashboard.", - "install_methods": [ - { - "type": "default", - "script": "ct/peanut.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 7, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/pelican-panel.json b/frontend/public/json/pelican-panel.json deleted file mode 100644 index 915c5703d..000000000 --- a/frontend/public/json/pelican-panel.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Pelican Panel", - "slug": "pelican-panel", - "categories": [ - 24 - ], - "date_created": "2025-02-27", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://pelican.dev/docs/panel/getting-started", - "website": "https://pelican.dev/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/pelican-panel.webp", - "config_path": "", - "description": "Pelican Panel is a web-based control panel for managing game and application servers. It provides an intuitive interface to start, stop, configure, and monitor servers easily. 
It works alongside Pelican Wings, a lightweight daemon that handles server deployments and resource management.", - "install_methods": [ - { - "type": "default", - "script": "ct/pelican-panel.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Database credentials: `cat ~/pelican-panel.creds`", - "type": "info" - }, - { - "text": "Step 5 of the Panel installer can be skipped because it has already been set up by the script.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/pelican-wings.json b/frontend/public/json/pelican-wings.json deleted file mode 100644 index 8eda5457f..000000000 --- a/frontend/public/json/pelican-wings.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Pelican Wings", - "slug": "pelican-wings", - "categories": [ - 24 - ], - "date_created": "2025-02-27", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://pelican.dev/docs/wings/install", - "website": "https://pelican.dev/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/pelican-panel.webp", - "config_path": "", - "description": "Pelican Wings is Pelican's server control plane, built for the rapidly changing gaming industry and designed to be highly performant and secure. 
Wings provides an HTTP API allowing you to interface directly with running server instances, fetch server logs, generate backups, and control all aspects of the server lifecycle.", - "install_methods": [ - { - "type": "default", - "script": "ct/pelican-wings.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "After installation, you need to use the Auto Deploy command generated by Pelican Panel and, after running the command, restart the Wings service with `systemctl restart wings.service`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/pf2etools.json b/frontend/public/json/pf2etools.json deleted file mode 100644 index 8b7c13d87..000000000 --- a/frontend/public/json/pf2etools.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Pf2eTools", - "slug": "pf2etools", - "categories": [ - 24 - ], - "date_created": "2025-02-13", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://github.com/Pf2eToolsOrg/Pf2eTools/wiki", - "website": "https://pf2etools.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/pf2etools.webp", - "config_path": "", - "description": "Pf2eTools is an open-source website aiming to provide tools and information for Pathfinder 2nd Edition players and gamemasters. 
It's built using basic web technologies to ensure wide compatibility, and utilises client-side caching for speed, efficiency, and offline access.", - "install_methods": [ - { - "type": "default", - "script": "ct/pf2etools.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 6, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/photoprism.json b/frontend/public/json/photoprism.json deleted file mode 100644 index f455a40ca..000000000 --- a/frontend/public/json/photoprism.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "PhotoPrism", - "slug": "photoprism", - "categories": [ - 13 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 2342, - "documentation": "https://docs.photoprism.app/", - "website": "https://photoprism.app/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/photoprism.webp", - "config_path": "/opt/photoprism/config/.env", - "description": "PhotoPrism is an AI-Powered Photos App for the Decentralized Web. 
It makes use of the latest technologies to tag and find pictures automatically without getting in your way.", - "install_methods": [ - { - "type": "default", - "script": "ct/photoprism.sh", - "resources": { - "cpu": 2, - "ram": 3072, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "changeme" - }, - "notes": [ - { - "text": "Please note that Ubuntu 22.04 and Debian 12 are supported, while older Linux distributions may not be compatible.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/phpmyadmin.json b/frontend/public/json/phpmyadmin.json deleted file mode 100644 index 7031023cf..000000000 --- a/frontend/public/json/phpmyadmin.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "PhpMyAdmin", - "slug": "phpmyadmin", - "categories": [ - 8 - ], - "date_created": "2025-10-01", - "type": "addon", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://www.phpmyadmin.net/docs/", - "config_path": "Debian/Ubuntu: /var/www/html/phpMyAdmin | Alpine: /usr/share/phpmyadmin", - "website": "https://www.phpmyadmin.net/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/phpmyadmin.webp", - "description": "phpMyAdmin is a free software tool written in PHP, intended to handle the administration of MySQL over the Web. phpMyAdmin supports a wide range of operations on MySQL and MariaDB. 
Frequently used operations (managing databases, tables, columns, relations, indexes, users, permissions, etc) can be performed via the user interface, while you still have the ability to directly execute any SQL statement.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/phpmyadmin.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within an existing LXC Console", - "type": "warning" - }, - { - "text": "To update or uninstall run bash call again", - "type": "info" - } - ] -} diff --git a/frontend/public/json/pialert.json b/frontend/public/json/pialert.json deleted file mode 100644 index ae0f3b8ea..000000000 --- a/frontend/public/json/pialert.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Pi.Alert", - "slug": "pialert", - "categories": [ - 4 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://github.com/leiweibau/Pi.Alert/blob/main/README.md", - "website": "https://github.com/leiweibau/Pi.Alert/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/pi-alert.webp", - "config_path": "/opt/pialert/config/pialert.conf", - "description": "Pi.Alert is a WIFI / LAN intruder detector. Checks the devices connected and alert you with unknown devices. 
It also warns of the disconnection of \"always connected\" devices.", - "install_methods": [ - { - "type": "default", - "script": "ct/pialert.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 3, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/pihole-exporter.json b/frontend/public/json/pihole-exporter.json deleted file mode 100644 index c26ff074e..000000000 --- a/frontend/public/json/pihole-exporter.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "name": "Pi-Hole Exporter", - "slug": "pihole-exporter", - "categories": [ - 9 - ], - "date_created": "2025-12-21", - "type": "addon", - "updateable": true, - "privileged": false, - "interface_port": 9617, - "documentation": "https://github.com/eko/pihole-exporter", - "website": "https://github.com/eko/pihole-exporter", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/pi-hole.webp", - "config_path": "/opt/pihole-exporter.env", - "description": "A Prometheus exporter for PI-Hole's Raspberry PI ad blocker", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/pihole-exporter.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - }, - { - "type": "alpine", - "script": "tools/addon/pihole-exporter.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/pihole.json b/frontend/public/json/pihole.json deleted file mode 100644 index d945f30fc..000000000 --- a/frontend/public/json/pihole.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "name": "Pi-Hole", - "slug": "pihole", - "categories": [ - 5 - ], - "date_created": "2024-04-28", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://docs.pi-hole.net/", 
- "website": "https://pi-hole.net/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/pi-hole.webp", - "config_path": "/etc/pihole/pihole.toml", - "description": "Pi-hole is a free, open-source network-level advertisement and Internet tracker blocking application. It runs on a Raspberry Pi or other Linux-based systems and acts as a DNS sinkhole, blocking unwanted traffic before it reaches a user's device. Pi-hole can also function as a DHCP server, providing IP addresses and other network configuration information to devices on a network. The software is highly configurable and supports a wide range of customizations, such as allowing or blocking specific domains, setting up blocklists and whitelists, and customizing the appearance of the web-based interface. The main purpose of Pi-hole is to protect users' privacy and security by blocking unwanted and potentially malicious content, such as ads, trackers, and malware. It is designed to be easy to set up and use, and can be configured through a web-based interface or through a terminal-based command-line interface.", - "install_methods": [ - { - "type": "default", - "script": "ct/pihole.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "To set your password, log in to the container, and type the following: `pihole setpassword`", - "type": "info" - }, - { - "text": "With an option to add Unbound", - "type": "info" - }, - { - "text": "With an option to configure Unbound as a forwarding DNS server (using DNS-over-TLS (DoT)) as opposed to a recursive DNS server", - "type": "info" - }, - { - "text": "WARNING: Installation sources scripts outside of Community Scripts repo. 
Please check the source before installing.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/pimox-haos-vm.json b/frontend/public/json/pimox-haos-vm.json deleted file mode 100644 index 2afe767c7..000000000 --- a/frontend/public/json/pimox-haos-vm.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "PiMox HAOS", - "slug": "pimox-haos-vm", - "categories": [ - 16 - ], - "date_created": "2024-04-29", - "type": "vm", - "updateable": true, - "privileged": false, - "interface_port": 8123, - "documentation": null, - "website": "https://github.com/jiangcuo/Proxmox-Port", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/home-assistant.webp", - "config_path": "", - "description": "The script automates the manual process of finding, downloading and extracting the aarch64 (qcow2) disk image provided by the Home Assistant Team, creating a VM with user defined settings, importing and attaching the disk, setting the boot order and starting the VM.", - "install_methods": [ - { - "type": "default", - "script": "vm/pimox-haos-vm.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 32, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "After the script completes, click on the VM, then on the Summary or Console tab to find the VM IP.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/planka.json b/frontend/public/json/planka.json deleted file mode 100644 index 1d490fa7f..000000000 --- a/frontend/public/json/planka.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "PLANKA", - "slug": "planka", - "categories": [ - 12 - ], - "date_created": "2025-06-19", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 1337, - "documentation": "https://docs.planka.cloud/", - "website": "https://planka.app/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/planka.webp", - "config_path": "/opt/planka/.env", - "description": 
"Planka is a powerful, project management platform that transforms how teams collaborate. Create projects with multiple boards, organize tasks with intuitive drag-and-drop cards, attach files, write rich markdown descriptions, set due dates, assign team members, and keep conversations flowing with comments and labels—all with seamless real-time updates and smart notifications.", - "install_methods": [ - { - "type": "default", - "script": "ct/planka.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Type `cat ~/planka.creds` inside LXC to see admin user and database credentials.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/plant-it.json b/frontend/public/json/plant-it.json deleted file mode 100644 index 16dc3606f..000000000 --- a/frontend/public/json/plant-it.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Plant-it", - "slug": "plant-it", - "categories": [ - 24 - ], - "date_created": "2025-03-11", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://docs.plant-it.org/latest/", - "website": "https://plant-it.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/plant-it.webp", - "config_path": "/opt/plant-it/backend/server.env", - "description": "Plant-it is a self-hosted, open-source app designed to help users manage and track plant care. It allows users to add plants, log activities like watering, fertilizing, and pruning, set reminders, and upload photos for visual tracking. The app includes a database of over 400,000 plant species, offering detailed care information such as optimal growing conditions and maintenance tips.\n\nUsers can organize their plant collection, monitor growth, and share data with others. 
The app is free to use and works on Android, iOS, and the web.", - "install_methods": [ - { - "type": "default", - "script": "ct/plant-it.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 5, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "This Script use Adoptium JDK 21 - Sometimes the installation of these takes a while", - "type": "info" - }, - { - "text": "After the initial start, the backend requires approx. 1-2 minutes boot time", - "type": "info" - } - ] -} diff --git a/frontend/public/json/plex.json b/frontend/public/json/plex.json deleted file mode 100644 index 0dedffc09..000000000 --- a/frontend/public/json/plex.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Plex Media Server", - "slug": "plex", - "categories": [ - 13 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 32400, - "documentation": "https://support.plex.tv/articles/", - "website": "https://www.plex.tv/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/plex.webp", - "config_path": "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Preferences.xml", - "description": "Plex personal media server magically scans and organizes your files, sorting your media intuitively and beautifully.", - "install_methods": [ - { - "type": "default", - "script": "ct/plex.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "ubuntu", - "version": "24.04" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "With Privileged/Unprivileged Hardware Acceleration Support", - "type": "info" - } - ] -} diff --git a/frontend/public/json/pocketbase.json b/frontend/public/json/pocketbase.json deleted file mode 100644 index 0745af81a..000000000 --- a/frontend/public/json/pocketbase.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Pocketbase", - 
"slug": "pocketbase", - "categories": [ - 8 - ], - "date_created": "2024-05-07", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://pocketbase.io/docs/", - "website": "https://pocketbase.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/pocketbase.webp", - "config_path": "", - "description": "PocketBase is an open source backend consisting of embedded database (SQLite) with realtime subscriptions, built-in auth management, convenient dashboard UI and simple REST-ish API.", - "install_methods": [ - { - "type": "default", - "script": "ct/pocketbase.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Type `/opt/pocketbase/pocketbase superuser create YOUREMAIL PASSWORD` to create your superuser account.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/pocketid.json b/frontend/public/json/pocketid.json deleted file mode 100644 index 02fb2decf..000000000 --- a/frontend/public/json/pocketid.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Pocket ID", - "slug": "pocketid", - "categories": [ - 6 - ], - "date_created": "2025-01-28", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 1411, - "documentation": "https://pocket-id.org/docs/", - "website": "https://github.com/pocket-id/pocket-id", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/pocket-id.webp", - "config_path": "/opt/pocket-id/.env", - "description": "Pocket ID is a simple OIDC provider that allows users to authenticate with their passkeys to your services.", - "install_methods": [ - { - "type": "default", - "script": "ct/pocketid.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": 
[ - { - "text": "Pocket ID requires https to work.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/podman-homeassistant.json b/frontend/public/json/podman-homeassistant.json deleted file mode 100644 index ad7a66984..000000000 --- a/frontend/public/json/podman-homeassistant.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "Podman Home Assistant Container", - "slug": "podman-homeassistant", - "categories": [ - 16 - ], - "date_created": "2024-04-29", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8123, - "documentation": "https://www.home-assistant.io/docs/", - "website": "https://www.home-assistant.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/home-assistant.webp", - "config_path": "/var/lib/containers/storage/volumes/hass_config/_data", - "description": "A standalone Podman container-based installation of Home Assistant Core means that the Home Assistant Core software is installed inside a container managed by Podman, separate from the host operating system. This provides a flexible and scalable solution for running the software, as the container can be easily moved between host systems or isolated from other processes for security. 
Podman is a popular open-source tool for managing containers that is similar to Docker, but designed for use on Linux systems without a daemon.\r\n\r\n\ud83d\udec8 If the LXC is created Privileged, the script will automatically set up USB passthrough.", - "install_methods": [ - { - "type": "default", - "script": "ct/podman-homeassistant.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 16, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "If the LXC is created Privileged, the script will automatically set up USB passthrough.", - "type": "warning" - }, - { - "text": "config path: `/var/lib/containers/storage/volumes/hass_config/_data`", - "type": "info" - }, - { - "text": "Options to Install Portainer or Portainer Agent", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/podman.json b/frontend/public/json/podman.json deleted file mode 100644 index c74a6aafd..000000000 --- a/frontend/public/json/podman.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Podman", - "slug": "podman", - "categories": [ - 3 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://podman.io/docs", - "website": "https://podman.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/podman.webp", - "config_path": "", - "description": "Podman is an open-source, daemonless, and portable container engine that allows users to manage containers on Linux systems without the need for a daemon or system service to be running in the background. It provides an API and a command-line interface that can be used to create, run, and manage containers and their associated networks, volumes, and images. 
It is built on top of the Open Container Initiative (OCI) runtime specification, making it compatible with other OCI-compliant container engines.", - "install_methods": [ - { - "type": "default", - "script": "ct/podman.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Options to Install Portainer or Portainer Agent", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/post-pbs-install.json b/frontend/public/json/post-pbs-install.json deleted file mode 100644 index 6c3a61caa..000000000 --- a/frontend/public/json/post-pbs-install.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "PBS Post Install", - "slug": "post-pbs-install", - "categories": [ - 1 - ], - "date_created": "2024-04-29", - "type": "pve", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "", - "description": "The script is designed for Proxmox Backup Server (PBS) and will give options to Disable the Enterprise Repo, Add/Correct PBS Sources, Enable the No-Subscription Repo, Add Test Repo, Disable Subscription Nag, Update Proxmox Backup Server and Reboot PBS.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/post-pbs-install.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Proxmox Backup Server ONLY", - "type": "info" - }, - { - "text": "Execute within the Proxmox Backup Server Shell", - "type": "info" - }, - { - "text": "It is recommended to answer “yes” (y) to all options presented during the process.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/post-pmg-install.json 
b/frontend/public/json/post-pmg-install.json deleted file mode 100644 index 34195cb56..000000000 --- a/frontend/public/json/post-pmg-install.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "PMG Post Install", - "slug": "post-pmg-install", - "categories": [ - 1 - ], - "date_created": "2025-01-20", - "type": "pve", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "", - "description": "The script is designed for Proxmox Mail Gateway and will give options to Disable the Enterprise Repo, Add/Correct PMG Sources, Enable the No-Subscription Repo, Add Test Repo, Disable Subscription Nag, Update Proxmox Mail Gateway and Reboot PMG.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/post-pmg-install.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Proxmox Mail Gateway ONLY", - "type": "warning" - }, - { - "text": "Execute within the Proxmox Mail Gateway Shell", - "type": "info" - }, - { - "text": "It is recommended to answer “yes” (y) to all options presented during the process.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/post-pve-install.json b/frontend/public/json/post-pve-install.json deleted file mode 100644 index 176d77252..000000000 --- a/frontend/public/json/post-pve-install.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "PVE Post Install", - "slug": "post-pve-install", - "categories": [ - 1 - ], - "date_created": "2024-04-28", - "type": "pve", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "", - "description": "This script provides options for 
managing Proxmox VE repositories, including disabling the Enterprise Repo, adding or correcting PVE sources, enabling the No-Subscription Repo, adding the test Repo, disabling the subscription nag, updating Proxmox VE, and rebooting the system.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/post-pve-install.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within the Proxmox shell", - "type": "info" - }, - { - "text": "It is recommended to answer “yes” (y) to all options presented during the process.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/postgresql.json b/frontend/public/json/postgresql.json deleted file mode 100644 index 39581f71a..000000000 --- a/frontend/public/json/postgresql.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "name": "PostgreSQL", - "slug": "postgresql", - "categories": [ - 8 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5432, - "documentation": "https://www.postgresql.org/docs/", - "website": "https://www.postgresql.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/postgresql.webp", - "config_path": "", - "description": "PostgreSQL (often referred to as Postgres) is an open-source relational database management system that is known for its extensibility and strict adherence to SQL standards. It is a free and powerful database solution, suitable for a wide range of applications, from small projects to large enterprise systems. 
PostgreSQL is widely used for its reliability, feature-richness, and robustness.", - "install_methods": [ - { - "type": "default", - "script": "ct/postgresql.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-postgresql.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 1, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Set a password after installation for postgres user by running `echo \"ALTER USER postgres with encrypted password 'your_password';\" | sudo -u postgres psql`", - "type": "info" - }, - { - "text": "Debian script offers versions `15, 16, 17, 18`, while Alpine script offers versions `15, 16, 17`.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/powerdns.json b/frontend/public/json/powerdns.json deleted file mode 100644 index c71bdf26a..000000000 --- a/frontend/public/json/powerdns.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "PowerDNS", - "slug": "powerdns", - "categories": [ - 5 - ], - "date_created": "2026-03-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://doc.powerdns.com/index.html", - "config_path": "/opt/poweradmin/config/settings.php", - "website": "https://www.powerdns.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/powerdns.webp", - "description": "The PowerDNS Authoritative Server is a versatile nameserver which supports a large number of backends. These backends can either be plain zone files or be more dynamic in nature. PowerDNS has the concepts of ‘backends’. A backend is a datastore that the server will consult that contains DNS records (and some metadata). 
The backends range from database backends (MySQL, PostgreSQL) and BIND zone files to co-processes and JSON API’s.", - "install_methods": [ - { - "type": "default", - "script": "ct/powerdns.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "For administrator credentials type: `cat ~/poweradmin.creds` inside LXC.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/privatebin.json b/frontend/public/json/privatebin.json deleted file mode 100644 index 1585d0c7e..000000000 --- a/frontend/public/json/privatebin.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "PrivateBin", - "slug": "privatebin", - "categories": [ - 12 - ], - "date_created": "2025-01-29", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://github.com/PrivateBin/PrivateBin/wiki", - "website": "https://github.com/PrivateBin/PrivateBin", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/privatebin.webp", - "config_path": "/opt/privatebin/cfg/conf.php", - "description": "PrivateBin is a minimalist, open-source pastebin where the server has zero knowledge of pasted data. 
Data is encrypted/decrypted in the browser using 256-bit AES.", - "install_methods": [ - { - "type": "default", - "script": "ct/privatebin.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/profilarr.json b/frontend/public/json/profilarr.json deleted file mode 100644 index b85c7d43a..000000000 --- a/frontend/public/json/profilarr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Profilarr", - "slug": "profilarr", - "categories": [ - 14 - ], - "date_created": "2026-03-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 6868, - "documentation": "https://github.com/Dictionarry-Hub/profilarr#readme", - "website": "https://github.com/Dictionarry-Hub/profilarr", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/profilarr.webp", - "config_path": "/config", - "description": "Profilarr is a configuration management platform for Radarr and Sonarr that simplifies importing, syncing, and managing quality profiles, custom formats, and release profiles.", - "install_methods": [ - { - "type": "default", - "script": "ct/profilarr.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/projectsend.json b/frontend/public/json/projectsend.json deleted file mode 100644 index ebede6fb1..000000000 --- a/frontend/public/json/projectsend.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "ProjectSend", - "slug": "projectsend", - "categories": [ - 12 - ], - "date_created": "2025-01-20", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://docs.projectsend.org/", - "website": "https://projectsend.org/", - "logo": 
"https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/projectsend.webp", - "config_path": "/opt/projectsend/includes/sys.config.php", - "description": "ProjectSend is a free, open source software that lets you share files with your clients, focused on ease of use and privacy. It supports clients groups, system users roles, statistics, multiple languages, detailed logs... and much more!", - "install_methods": [ - { - "type": "default", - "script": "ct/projectsend.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "After running the update script, logging in as a system user in ProjectSend is necessary to upgrade the database.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/prometheus-alertmanager.json b/frontend/public/json/prometheus-alertmanager.json deleted file mode 100644 index 87a45f91e..000000000 --- a/frontend/public/json/prometheus-alertmanager.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Prometheus Alertmanager", - "slug": "prometheus-alertmanager", - "categories": [ - 9 - ], - "date_created": "2025-01-09", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9093, - "documentation": "https://prometheus.io/docs/alerting/latest/overview/", - "website": "https://prometheus.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/prometheus.webp", - "config_path": "/etc/alertmanager/alertmanager.yml", - "description": "Alerting with Prometheus is separated into two parts. Alerting rules in Prometheus servers send alerts to an Alertmanager. 
The Alertmanager then manages those alerts, including silencing, inhibition, aggregation and sending out notifications via methods such as email, on-call notification systems, and chat platforms.", - "install_methods": [ - { - "type": "default", - "script": "ct/prometheus-alertmanager.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/prometheus-blackbox-exporter.json b/frontend/public/json/prometheus-blackbox-exporter.json deleted file mode 100644 index d99193f36..000000000 --- a/frontend/public/json/prometheus-blackbox-exporter.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "name": "Prometheus Blackbox Exporter", - "slug": "prometheus-blackbox-exporter", - "categories": [ - 1, - 9 - ], - "date_created": "2025-10-10", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9115, - "documentation": "https://github.com/prometheus/blackbox_exporter", - "website": "https://github.com/prometheus/blackbox_exporter", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/prometheus.webp", - "config_path": "/opt/blackbox-exporter/blackbox.yml", - "description": "An exporter allows blackbox probing of endpoints over HTTP, HTTPS, DNS, TCP, ICMP and gRPC for use by the Prometheus monitoring system.", - "install_methods": [ - { - "type": "default", - "script": "ct/prometheus-blackbox-exporter.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Please adjust the Proxmox credentials in the configuration file!", - "type": "info" - } - ] -} diff --git a/frontend/public/json/prometheus-paperless-ngx-exporter.json b/frontend/public/json/prometheus-paperless-ngx-exporter.json deleted file mode 100644 index 
ac5741c48..000000000 --- a/frontend/public/json/prometheus-paperless-ngx-exporter.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Prometheus Paperless NGX Exporter", - "slug": "prometheus-paperless-ngx-exporter", - "categories": [ - 9 - ], - "date_created": "2025-02-07", - "type": "addon", - "updateable": true, - "privileged": false, - "interface_port": 8081, - "documentation": "https://github.com/hansmi/prometheus-paperless-exporter", - "website": "https://github.com/hansmi/prometheus-paperless-exporter", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/paperless-ngx.webp", - "config_path": "/etc/prometheus-paperless-ngx-exporter/config.env", - "description": "Prometheus metrics exporter for Paperless-NGX, a document management system transforming physical documents into a searchable online archive. The exporter relies on Paperless' REST API.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/prometheus-paperless-ngx-exporter.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/prometheus-pve-exporter.json b/frontend/public/json/prometheus-pve-exporter.json deleted file mode 100644 index 881ba9060..000000000 --- a/frontend/public/json/prometheus-pve-exporter.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "name": "Prometheus Proxmox VE Exporter", - "slug": "prometheus-pve-exporter", - "categories": [ - 1, - 9 - ], - "date_created": "2025-01-28", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9221, - "documentation": "https://github.com/prometheus-pve/prometheus-pve-exporter", - "website": "https://github.com/prometheus-pve/prometheus-pve-exporter", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "/opt/prometheus-pve-exporter/pve.yml", - "description": "An exporter that 
exposes information gathered from Proxmox VE node for use by the Prometheus monitoring system.", - "install_methods": [ - { - "type": "default", - "script": "ct/prometheus-pve-exporter.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Please adjust the Proxmox credentials in the configuration file!", - "type": "info" - } - ] -} diff --git a/frontend/public/json/prometheus.json b/frontend/public/json/prometheus.json deleted file mode 100644 index adcf63b42..000000000 --- a/frontend/public/json/prometheus.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "name": "Prometheus", - "slug": "prometheus", - "categories": [ - 9 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9090, - "documentation": "https://prometheus.io/docs/introduction/overview/", - "website": "https://prometheus.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/prometheus.webp", - "config_path": "/etc/prometheus/prometheus.yml", - "description": "Prometheus is widely used to monitor the performance and health of various infrastructure components and applications, and trigger alerts based on predefined rules. 
It has a multi-dimensional data model and supports various data sources and exporters, making it an extremely flexible and scalable monitoring solution.", - "install_methods": [ - { - "type": "default", - "script": "ct/prometheus.sh", - "resources": { - "cpu": 1, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-prometheus.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 1, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/prowlarr.json b/frontend/public/json/prowlarr.json deleted file mode 100644 index 095e4f13e..000000000 --- a/frontend/public/json/prowlarr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Prowlarr", - "slug": "prowlarr", - "categories": [ - 14 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9696, - "documentation": "https://wiki.servarr.com/prowlarr", - "website": "https://github.com/Prowlarr/Prowlarr", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/prowlarr.webp", - "config_path": "/var/lib/prowlarr/", - "description": "Prowlarr is a software tool designed to integrate with various PVR (Personal Video Recorder) apps. It is built on a popular *arr .net/ReactJS base stack and serves as an indexer manager and proxy. Prowlarr makes it easy to manage and organize TV show and movie collections, by integrating with popular PVR apps and automating the downloading and organizing of media files. The software provides a web-based interface for managing and organizing TV shows and movies, making it easy to search and find content. Prowlarr also supports metadata management, including show and movie information, making it easy for users to keep their media collection organized and up-to-date. 
The software is designed to be easy to use and provides a simple and intuitive interface for managing and organizing media collections, making it a valuable tool for media enthusiasts who want to keep their collection organized and up-to-date. With Prowlarr, users can enjoy their media collection from anywhere, making it a powerful tool for managing and sharing media files.", - "install_methods": [ - { - "type": "default", - "script": "ct/prowlarr.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/proxmox-backup-server.json b/frontend/public/json/proxmox-backup-server.json deleted file mode 100644 index 85d4db3e1..000000000 --- a/frontend/public/json/proxmox-backup-server.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Proxmox Backup Server (PBS)", - "slug": "proxmox-backup-server", - "categories": [ - 1 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8007, - "documentation": "https://pbs.proxmox.com/docs/", - "website": "https://www.proxmox.com/en/proxmox-backup-server/overview", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "", - "description": "Proxmox Backup Server is an enterprise backup solution, for backing up and restoring VMs, containers, and physical hosts. By supporting incremental, fully deduplicated backups, Proxmox Backup Server significantly reduces network load and saves valuable storage space.", - "install_methods": [ - { - "type": "default", - "script": "ct/proxmox-backup-server.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 10, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "root", - "password": null - }, - "notes": [ - { - "text": "Set a root password if using autologin. 
This will be the PBS password. `passwd root`", - "type": "warning" - }, - { - "text": "Advanced Install is only possible with IPv6 disabled! Otherwise, the installation may get stuck.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/proxmox-datacenter-manager.json b/frontend/public/json/proxmox-datacenter-manager.json deleted file mode 100644 index 5220184cc..000000000 --- a/frontend/public/json/proxmox-datacenter-manager.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Proxmox Datacenter Manager (PDM)", - "slug": "proxmox-datacenter-manager", - "categories": [ - 1 - ], - "date_created": "2024-12-25", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8443, - "documentation": "https://pve.proxmox.com/wiki/Proxmox_Datacenter_Manager_Roadmap", - "website": "https://pve.proxmox.com/wiki/Proxmox_Datacenter_Manager_Roadmap", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "", - "description": "The Proxmox Datacenter Manager project has been developed with the objective of providing a centralized overview of all your individual nodes and clusters. It also enables basic management like migrations of virtual guests without any cluster network requirements. ", - "install_methods": [ - { - "type": "default", - "script": "ct/proxmox-datacenter-manager.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 10, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Set a root password if using autologin. This will be the Proxmox-Datacenter-Manager password. 
`sudo passwd root`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/proxmox-mail-gateway.json b/frontend/public/json/proxmox-mail-gateway.json deleted file mode 100644 index 3b3b11479..000000000 --- a/frontend/public/json/proxmox-mail-gateway.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Proxmox Mail Gateway (PMG)", - "slug": "proxmox-mail-gateway", - "categories": [ - 1 - ], - "date_created": "2025-02-04", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8006, - "documentation": "https://pmg.proxmox.com/pmg-docs/pmg-admin-guide.html", - "website": "https://www.proxmox.com/en/products/proxmox-mail-gateway/overview", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "", - "description": "Proxmox Mail Gateway is the leading open-source email security solution helping you to protect your mail server against all email threats from the moment they emerge.", - "install_methods": [ - { - "type": "default", - "script": "ct/proxmox-mail-gateway.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 10, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "root", - "password": null - }, - "notes": [ - { - "text": "Set a root password if using autologin. This will be the PMG password. 
`passwd root`", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/ps5-mqtt.json b/frontend/public/json/ps5-mqtt.json deleted file mode 100644 index e58316433..000000000 --- a/frontend/public/json/ps5-mqtt.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "PS5-MQTT", - "slug": "ps5-mqtt", - "categories": [ - 18 - ], - "date_created": "2025-01-09", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8645, - "documentation": null, - "website": "https://github.com/FunkeyFlo/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ps5-mqtt.webp", - "config_path": "/opt/.config/ps5-mqtt/config.json", - "description": "Integrate your Sony Playstation 5 devices with Home Assistant using MQTT.", - "install_methods": [ - { - "type": "default", - "script": "ct/ps5-mqtt.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 3, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "After installation, the MQTT endpoint must be configured. 
The configuration file is located within the LXC container at: `/opt/.config/ps5-mqtt/config.json`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/pterodactyl-panel.json b/frontend/public/json/pterodactyl-panel.json deleted file mode 100644 index 51d87b4b5..000000000 --- a/frontend/public/json/pterodactyl-panel.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Pterodactyl Panel", - "slug": "pterodactyl-panel", - "categories": [ - 24 - ], - "date_created": "2025-03-04", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://pterodactyl.io/panel/1.0/getting_started.html", - "website": "https://pterodactyl.io", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/pterodactyl.webp", - "config_path": "/opt/pterodactyl-panel/.env", - "description": "Pterodactyl Panel is a web-based control panel for managing game and application servers. It provides an intuitive interface to start, stop, configure, and monitor servers easily. 
It works alongside Pterodactyl Wings, a lightweight daemon that handles server deployments and resource management.", - "install_methods": [ - { - "type": "default", - "script": "ct/pterodactyl-panel.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Show login and database credentials: `cat ~/pterodactyl-panel.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/pterodactyl-wings.json b/frontend/public/json/pterodactyl-wings.json deleted file mode 100644 index ce5bfea80..000000000 --- a/frontend/public/json/pterodactyl-wings.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Pterodactyl Wings", - "slug": "pterodactyl-wings", - "categories": [ - 24 - ], - "date_created": "2025-03-04", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://pterodactyl.io/wings/1.0/installing.html", - "website": "https://pterodactyl.io", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/pterodactyl.webp", - "config_path": "/etc/pterodactyl/config.yml", - "description": "Pterodactyl Wings is Pterodactyl's server control plane, built for the rapidly changing gaming industry and designed to be highly performant and secure. 
Wings provides an HTTP API allowing you to interface directly with running server instances, fetch server logs, generate backups, and control all aspects of the server lifecycle.", - "install_methods": [ - { - "type": "default", - "script": "ct/pterodactyl-wings.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "After installation, you need to use the Auto Deploy command generated by Pterodactyl Panel and, after running the command, restart the Wings service with `systemctl restart wings.service`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/pulse.json b/frontend/public/json/pulse.json deleted file mode 100644 index bfcd0eeed..000000000 --- a/frontend/public/json/pulse.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "Pulse", - "slug": "pulse", - "categories": [ - 9 - ], - "date_created": "2025-05-27", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 7655, - "documentation": null, - "website": "https://github.com/rcourtman/Pulse", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/pulse.webp", - "config_path": "/etc/pulse", - "description": "A lightweight monitoring application for Proxmox VE that displays real-time status for VMs and containers via a simple web interface.", - "install_methods": [ - { - "type": "default", - "script": "ct/pulse.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Create Proxmox-API-Token first: `https://github.com/rcourtman/Pulse?tab=readme-ov-file#creating-api-token`", - "type": "Info" - }, - { - "text": "After installation, access the web interface to configure your Proxmox connection details through the built-in setup wizard", - "type": "Info" - }, - { - 
"text": "Configure authentication in the Web UI => Settings => Security", - "type": "Info" - } - ] -} diff --git a/frontend/public/json/pve-privilege-converter.json b/frontend/public/json/pve-privilege-converter.json deleted file mode 100644 index 7ec6ffe2e..000000000 --- a/frontend/public/json/pve-privilege-converter.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "PVE Privilege Converter", - "slug": "pve-privilege-converter", - "categories": [ - 1 - ], - "date_created": "2025-06-02", - "type": "pve", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": "https://github.com/onethree7/proxmox-lxc-privilege-converter", - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "", - "description": "This script allows converting Proxmox LXC containers between privileged and unprivileged modes using vzdump backup and restore. It guides you through container selection, backup storage, ID assignment, and privilege flipping via automated restore. 
Useful for applying changes that require different LXC modes.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/pve-privilege-converter.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute this script inside the Proxmox shell as root.", - "type": "info" - }, - { - "text": "Ensure that the backup and target storage have enough space.", - "type": "warning" - }, - { - "text": "The container will be recreated with a new ID and desired privilege setting.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/pve-scripts-local.json b/frontend/public/json/pve-scripts-local.json deleted file mode 100644 index 986ca4d51..000000000 --- a/frontend/public/json/pve-scripts-local.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "PVEScriptsLocal", - "slug": "pve-scripts-local", - "categories": [ - 1 - ], - "date_created": "2025-10-03", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://github.com/community-scripts/ProxmoxVE-Local", - "config_path": "/opt/PVEScripts-Local/.env", - "website": "https://community-scripts.github.io/ProxmoxVE", - "logo": "https://raw.githubusercontent.com/community-scripts/ProxmoxVE-Local/refs/heads/main/.github/logo.png", - "description": "A modern web-based management interface for Proxmox VE (PVE) helper scripts. 
This tool provides a user-friendly way to discover, download, and execute community-sourced Proxmox scripts locally with real-time terminal output streaming.", - "install_methods": [ - { - "type": "default", - "script": "ct/pve-scripts-local.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 4, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/qbittorrent-exporter.json b/frontend/public/json/qbittorrent-exporter.json deleted file mode 100644 index b5753a9f4..000000000 --- a/frontend/public/json/qbittorrent-exporter.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "name": "qbittorrent Exporter", - "slug": "qbittorrent-exporter", - "categories": [ - 9 - ], - "date_created": "2025-12-18", - "type": "addon", - "updateable": true, - "privileged": false, - "interface_port": 8090, - "documentation": "https://github.com/martabal/qbittorrent-exporter", - "website": "https://github.com/martabal/qbittorrent-exporter", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/qbittorrent.webp", - "config_path": "/opt/qbittorrent-exporter.env", - "description": "A fast and lightweight prometheus exporter for qBittorrent ", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/qbittorrent-exporter.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - }, - { - "type": "alpine", - "script": "tools/addon/qbittorrent-exporter.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/qbittorrent.json b/frontend/public/json/qbittorrent.json deleted file mode 100644 index 472b5851b..000000000 --- a/frontend/public/json/qbittorrent.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "qBittorrent", - "slug": "qbittorrent", - 
"categories": [ - 11 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8090, - "documentation": "https://github.com/qbittorrent/qBittorrent/wiki/", - "website": "https://www.qbittorrent.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/qbittorrent.webp", - "config_path": "$HOME/.config/qBittorrent/qBittorrent.conf", - "description": "qBittorrent offers a user-friendly interface that allows users to search for and download torrent files easily. It also supports magnet links, which allow users to start downloading files without the need for a torrent file.", - "install_methods": [ - { - "type": "default", - "script": "ct/qbittorrent.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "changeme" - }, - "notes": [] -} diff --git a/frontend/public/json/qdrant.json b/frontend/public/json/qdrant.json deleted file mode 100644 index 5e3c6243e..000000000 --- a/frontend/public/json/qdrant.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Qdrant", - "slug": "qdrant", - "categories": [ - 8 - ], - "date_created": "2025-11-27", - "type": "ct", - "updateable": true, - "privileged": false, - "config_path": "/etc/qdrant/config.yaml", - "interface_port": 6333, - "documentation": "https://github.com/qdrant/qdrant", - "website": "https://qdrant.tech/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/qdrant.webp", - "description": "Qdrant is a vector search engine and vector database that allows you to store, search, and manage high-dimensional vectors efficiently.", - "install_methods": [ - { - "type": "default", - "script": "ct/qdrant.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 5, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git 
a/frontend/public/json/qui.json b/frontend/public/json/qui.json deleted file mode 100644 index 3b8624402..000000000 --- a/frontend/public/json/qui.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Qui", - "slug": "qui", - "categories": [ - 11 - ], - "date_created": "2026-01-15", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 7476, - "documentation": "https://github.com/autobrr/qui", - "website": "https://getqui.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/qui.webp", - "config_path": "/root/.config/qui/config.toml", - "description": "Qui is a modern, self-hosted web interface for managing multiple qBittorrent instances with support for 10k+ torrents. It provides a clean and responsive interface for monitoring and controlling your qBittorrent downloads across multiple servers.", - "install_methods": [ - { - "type": "default", - "script": "ct/qui.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 10, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/rabbitmq.json b/frontend/public/json/rabbitmq.json deleted file mode 100644 index 79ea47b2e..000000000 --- a/frontend/public/json/rabbitmq.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "RabbitMQ", - "slug": "rabbitmq", - "categories": [ - 18 - ], - "date_created": "2024-06-27", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 15672, - "documentation": "https://www.rabbitmq.com/docs", - "website": "https://www.rabbitmq.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/rabbitmq.webp", - "config_path": "/etc/rabbitmq/rabbitmq.conf", - "description": "RabbitMQ is a robust messaging broker widely used for message queuing, streaming, and decoupling services. 
It supports multiple messaging protocols, ensures reliable message delivery, and offers features like routing, clustering, and federation. RabbitMQ is suitable for various use cases, including microservices communication, real-time data processing, and IoT applications.", - "install_methods": [ - { - "type": "default", - "script": "ct/rabbitmq.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "proxmox", - "password": "proxmox" - }, - "notes": [] -} diff --git a/frontend/public/json/radarr.json b/frontend/public/json/radarr.json deleted file mode 100644 index 64319f9c5..000000000 --- a/frontend/public/json/radarr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Radarr", - "slug": "radarr", - "categories": [ - 14 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 7878, - "documentation": "https://wiki.servarr.com/radarr", - "website": "https://radarr.video/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/radarr.webp", - "config_path": "", - "description": "Radarr is a movie management tool designed for Usenet and BitTorrent users. It allows users to manage and organize their movie collection with ease. Radarr integrates with popular Usenet and BitTorrent clients, such as Sonarr and Lidarr, to automate the downloading and organizing of movie files. The software provides a web-based interface for managing and organizing movies, making it easy to search and find titles, genres, and release dates. Radarr also supports metadata management, including movie posters and information, making it easy for users to keep their movie collection organized and up-to-date. 
The software is designed to be easy to use and provides a simple and intuitive interface for managing and organizing movie collections, making it a valuable tool for movie enthusiasts who want to keep their collection organized and up-to-date. With Radarr, users can enjoy their movie collection from anywhere, making it a powerful tool for managing and sharing movie files.", - "install_methods": [ - { - "type": "default", - "script": "ct/radarr.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/radicale.json b/frontend/public/json/radicale.json deleted file mode 100644 index 385cc1746..000000000 --- a/frontend/public/json/radicale.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Radicale", - "slug": "radicale", - "categories": [ - 0 - ], - "date_created": "2025-02-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5232, - "documentation": "https://radicale.org/master.html#documentation-1", - "website": "https://radicale.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/radicale.webp", - "config_path": "/etc/radicale/config", - "description": "Radicale is a small but powerful CalDAV (calendars, to-do lists) and CardDAV (contacts)", - "install_methods": [ - { - "type": "default", - "script": "ct/radicale.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "To view the user credentials : `cat radicale.creds`", - "type": "info" - }, - { - "text": "This application requires additional configuration. 
Please refer to https://github.com/community-scripts/ProxmoxVE/discussions/2073.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/rclone.json b/frontend/public/json/rclone.json deleted file mode 100644 index f0d755164..000000000 --- a/frontend/public/json/rclone.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "name": "Rclone", - "slug": "rclone", - "categories": [ - 11 - ], - "date_created": "2025-05-15", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://rclone.org/docs/", - "website": "https://rclone.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/rclone.webp", - "config_path": "~/.config/rclone/rclone.conf", - "description": "Rclone is a command-line program to manage files on cloud storage. It is a feature-rich alternative to cloud vendors' web storage interfaces", - "install_methods": [ - { - "type": "default", - "script": "ct/rclone.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-rclone.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 1, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "type": "info", - "text": "`cat ~/rclone.creds` to view login credentials" - }, - { - "type": "info", - "text": "`htpasswd -b -B /opt/login.pwd newuser newuserpassword` to add more users." 
- } - ] -} diff --git a/frontend/public/json/rdtclient.json b/frontend/public/json/rdtclient.json deleted file mode 100644 index 56c0d201b..000000000 --- a/frontend/public/json/rdtclient.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Real-Debrid Torrent Client", - "slug": "rdtclient", - "categories": [ - 11 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 6500, - "documentation": "https://github.com/rogerfar/rdt-client/blob/main/README.md", - "website": "https://github.com/rogerfar/rdt-client", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/real-debrid.webp", - "config_path": "/opt/rdtc/appsettings.json", - "description": "RDTClient is a web interface to manage your torrents on Real-Debrid, AllDebrid or Premiumize.", - "install_methods": [ - { - "type": "default", - "script": "ct/rdtclient.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/reactive-resume.json b/frontend/public/json/reactive-resume.json deleted file mode 100644 index f4a73ee72..000000000 --- a/frontend/public/json/reactive-resume.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Reactive Resume", - "slug": "reactive-resume", - "categories": [ - 12 - ], - "date_created": "2025-04-22", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://docs.rxresume.org/", - "website": "https://rxresume.org", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/reactive-resume.webp", - "config_path": "/opt/reactive-resume/.env", - "description": "A one-of-a-kind resume builder that keeps your privacy in mind. 
Completely secure, customizable, portable, open-source and free forever.", - "install_methods": [ - { - "type": "default", - "script": "ct/reactive-resume.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/readeck.json b/frontend/public/json/readeck.json deleted file mode 100644 index ec6ecd26f..000000000 --- a/frontend/public/json/readeck.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Readeck", - "slug": "readeck", - "categories": [ - 12 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8000, - "documentation": "https://readeck.org/en/docs/", - "website": "https://readeck.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/readeck.webp", - "config_path": "/opt/readeck/config.toml", - "description": "Readeck helps you keep all that web content you want to revisit in an hour, tomorrow, or in 20 years.", - "install_methods": [ - { - "type": "default", - "script": "ct/readeck.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/recyclarr.json b/frontend/public/json/recyclarr.json deleted file mode 100644 index 780c7e35d..000000000 --- a/frontend/public/json/recyclarr.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Recyclarr", - "slug": "recyclarr", - "categories": [ - 14 - ], - "date_created": "2024-11-15", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://recyclarr.dev/wiki/", - "website": "https://recyclarr.dev/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/recyclarr.webp", - "config_path": "", - "description": "Recyclarr is an automation 
tool that integrates with media management software like Sonarr and Radarr. It helps users manage and organize their media libraries by automatically searching for and adding content from indexers, based on user-defined criteria. It streamlines the process of maintaining and updating media collections without manual intervention.", - "install_methods": [ - { - "type": "default", - "script": "ct/recyclarr.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "type": "warning", - "text": "Configure your Radarr/Sonarr instances in `/root/.config/recyclarr/recyclarr.yml` before the first sync." - }, - { - "type": "info", - "text": "Automatic daily sync is configured via `/etc/cron.d/recyclarr`. Sync logs are saved to `/root/.config/recyclarr/sync.log`." - } - ] -} diff --git a/frontend/public/json/redis.json b/frontend/public/json/redis.json deleted file mode 100644 index bf8d63c7b..000000000 --- a/frontend/public/json/redis.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "name": "Redis ", - "slug": "redis", - "categories": [ - 8 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": "https://redis.io/docs/latest/", - "website": "https://redis.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/redis.webp", - "config_path": "Debian /etc/redis/redis.conf | Alpine: /etc/redis.conf", - "description": "Redis is an open-source, in-memory data store used by millions of developers as a cache, vector database, document database, streaming engine, and message broker.", - "install_methods": [ - { - "type": "default", - "script": "ct/redis.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-redis.sh", - "resources": { - "cpu": 1, - "ram": 
256, - "hdd": 1, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Redis Configuration: `nano /etc/redis/redis.conf` or in Alpine: `nano /etc/redis.conf`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/redlib.json b/frontend/public/json/redlib.json deleted file mode 100644 index 7a85c762f..000000000 --- a/frontend/public/json/redlib.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Redlib", - "slug": "alpine-redlib", - "categories": [ - 10 - ], - "date_created": "2025-08-25", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5252, - "documentation": "https://github.com/redlib-org/redlib/blob/main/README.md", - "website": "https://github.com/redlib-org/redlib", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/redlib.webp", - "config_path": "/opt/redlib/redlib.conf", - "description": "An alternative private front-end to Reddit. Redlib hopes to provide an easier way to browse Reddit, without the ads, trackers, and bloat.", - "install_methods": [ - { - "type": "default", - "script": "ct/alpine-redlib.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 1, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/reitti.json b/frontend/public/json/reitti.json deleted file mode 100644 index e162051d3..000000000 --- a/frontend/public/json/reitti.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Reitti", - "slug": "reitti", - "categories": [ - 21 - ], - "date_created": "2025-10-30", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://www.dedicatedcode.com/projects/reitti/", - "config_path": "/opt/reitti/.env", - "website": "https://www.dedicatedcode.com/projects/reitti/", - "logo": 
"https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/reitti.webp", - "description": "Reitti is a self-hosted location tracking and analysis platform that detects significant places, trip patterns, and integrates with OwnTracks, GPSLogger, and Immich. It uses PostgreSQL + PostGIS, RabbitMQ, Redis, and an optional Photon geocoder.", - "install_methods": [ - { - "type": "default", - "script": "ct/reitti.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 15, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "admin" - }, - "notes": [ - { - "text": "Photon Geocoder running at http://127.0.0.1:2322. Photon is fully setup, but without sample data. (filesize is big) -> checkout our guide: `https://github.com/community-scripts/ProxmoxVE/discussions/8737`.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/resiliosync.json b/frontend/public/json/resiliosync.json deleted file mode 100644 index 885d2650f..000000000 --- a/frontend/public/json/resiliosync.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Resilio Sync", - "slug": "resiliosync", - "categories": [ - 11 - ], - "date_created": "2025-09-06", - "type": "ct", - "updateable": true, - "privileged": false, - "config_path": "/etc/resilio-sync/config.json", - "interface_port": 8888, - "documentation": "https://help.resilio.com/", - "website": "https://www.resilio.com/sync", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/resilio-sync.webp", - "description": "Fast, reliable, and simple file sync and share solution, powered by P2P technology. 
Sync files across all your devices without storing them in the cloud.", - "install_methods": [ - { - "type": "default", - "script": "ct/resiliosync.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "After free registration, you will receive a license keyfile to your email address. Upload it into any LXC directory and select on first run.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/revealjs.json b/frontend/public/json/revealjs.json deleted file mode 100644 index 6aab62902..000000000 --- a/frontend/public/json/revealjs.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "RevealJS", - "slug": "revealjs", - "categories": [ - 12 - ], - "date_created": "2025-03-04", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8000, - "documentation": "https://github.com/hakimel/reveal.js/wiki", - "website": "https://github.com/hakimel/reveal.js", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/reveal-js.webp", - "config_path": "/opt/revealjs/gulpfile.js", - "description": "reveal.js is an open source HTML presentation framework. 
It's a tool that enables anyone with a web browser to create fully-featured and beautiful presentations for free.", - "install_methods": [ - { - "type": "default", - "script": "ct/revealjs.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "LiveReload is on port: 35729", - "type": "info" - } - ] -} diff --git a/frontend/public/json/romm.json b/frontend/public/json/romm.json deleted file mode 100644 index 3b98e6a3a..000000000 --- a/frontend/public/json/romm.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "RomM", - "slug": "romm", - "categories": [ - 24 - ], - "date_created": "2026-02-16", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://docs.romm.app/latest/", - "website": "https://romm.app/", - "config_path": "/opt/romm/.env", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/romm.webp", - "description": "RomM (ROM Manager) allows you to scan, enrich, browse and play your game collection with a clean and responsive interface. 
Support for multiple platforms, various naming schemes, and custom tags.", - "install_methods": [ - { - "type": "default", - "script": "ct/romm.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 20, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/runtipi.json b/frontend/public/json/runtipi.json deleted file mode 100644 index fc517447b..000000000 --- a/frontend/public/json/runtipi.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "name": "Runtipi", - "slug": "runtipi", - "categories": [ - 2 - ], - "date_created": "2024-05-02", - "type": "addon", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://runtipi.io/docs/introduction", - "website": "https://runtipi.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/runtipi.webp", - "config_path": "/opt/runtipi/state/settings.json", - "description": "Runtipi lets you install all your favorite self-hosted apps without the hassle of configuring and managing each service. One-click installs and updates for more than 180 popular apps.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/runtipi.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "This is an addon script intended to be used on top of an existing Docker container.", - "type": "info" - }, - { - "text": "Execute within an existing LXC console (Debian / Ubuntu only)", - "type": "info" - }, - { - "text": "WARNING: Installation sources scripts outside of Community Scripts repo. 
Please check the source before installing.", - "type": "warning" - }, - { - "text": "To update via CLI, run the addon script again and select Update, or use: bash <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/tools/addon/runtipi.sh)", - "type": "info" - } - ] -} diff --git a/frontend/public/json/rustdeskserver.json b/frontend/public/json/rustdeskserver.json deleted file mode 100644 index 7a68c2859..000000000 --- a/frontend/public/json/rustdeskserver.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "name": "RustDesk Server", - "slug": "rustdeskserver", - "categories": [ - 21 - ], - "date_created": "2025-02-13", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 21114, - "documentation": "https://rustdesk.com/docs/en/", - "website": "https://rustdesk.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/rustdesk.webp", - "config_path": "", - "description": "RustDesk is a full-featured open source remote control alternative for self-hosting and security with minimal configuration.", - "install_methods": [ - { - "type": "default", - "script": "ct/rustdeskserver.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-rustdeskserver.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Check our configuration guide for help: `https://github.com/community-scripts/ProxmoxVE/discussions/2388`", - "type": "info" - }, - { - "text": "To set admin password on Debian, type `cd /var/lib/rustdesk-api && rustdesk-api reset-admin-pwd ` inside LXC.", - "type": "info" - }, - { - "text": "To see admin password on Alpine, type `cat ~/rustdesk.creds` inside LXC.", - "type": "info" - }, - { - "text": "This script uses hbbs/hbbr builds from `lejianwen/rustdesk-server` 
(instead of the official repo) for full compatibility with the RustDesk API (SSO/OAuth). See `https://github.com/community-scripts/ProxmoxVE/issues/12079`.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/rustypaste.json b/frontend/public/json/rustypaste.json deleted file mode 100644 index ddc719a4c..000000000 --- a/frontend/public/json/rustypaste.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "name": "RustyPaste", - "slug": "rustypaste", - "categories": [ - 11 - ], - "date_created": "2026-02-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8000, - "documentation": "https://github.com/orhun/rustypaste", - "config_path": "/opt/rustypaste/config.toml", - "website": "https://github.com/orhun/rustypaste", - "logo": "https://github.com/orhun/rustypaste/raw/master/img/rustypaste_logo.png", - "description": "Rustypaste is a minimal file upload/pastebin service.", - "install_methods": [ - { - "type": "default", - "script": "ct/rustypaste.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 20, - "os": "Debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-rustypaste.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 4, - "os": "Alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "When updating the script it will backup the whole project including all the uploaded files, make sure to extract it to a safe location or remove", - "type": "info" - } - ] -} diff --git a/frontend/public/json/sabnzbd.json b/frontend/public/json/sabnzbd.json deleted file mode 100644 index 80003ea1c..000000000 --- a/frontend/public/json/sabnzbd.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "SABnzbd", - "slug": "sabnzbd", - "categories": [ - 11 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 7777, - "documentation": "https://sabnzbd.org/wiki/", - "website": 
"https://sabnzbd.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/sabnzbd.webp", - "config_path": "/.sabnzbd/sabnzbd.ini", - "description": "SABnzbd is a free, open-source software program for downloading binary files from Usenet newsgroups. It is designed to be easy to use, and provides a number of features to simplify the downloading process, such as automatic error detection and repair, download scheduling, and integration with other applications. SABnzbd is a binary newsreader, which means it is specifically designed for downloading binary files, such as images, music, and video, from Usenet newsgroups. With its user-friendly interface and powerful features, SABnzbd makes it easy to manage your Usenet downloads and keep your download queue organized.", - "install_methods": [ - { - "type": "default", - "script": "ct/sabnzbd.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 5, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/salt.json b/frontend/public/json/salt.json deleted file mode 100644 index 99c173af2..000000000 --- a/frontend/public/json/salt.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Salt", - "slug": "salt", - "categories": [ - 19 - ], - "date_created": "2025-07-22", - "type": "ct", - "updateable": true, - "privileged": false, - "config_path": "/opt/salt/.env", - "interface_port": null, - "documentation": "https://docs.saltproject.io/salt/install-guide/en/latest/", - "website": "https://saltproject.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/salt.webp", - "description": "SaltStack Salt is a software for automating the management and configuration of IT infrastructure and applications. It is an event-driven automation tool and framework used to deploy, configure, and manage complex IT systems. 
Its primary functions include configuration management, where it ensures consistent configurations and manages operating system deployment and software installation. It also automates and orchestrates routine IT processes and can create self-aware, self-healing systems.", - "install_methods": [ - { - "type": "default", - "script": "ct/salt.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 3, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/scaling-governor.json b/frontend/public/json/scaling-governor.json deleted file mode 100644 index db6d8ca1e..000000000 --- a/frontend/public/json/scaling-governor.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "PVE CPU Scaling Governor", - "slug": "scaling-governor", - "categories": [ - 1 - ], - "date_created": "2024-04-29", - "type": "pve", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": "https://www.kernel.org/doc/html/latest/admin-guide/pm/cpufreq.html?#generic-scaling-governors", - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "", - "description": "The CPU scaling governor determines how the CPU frequency is adjusted based on the workload, with the goal of either conserving power or improving performance. By scaling the frequency up or down, the operating system can optimize the CPU usage and conserve energy when possible. 
Generic Scaling Governors", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/scaling-governor.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within the Proxmox shell", - "type": "info" - } - ] -} diff --git a/frontend/public/json/scanopy.json b/frontend/public/json/scanopy.json deleted file mode 100644 index 0798067f3..000000000 --- a/frontend/public/json/scanopy.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Scanopy", - "slug": "scanopy", - "categories": [ - 9 - ], - "date_created": "2025-11-13", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 60072, - "documentation": "https://github.com/scanopy/scanopy", - "config_path": "/opt/scanopy/.env, OIDC: /opt/scanopy/oidc.toml", - "website": "https://scanopy.net", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/scanopy.webp", - "description": "Automatically discover and visually document network infrastructure", - "install_methods": [ - { - "type": "default", - "script": "ct/scanopy.sh", - "resources": { - "cpu": 2, - "ram": 3072, - "hdd": 6, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "To configure the integrated daemon after install is complete, use the `Create Daemon` menu in the UI and follow the instructions", - "type": "info" - }, - { - "text": "The integrated daemon config is located at `/root/.config/daemon/`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/scraparr.json b/frontend/public/json/scraparr.json deleted file mode 100644 index 259ce0e75..000000000 --- a/frontend/public/json/scraparr.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Scraparr", - "slug": "scraparr", - "categories": [ - 14 - ], - "date_created": "2025-09-15", - "type": "ct", - 
"updateable": true, - "privileged": false, - "interface_port": 7100, - "documentation": "https://github.com/thecfu/scraparr/blob/main/README.md", - "website": "https://github.com/thecfu/scraparr", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/scraparr-dark.webp", - "config_path": "/scraparr/config/config.yaml", - "description": "Scraparr is a Prometheus exporter for the *arr suite (Sonarr, Radarr, Lidarr, etc.). It provides metrics that can be scraped by Prometheus to monitor and visualize the health and performance of your *arr applications.", - "install_methods": [ - { - "type": "default", - "script": "ct/scraparr.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Edit config file then restart the scraparr service: `systemctl restart scraparr`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/searxng.json b/frontend/public/json/searxng.json deleted file mode 100644 index edb1dd82a..000000000 --- a/frontend/public/json/searxng.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "SearXNG", - "slug": "searxng", - "categories": [ - 0 - ], - "date_created": "2025-08-26", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8888, - "documentation": "https://docs.searxng.org/", - "website": "https://github.com/searxng/searxng", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/searxng.webp", - "config_path": "/etc/searxng/settings.yml", - "description": "SearXNG is a free internet metasearch engine which aggregates results from up to 215 search services. Users are neither tracked nor profiled. 
Additionally, SearXNG can be used over Tor for online anonymity.", - "install_methods": [ - { - "type": "default", - "script": "ct/searxng.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 7, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/seaweedfs.json b/frontend/public/json/seaweedfs.json deleted file mode 100644 index 76ebc5ada..000000000 --- a/frontend/public/json/seaweedfs.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "SeaweedFS", - "slug": "seaweedfs", - "categories": [ - 11 - ], - "date_created": "2026-02-23", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9333, - "documentation": "https://github.com/seaweedfs/seaweedfs/wiki", - "website": "https://seaweedfs.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/seaweedfs.webp", - "config_path": "", - "description": "SeaweedFS is a fast distributed storage system for blobs, objects, files, and data lakes, with O(1) disk seek, S3 API, FUSE mount, WebDAV, and cloud tiering support.", - "install_methods": [ - { - "type": "default", - "script": "ct/seaweedfs.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 16, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Master UI available at port 9333, Filer UI at port 8888, S3 API at port 8333.", - "type": "info" - }, - { - "text": "Data is stored in /opt/seaweedfs-data.", - "type": "info" - }, - { - "text": "FUSE mounting requires fuse3 (pre-installed).", - "type": "info" - } - ] -} diff --git a/frontend/public/json/seelf.json b/frontend/public/json/seelf.json deleted file mode 100644 index fc6f27309..000000000 --- a/frontend/public/json/seelf.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "seelf", - "slug": "seelf", - "categories": [ - 4 - ], - "date_created": "2025-02-04", - 
"type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://yuukanoo.github.io/seelf/guide/quickstart.html", - "website": "https://yuukanoo.github.io/seelf/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/seelf.webp", - "config_path": "", - "description": "seelf is a self-hosted software which makes it easy to deploy your own applications on your own hardware using an easy to use interface.", - "install_methods": [ - { - "type": "default", - "script": "ct/seelf.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 10, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Initial admin email and password: `cat ~/seelf.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/seerr.json b/frontend/public/json/seerr.json deleted file mode 100644 index 99aedcdfe..000000000 --- a/frontend/public/json/seerr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Seerr", - "slug": "seerr", - "categories": [ - 13 - ], - "date_created": "2026-02-15", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5055, - "documentation": "https://docs.seerr.dev/", - "website": "https://seerr.dev/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/seerr.webp", - "config_path": "/etc/seerr/seerr.conf", - "description": "Open-source media request and discovery manager for Jellyfin, Plex, and Emby. 
Unified version of Overseerr and Jellyseerr.", - "install_methods": [ - { - "type": "default", - "script": "ct/seerr.sh", - "resources": { - "cpu": 4, - "ram": 4096, - "hdd": 12, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/semaphore.json b/frontend/public/json/semaphore.json deleted file mode 100644 index b070bce0e..000000000 --- a/frontend/public/json/semaphore.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Semaphore", - "slug": "semaphore", - "categories": [ - 19 - ], - "date_created": "2025-01-01", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://docs.semaphoreui.com/", - "website": "https://semaphoreui.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/semaphore-ui.webp", - "config_path": "/opt/semaphore/config.json", - "description": "Semaphore UI is a modern web interface for managing popular DevOps tools", - "install_methods": [ - { - "type": "default", - "script": "ct/semaphore.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "ubuntu", - "version": "24.04" - } - } - ], - "default_credentials": { - "username": "admin", - "password": null - }, - "notes": [ - { - "text": "This instance uses SQLite", - "type": "info" - }, - { - "text": "Admin password: `cat ~/semaphore.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/sftpgo.json b/frontend/public/json/sftpgo.json deleted file mode 100644 index 3026b61b4..000000000 --- a/frontend/public/json/sftpgo.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "SFTPGo", - "slug": "sftpgo", - "categories": [ - 11 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://docs.sftpgo.com/latest/", - "website": "https://github.com/drakkan/sftpgo", - "logo": 
"https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/sftpgo.webp", - "config_path": "/etc/sftpgo/sftpgo.json", - "description": "SFTPGo is a fully featured and highly configurable SFTP server with optional HTTP/S, FTP/S and WebDAV support. Several storage backends are supported: local filesystem, encrypted local filesystem, S3 (compatible) Object Storage, Google Cloud Storage, Azure Blob Storage, SFTP.", - "install_methods": [ - { - "type": "default", - "script": "ct/sftpgo.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/shelfmark.json b/frontend/public/json/shelfmark.json deleted file mode 100644 index b0e6de2b8..000000000 --- a/frontend/public/json/shelfmark.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Shelfmark", - "slug": "shelfmark", - "categories": [ - 13 - ], - "date_created": "2026-01-31", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8084, - "documentation": "https://github.com/calibrain/shelfmark/tree/main/docs", - "website": "https://github.com/calibrain/shelfmark", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/shelfmark.webp", - "config_path": "/etc/shelfmark", - "description": "Shelfmark is a unified web interface for searching and aggregating books and audiobook downloads from multiple sources - all in one place.", - "install_methods": [ - { - "type": "default", - "script": "ct/shelfmark.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "The configuration at `/etc/shelfmark/.env` is for bootstrapping the initial install. 
Customize the configuration via the Shelfmark UI.", - "type": "info" - }, - { - "text": "This version of the application does not support routing through Tor (the `USING_TOR` env var).", - "type": "info" - } - ] -} diff --git a/frontend/public/json/shinobi.json b/frontend/public/json/shinobi.json deleted file mode 100644 index 2c4fd4d0f..000000000 --- a/frontend/public/json/shinobi.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Shinobi NVR", - "slug": "shinobi", - "categories": [ - 15 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://docs.shinobi.video/", - "website": "https://shinobi.video/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/shinobi.webp", - "config_path": "/opt/Shinobi/conf.json", - "description": "Shinobi is an open-source, self-hosted network video recording (NVR) software. It allows you to manage and monitor security cameras and record video footage. 
Shinobi can be run on various platforms including Linux, macOS, and Raspberry Pi, and offers features such as real-time streaming, motion detection, and email notifications.", - "install_methods": [ - { - "type": "default", - "script": "ct/shinobi.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "ubuntu", - "version": "24.04" - } - } - ], - "default_credentials": { - "username": "admin@shinobi.video", - "password": "admin" - }, - "notes": [] -} diff --git a/frontend/public/json/signoz.json b/frontend/public/json/signoz.json deleted file mode 100644 index be4fdbe16..000000000 --- a/frontend/public/json/signoz.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "SigNoz", - "slug": "signoz", - "categories": [ - 9 - ], - "date_created": "2025-09-15", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://signoz.io/docs/introduction/", - "config_path": "/opt/signoz/conf/systemd.env", - "website": "https://signoz.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/signoz.webp", - "description": "SigNoz is an open-source Datadog or New Relic alternative. 
Get APM, logs, traces, metrics, exceptions, & alerts in a single tool.", - "install_methods": [ - { - "type": "default", - "script": "ct/signoz.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 20, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "The first user you register will be the admin user.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/silverbullet.json b/frontend/public/json/silverbullet.json deleted file mode 100644 index e9089e395..000000000 --- a/frontend/public/json/silverbullet.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Silverbullet", - "slug": "silverbullet", - "categories": [ - 12 - ], - "date_created": "2024-12-03", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://silverbullet.md/Manual", - "website": "https://silverbullet.md", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/silverbullet.webp", - "config_path": "", - "description": "SilverBullet is a note-taking application optimized for people with a hacker mindset.", - "install_methods": [ - { - "type": "default", - "script": "ct/silverbullet.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/slskd.json b/frontend/public/json/slskd.json deleted file mode 100644 index 20900c1f9..000000000 --- a/frontend/public/json/slskd.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Slskd", - "slug": "slskd", - "categories": [ - 11 - ], - "date_created": "2025-03-31", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5030, - "documentation": "https://github.com/slskd/slskd/tree/master/docs", - "website": "https://github.com/slskd/slskd", - "logo": 
"https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/slskd.webp", - "config_path": "/opt/slskd/config/slskd.yml", - "description": "A modern client-server application for the Soulseek file sharing network. ", - "install_methods": [ - { - "type": "default", - "script": "ct/slskd.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "slskd", - "password": "slskd" - }, - "notes": [ - { - "text": "See /opt/slskd/config/slskd.yml to add your Soulseek credentials", - "type": "info" - } - ] -} diff --git a/frontend/public/json/smokeping.json b/frontend/public/json/smokeping.json deleted file mode 100644 index 72fd9e84a..000000000 --- a/frontend/public/json/smokeping.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "SmokePing", - "slug": "smokeping", - "categories": [ - 4 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://oss.oetiker.ch/smokeping/doc/index.en.html", - "website": "https://oss.oetiker.ch/smokeping/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/smokeping.webp", - "config_path": "/etc/smokeping/config.d/Targets", - "description": "SmokePing is a deluxe latency measurement tool. 
It can measure, store and display latency, latency distribution and packet loss.", - "install_methods": [ - { - "type": "default", - "script": "ct/smokeping.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/snipeit.json b/frontend/public/json/snipeit.json deleted file mode 100644 index e623129f0..000000000 --- a/frontend/public/json/snipeit.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "SnipeIT", - "slug": "snipeit", - "categories": [ - 25 - ], - "date_created": "2024-12-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://snipe-it.readme.io/docs/overview", - "website": "https://snipeitapp.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/snipe-it.webp", - "config_path": "/opt/snipe-it/.env", - "description": "This is a FOSS project for asset management in IT Operations. 
Knowing who has which laptop, when it was purchased in order to depreciate it correctly, handling software licenses, etc.", - "install_methods": [ - { - "type": "default", - "script": "ct/snipeit.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Post Install: `https://github.com/community-scripts/ProxmoxVE/discussions/671`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/snowshare.json b/frontend/public/json/snowshare.json deleted file mode 100644 index dd6dc0de4..000000000 --- a/frontend/public/json/snowshare.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "SnowShare", - "slug": "snowshare", - "categories": [ - 11 - ], - "date_created": "2025-12-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://github.com/TuroYT/snowshare", - "config_path": "/opt/snowshare.env", - "website": "https://github.com/TuroYT/snowshare", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/png/snowshare.png", - "description": "A modern, secure file and link sharing platform built with Next.js, Prisma, and NextAuth. 
Share URLs, code snippets, and files with customizable expiration, privacy, and QR codes.", - "install_methods": [ - { - "type": "default", - "script": "ct/snowshare.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 20, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/sonarqube.json b/frontend/public/json/sonarqube.json deleted file mode 100644 index 61a6aa9c0..000000000 --- a/frontend/public/json/sonarqube.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "name": "sonarqube", - "slug": "sonarqube", - "categories": [ - 20, - 19 - ], - "date_created": "2025-10-10", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9000, - "documentation": "https://docs.sonarsource.com/sonarqube-server", - "config_path": "/opt/sonarqube/conf/sonar.properties", - "website": "https://www.sonarsource.com/products/sonarqube/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/sonarqube.webp", - "description": "SonarQube Server automates code quality and security reviews and provides actionable code intelligence so developers can focus on building better, faster.", - "install_methods": [ - { - "type": "default", - "script": "ct/sonarqube.sh", - "resources": { - "cpu": 4, - "ram": 6144, - "hdd": 25, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "admin" - }, - "notes": [] -} diff --git a/frontend/public/json/sonarr.json b/frontend/public/json/sonarr.json deleted file mode 100644 index 009014508..000000000 --- a/frontend/public/json/sonarr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Sonarr", - "slug": "sonarr", - "categories": [ - 14 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8989, - "documentation": "https://wiki.servarr.com/sonarr", - "website": "https://sonarr.tv/", - "logo": 
"https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/sonarr.webp", - "config_path": "/var/lib/sonarr/config.xml", - "description": "Sonarr is a personal video recorder (PVR) software designed for Usenet and BitTorrent users. It allows users to manage and organize their TV show collection with ease. Sonarr integrates with popular Usenet and BitTorrent clients, such as NZBget and Transmission, to automate the downloading and organizing of TV show files. The software provides a web-based interface for managing and organizing TV shows, making it easy to search and find titles, seasons, and episodes. Sonarr also supports metadata management, including TV show posters and information, making it easy for users to keep their TV show collection organized and up-to-date. The software is designed to be easy to use and provides a simple and intuitive interface for managing and organizing TV show collections, making it a valuable tool for TV show enthusiasts who want to keep their collection organized and up-to-date. 
With Sonarr, users can enjoy their TV show collection from anywhere, making it a powerful tool for managing and sharing TV show files.", - "install_methods": [ - { - "type": "default", - "script": "ct/sonarr.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/sonobarr.json b/frontend/public/json/sonobarr.json deleted file mode 100644 index 885a4a1ed..000000000 --- a/frontend/public/json/sonobarr.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Sonobarr", - "slug": "sonobarr", - "categories": [ - 14 - ], - "date_created": "2026-02-23", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5000, - "documentation": "https://github.com/Dodelidoo-Labs/sonobarr", - "config_path": "/etc/sonobarr/.env", - "website": "https://github.com/Dodelidoo-Labs/sonobarr", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/sonobarr.webp", - "description": "Sonobarr marries your existing Lidarr library with Last.fm’s discovery graph to surface artists you'll actually like. 
It runs as a Flask + Socket.IO application, ships with a polished Bootstrap UI, and includes admin tooling so folks can share a single instance safely.", - "install_methods": [ - { - "type": "default", - "script": "ct/sonobarr.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 20, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Default generated admin password is in the env file (sonobarr_superadmin_password)", - "type": "info" - } - ] -} diff --git a/frontend/public/json/sparkyfitness.json b/frontend/public/json/sparkyfitness.json deleted file mode 100644 index fcf301b1f..000000000 --- a/frontend/public/json/sparkyfitness.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "SparkyFitness", - "slug": "sparkyfitness", - "categories": [ - 9 - ], - "date_created": "2026-02-23", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://codewithcj.github.io/SparkyFitness", - "website": "https://github.com/CodeWithCJ/SparkyFitness", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/sparkyfitness.webp", - "config_path": "/etc/sparkyfitness/.env", - "description": "A self-hosted, privacy-first alternative to MyFitnessPal. 
Track nutrition, exercise, body metrics, and health data while keeping full control of your data.", - "install_methods": [ - { - "type": "default", - "script": "ct/sparkyfitness.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/speedtest-tracker.json b/frontend/public/json/speedtest-tracker.json deleted file mode 100644 index 43b30cc13..000000000 --- a/frontend/public/json/speedtest-tracker.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Speedtest-Tracker", - "slug": "speedtest-tracker", - "categories": [ - 4 - ], - "date_created": "2025-12-11", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://docs.speedtest-tracker.dev/", - "website": "https://github.com/alexjustesen/speedtest-tracker", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/speedtest-tracker.webp", - "config_path": "/opt/speedtest-tracker/.env", - "description": "Speedtest Tracker is a self-hosted application that runs scheduled speed tests using the Ookla Speedtest CLI and saves the results to a database for historical tracking and visualization.", - "install_methods": [ - { - "type": "default", - "script": "ct/speedtest-tracker.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin@example.com", - "password": "password" - }, - "notes": [] -} diff --git a/frontend/public/json/splunk-enterprise.json b/frontend/public/json/splunk-enterprise.json deleted file mode 100644 index 304ff8377..000000000 --- a/frontend/public/json/splunk-enterprise.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "Splunk Enterprise", - "slug": "splunk-enterprise", - "categories": [ - 9 - ], - "date_created": "2025-11-12", - "type": "ct", - "updateable": false, - 
"privileged": false, - "interface_port": 8000, - "documentation": "https://help.splunk.com", - "config_path": "", - "website": "https://www.splunk.com/en_us/download/splunk-enterprise.html", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/splunk-light.webp", - "description": "Platform for searching, monitoring, and analyzing machine-generated data at scale for operational intelligence and security.", - "install_methods": [ - { - "type": "default", - "script": "ct/splunk-enterprise.sh", - "resources": { - "cpu": 4, - "ram": 8192, - "hdd": 40, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "The credentials to login can be found in splunk.creds.", - "type": "info" - }, - { - "text": "Trial license allows indexing 500 MB/Day. After 60 days you can convert to a perpetual free license or purchase a Splunk Enterprise license to continue using the expanded functionality designed for enterprise-scale deployments.", - "type": "warning" - }, - { - "text": "About Splunk Free License: https://help.splunk.com/en/splunk-enterprise/administer/admin-manual/10.0/configure-splunk-licenses/about-splunk-free", - "type": "info" - } - ] -} diff --git a/frontend/public/json/spoolman.json b/frontend/public/json/spoolman.json deleted file mode 100644 index 42365d764..000000000 --- a/frontend/public/json/spoolman.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Spoolman", - "slug": "spoolman", - "categories": [ - 24 - ], - "date_created": "2024-06-13", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 7912, - "documentation": "https://github.com/Donkie/Spoolman/wiki/Installation", - "website": "https://github.com/Donkie/Spoolman", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/spoolman.webp", - "config_path": "/opt/spoolman/.env", - "description": "Spoolman is a self-hosted web service designed to help you efficiently manage your 
3D printer filament spools and monitor their usage.", - "install_methods": [ - { - "type": "default", - "script": "ct/spoolman.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/sportarr.json b/frontend/public/json/sportarr.json deleted file mode 100644 index beea70b6d..000000000 --- a/frontend/public/json/sportarr.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Sportarr", - "slug": "sportarr", - "categories": [ - 14 - ], - "date_created": "2026-01-06", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 1867, - "documentation": "https://sportarr.net/docs", - "config_path": "/opt/sportarr/.env, /opt/sportarr-data/config/config.xml", - "website": "https://sportarr.net/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/sportarr.webp", - "description": "Sportarr is an automated media management application for all sports. It works similar to Sonarr and Radarr but specifically designed for combat sports, basketball, football, hockey, motorsports, and hundreds of other sports worldwide.", - "install_methods": [ - { - "type": "default", - "script": "ct/sportarr.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "The resources assigned to LXC are considered baseline. 
Please adjust to match your workload.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/sqlserver2022.json b/frontend/public/json/sqlserver2022.json deleted file mode 100644 index 3c672e1a7..000000000 --- a/frontend/public/json/sqlserver2022.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "SQL Server 2022", - "slug": "sqlserver2022", - "categories": [ - 8 - ], - "date_created": "2025-01-14", - "type": "ct", - "updateable": true, - "privileged": true, - "interface_port": 1433, - "documentation": "https://learn.microsoft.com/en-us/sql/sql-server/?view=sql-server-ver16", - "website": "https://www.microsoft.com/en-us/sql-server/sql-server-2022", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/microsoft-sql-server.webp", - "config_path": "", - "description": "Script to automatically set up a SQL Server 2022 installation.", - "install_methods": [ - { - "type": "default", - "script": "ct/sqlserver2022.sh", - "resources": { - "cpu": 1, - "ram": 2048, - "hdd": 10, - "os": "Ubuntu", - "version": "22.04" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "If you choose not to run the installation setup, execute: `/opt/mssql/bin/mssql-conf setup` in LXC shell.", - "type": "info" - }, - { - "text": "You can setup the admin account 'SA' during installation", - "type": "info" - }, - { - "text": "Make sure you disable the SA account if you intend to use this in production!", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/sqlserver2025.json b/frontend/public/json/sqlserver2025.json deleted file mode 100644 index 66d49142a..000000000 --- a/frontend/public/json/sqlserver2025.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "name": "SQL Server 2025", - "slug": "sqlserver2025", - "categories": [ - 8 - ], - "date_created": "2026-02-05", - "type": "ct", - "updateable": true, - "privileged": true, - "interface_port": 1433, - "documentation": 
"https://learn.microsoft.com/en-us/sql/sql-server/?view=sql-server-ver17", - "website": "https://www.microsoft.com/en-us/sql-server/sql-server-2025", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/microsoft-sql-server.webp", - "config_path": "", - "description": "Script to automatically set up a SQL Server 2025 installation with Ubuntu 24.04 support.", - "install_methods": [ - { - "type": "default", - "script": "ct/sqlserver2025.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 10, - "os": "Ubuntu", - "version": "24.04" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "SQL Server (2025) SQLPAL is incompatible with Proxmox VE 9 (Kernel 6.12+) in LXC containers. Use a VM instead or the SQL-Server 2022 LXC.", - "type": "warning" - }, - { - "text": "If you choose not to run the installation setup, execute: `/opt/mssql/bin/mssql-conf setup` in LXC shell.", - "type": "info" - }, - { - "text": "You can setup the admin account 'SA' during installation", - "type": "info" - }, - { - "text": "Make sure you disable the SA account if you intend to use this in production!", - "type": "warning" - }, - { - "text": "Ubuntu 24.04 support requires SQL Server 2025 CU1 or later", - "type": "info" - } - ] -} diff --git a/frontend/public/json/stirling-pdf.json b/frontend/public/json/stirling-pdf.json deleted file mode 100644 index 99e83b9d6..000000000 --- a/frontend/public/json/stirling-pdf.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Stirling-PDF", - "slug": "stirling-pdf", - "categories": [ - 12 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": null, - "website": "https://docs.stirlingpdf.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/stirling-pdf.webp", - "config_path": "/opt/Stirling-PDF/.env", - "description": "Stirling-PDF is a powerful locally hosted web based PDF manipulation 
tool that allows you to perform various operations on PDF files, such as splitting merging, converting, reorganizing, adding images, rotating, compressing, and more.", - "install_methods": [ - { - "type": "default", - "script": "ct/stirling-pdf.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "stirling" - }, - "notes": [] -} diff --git a/frontend/public/json/strapi.json b/frontend/public/json/strapi.json deleted file mode 100644 index bd91adeae..000000000 --- a/frontend/public/json/strapi.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "Strapi", - "slug": "strapi", - "categories": [ - 12 - ], - "date_created": "2026-02-27", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 1337, - "documentation": "https://docs.strapi.io/", - "website": "https://strapi.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/strapi.webp", - "config_path": "/opt/strapi/.env", - "description": "Strapi is a leading open-source headless CMS that enables developers to build powerful APIs quickly. It features a flexible content structure with customizable content types, supporting both REST and GraphQL APIs. The intuitive admin panel allows non-technical users to manage content easily, while developers can extend functionality through plugins. Built on Node.js, Strapi offers role-based access control, media library management, and internationalization support out of the box.", - "install_methods": [ - { - "type": "default", - "script": "ct/strapi.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "First-time setup requires creating an admin account at http://IP:1337/admin", - "type": "info" - }, - { - "text": "Default installation uses SQLite. 
For production use, consider configuring PostgreSQL or MySQL.", - "type": "info" - }, - { - "text": "Building the admin panel requires 4GB RAM. Container may take 10-15 minutes to fully initialize.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/streamlink-webui.json b/frontend/public/json/streamlink-webui.json deleted file mode 100644 index 7f574bfe7..000000000 --- a/frontend/public/json/streamlink-webui.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Streamlink WebUI", - "slug": "streamlink-webui", - "categories": [ - 11 - ], - "date_created": "2025-05-06", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8000, - "documentation": "https://github.com/CrazyWolf13/streamlink-webui", - "config_path": "/opt/streamlink-webui.env", - "website": "https://github.com/CrazyWolf13/streamlink-webui", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/streamlink.webp", - "description": "a simple web-ui to the well-known streamlink cli application, which allows you to save twitch streams to your local disk.", - "install_methods": [ - { - "type": "default", - "script": "ct/streamlink-webui.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 5, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "null", - "password": "null" - }, - "notes": [ - { - "text": "This app requires a Twitch cliend_ID and client_secret, set it in the config file. 
Look in the application documentation on how to obtain it.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/stylus.json b/frontend/public/json/stylus.json deleted file mode 100644 index 564d22ca2..000000000 --- a/frontend/public/json/stylus.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Stylus", - "slug": "stylus", - "categories": [ - 4 - ], - "date_created": "2025-09-12", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8000, - "documentation": "https://mmastrac.github.io/stylus/", - "website": "https://github.com/mmastrac/stylus", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/stylus.webp", - "config_path": "/opt/stylus/config.yaml", - "description": "Stylus (style + status) is a lightweight status page for infrastructure and networks. Configure a set of bash scripts that test the various parts of your infrastructure, set up visualizations with minimal configuration, and Stylus will generate you a dashboard for your system.", - "install_methods": [ - { - "type": "default", - "script": "ct/stylus.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/sure.json b/frontend/public/json/sure.json deleted file mode 100644 index 1221817ca..000000000 --- a/frontend/public/json/sure.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Sure", - "slug": "sure", - "categories": [ - 23 - ], - "date_created": "2026-02-20", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://github.com/we-promise/sure", - "website": "https://sure.am", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/sure-finance.webp", - "config_path": "/etc/sure/.env", - "description": "The personal finance app for everyone. 
NOT affiliated with or endorsed by Maybe Finance Inc..", - "install_methods": [ - { - "type": "default", - "script": "ct/sure.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 6, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/swizzin.json b/frontend/public/json/swizzin.json deleted file mode 100644 index 9b1586913..000000000 --- a/frontend/public/json/swizzin.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "name": "Swizzin", - "slug": "swizzin", - "categories": [ - 13 - ], - "date_created": "2025-08-19", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://swizzin.ltd/getting-started", - "config_path": "/etc/swizzin/", - "website": "https://swizzin.ltd/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/swizzin.webp", - "description": "Swizzin is a light-weight, modular, and user-friendly seedbox solution for Debian-based servers. It allows for the easy installation and management of a wide variety of applications commonly used for torrenting and media management, such as rTorrent, Sonarr, Radarr, and Plex, all accessible through a command-line utility or a web-based dashboard.", - "install_methods": [ - { - "type": "default", - "script": "ct/swizzin.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 20, - "os": "Debian", - "version": "12" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Installation might take a long time if choosing to install many apps. Be patient.", - "type": "info" - }, - { - "text": "Swizzin is a management suite, not a single application. Use the 'box' command inside the container to install/manage individual apps like rTorrent, Sonarr, etc. 
A full list can be found in documentation.", - "type": "info" - }, - { - "text": "It is very recommended to install at least the 'panel' for web access, and 'nginx' for easy access to other apps.", - "type": "warning" - }, - { - "text": "WARNING: Installation sources scripts outside of Community Scripts repo. Please check the source before installing.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/syncthing.json b/frontend/public/json/syncthing.json deleted file mode 100644 index 2a007378b..000000000 --- a/frontend/public/json/syncthing.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "name": "Syncthing", - "slug": "syncthing", - "categories": [ - 12 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8384, - "documentation": "https://docs.syncthing.net/", - "website": "https://syncthing.net/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/syncthing.webp", - "config_path": "/root/.local/state/syncthing/config.xml - Alpine: /var/lib/syncthing/.local/state/syncthing/config.xml", - "description": "Syncthing is an open-source file syncing tool that allows users to keep their files in sync across multiple devices by using peer-to-peer synchronization. 
It doesn't rely on any central server, so all data transfers are directly between devices.", - "install_methods": [ - { - "type": "default", - "script": "ct/syncthing.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-syncthing.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 1, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/tandoor.json b/frontend/public/json/tandoor.json deleted file mode 100644 index 16609a34c..000000000 --- a/frontend/public/json/tandoor.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Tandoor Recipes", - "slug": "tandoor", - "categories": [ - 24 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8002, - "documentation": "https://docs.tandoor.dev/", - "website": "https://tandoor.dev/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/tandoor-recipes.webp", - "config_path": "/opt/tandoor/.env", - "description": "Tandoor Recipes is an application for managing recipes, planning meals, building shopping lists and much much more!", - "install_methods": [ - { - "type": "default", - "script": "ct/tandoor.sh", - "resources": { - "cpu": 4, - "ram": 4096, - "hdd": 10, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/tasmoadmin.json b/frontend/public/json/tasmoadmin.json deleted file mode 100644 index 8623e47dd..000000000 --- a/frontend/public/json/tasmoadmin.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "TasmoAdmin", - "slug": "tasmoadmin", - "categories": [ - 16 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9999, - "documentation": null, - 
"website": "https://github.com/TasmoAdmin/TasmoAdmin#readme", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/tasmoadmin.webp", - "config_path": "", - "description": "TasmoAdmin is an administrative platform for devices flashed with Tasmota.", - "install_methods": [ - { - "type": "default", - "script": "ct/tasmoadmin.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/tasmocompiler.json b/frontend/public/json/tasmocompiler.json deleted file mode 100644 index 70f4e6ba1..000000000 --- a/frontend/public/json/tasmocompiler.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "TasmoCompiler", - "slug": "tasmocompiler", - "categories": [ - 16 - ], - "date_created": "2025-02-10", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://github.com/benzino77/tasmocompiler/blob/master/README.md", - "website": "https://github.com/benzino77/tasmocompiler", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/tasmocompiler.webp", - "config_path": "", - "description": "TasmoCompiler is a simple web GUI which allows you to compile fantastic Tasmota firmware with your own settings.", - "install_methods": [ - { - "type": "default", - "script": "ct/tasmocompiler.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 10, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/tautulli.json b/frontend/public/json/tautulli.json deleted file mode 100644 index e603bc2c6..000000000 --- a/frontend/public/json/tautulli.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Tautulli", - "slug": "tautulli", - "categories": [ - 13 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - 
"interface_port": 8181, - "documentation": "https://github.com/Tautulli/Tautulli/wiki", - "website": "https://tautulli.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/tautulli.webp", - "config_path": "/opt/Tautulli/config.ini", - "description": "Tautulli allows you to monitor and track your Plex Media Server usage, such as viewing statistics and analysis of your media library. It can be used to monitor user activity, get notifications about new media added to your library, and even generate reports on your media usage.", - "install_methods": [ - { - "type": "default", - "script": "ct/tautulli.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/tdarr.json b/frontend/public/json/tdarr.json deleted file mode 100644 index 54b668e3a..000000000 --- a/frontend/public/json/tdarr.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Tdarr", - "slug": "tdarr", - "categories": [ - 14 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8265, - "documentation": "https://docs.tdarr.io/docs/welcome/what", - "website": "https://tdarr.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/tdarr.webp", - "config_path": "", - "description": "Tdarr is a media transcoding application designed to automate the transcode and remux management of a media library. It uses conditional-based processing to determine the required encoding and remux operations for each file in the library. The software integrates with popular media management tools, such as Sonarr and Radarr, to ensure that newly added media files are automatically processed and optimized for the user's desired playback device. 
Tdarr provides a web-based interface for monitoring and managing the transcoding process, and also supports real-time logging and reporting. The software is designed to be flexible and configurable, with a wide range of encoding and remux options available to users. Tdarr is an ideal solution for media enthusiasts who want to optimize their library for seamless playback on a variety of devices, while also streamlining the management and maintenance of their media library.", - "install_methods": [ - { - "type": "default", - "script": "ct/tdarr.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "With Privileged/Unprivileged Hardware Acceleration Support", - "type": "info" - } - ] -} diff --git a/frontend/public/json/teamspeak-server.json b/frontend/public/json/teamspeak-server.json deleted file mode 100644 index 10c8c2859..000000000 --- a/frontend/public/json/teamspeak-server.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "name": "Teamspeak-Server", - "slug": "teamspeak-server", - "categories": [ - 24 - ], - "date_created": "2025-07-21", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9987, - "documentation": "https://support.teamspeak.com/hc/en-us/categories/360000302017-TeamSpeak-3", - "website": "https://teamspeak.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/teamspeak-light.webp", - "config_path": "", - "description": "TeamSpeak is a voice over IP (VoIP) application, primarily used by gamers and teams to chat in real time on dedicated servers. 
It delivers crystal‑clear, low‑latency voice communication.", - "install_methods": [ - { - "type": "default", - "script": "ct/teamspeak-server.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-teamspeak-server.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 2, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Use `journalctl -u teamspeak-server.service` inside Debian LXC console to check for admin credentials!", - "type": "info" - }, - { - "text": "Use `cat /var/log/teamspeak.err.log` inside Alpine LXC console to check for admin credentials!", - "type": "info" - } - ] -} diff --git a/frontend/public/json/technitiumdns.json b/frontend/public/json/technitiumdns.json deleted file mode 100644 index 59395467e..000000000 --- a/frontend/public/json/technitiumdns.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Technitium DNS", - "slug": "technitiumdns", - "categories": [ - 5 - ], - "date_created": "2024-04-28", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5380, - "documentation": "https://blog.technitium.com/2017/11/running-dns-server-on-ubuntu-linux.html", - "website": "https://technitium.com/dns/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/technitium.webp", - "config_path": "", - "description": "Technitium DNS Server is a free, open-source and privacy-focused DNS (Domain Name System) server software for Windows, Linux, and macOS. It is designed to provide a secure, fast, and reliable DNS resolution service to its users. The server can be configured through a web-based interface, and it supports a variety of advanced features, such as automatic IP updates, IPv6 support, caching of DNS queries, and the ability to block unwanted domains. 
It is also designed to be highly secure, with built-in measures to prevent common types of DNS attacks and data leaks. Technitium DNS Server is aimed at providing an alternative to traditional DNS servers, which often have privacy and security concerns associated with them, and it is ideal for users who are looking for a more secure and private DNS resolution service.", - "install_methods": [ - { - "type": "default", - "script": "ct/technitiumdns.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/teddycloud.json b/frontend/public/json/teddycloud.json deleted file mode 100644 index 8971935fb..000000000 --- a/frontend/public/json/teddycloud.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "TeddyCloud", - "slug": "teddycloud", - "categories": [ - 13 - ], - "date_created": "2024-12-28", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://tonies-wiki.revvox.de/docs/tools/teddycloud/", - "website": "https://tonies-wiki.revvox.de/docs/tools/teddycloud/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/teddycloud.webp", - "config_path": "", - "description": "TeddyCloud is an open source server replacement for the Boxine Cloud.", - "install_methods": [ - { - "type": "default", - "script": "ct/teddycloud.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "First start of the service can take some time due to certificate generation.", - "type": "warn" - } - ] -} diff --git a/frontend/public/json/telegraf.json b/frontend/public/json/telegraf.json deleted file mode 100644 index f37d6ecad..000000000 --- a/frontend/public/json/telegraf.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - 
"name": "Telegraf", - "slug": "telegraf", - "categories": [ - 9 - ], - "date_created": "2025-09-11", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://docs.influxdata.com/telegraf/v1/", - "config_path": "/etc/telegraf/telegraf.conf", - "website": "https://github.com/influxdata/telegraf", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/telegraf.webp", - "description": "Telegraf collects and sends time series data from databases, systems, and IoT sensors. It has no external dependencies, is easy to install, and requires minimal memory.", - "install_methods": [ - { - "type": "default", - "script": "ct/telegraf.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Make sure to configure an output for the telegraf config and start the service with `systemctl start telegraf`.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/termix.json b/frontend/public/json/termix.json deleted file mode 100644 index b982eca3a..000000000 --- a/frontend/public/json/termix.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Termix", - "slug": "termix", - "categories": [ - 6 - ], - "date_created": "2026-01-18", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://docs.termix.site/", - "website": "https://termix.site/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/termix.webp", - "config_path": "", - "description": "Termix is an open-source, self-hosted server management platform with SSH terminal access, SSH tunneling, remote file management, Docker management, and multi-platform support.", - "install_methods": [ - { - "type": "default", - "script": "ct/termix.sh", - "resources": { - "cpu": 4, - "ram": 4096, - "hdd": 10, - "os": "Debian", - "version": "13" - } - } - ], - 
"default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/the-lounge.json b/frontend/public/json/the-lounge.json deleted file mode 100644 index 846a2a7ef..000000000 --- a/frontend/public/json/the-lounge.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "The Lounge", - "slug": "the-lounge", - "categories": [ - 22 - ], - "date_created": "2024-11-28", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9000, - "documentation": "https://thelounge.chat/docs", - "website": "https://thelounge.chat/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/the-lounge.webp", - "config_path": "/etc/thelounge/config.js", - "description": "Modern web IRC client designed for self-hosting ", - "install_methods": [ - { - "type": "default", - "script": "ct/the-lounge.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "ubuntu", - "version": "24.04" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "The Lounge is running in private mode. Use `runuser -u thelounge -- thelounge add usernamehere` to create users.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/thingsboard.json b/frontend/public/json/thingsboard.json deleted file mode 100644 index 238d8c791..000000000 --- a/frontend/public/json/thingsboard.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "ThingsBoard", - "slug": "thingsboard", - "categories": [ - 7 - ], - "date_created": "2026-01-18", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://thingsboard.io/docs/", - "website": "https://thingsboard.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/thingsboard.webp", - "config_path": "/etc/thingsboard/conf/thingsboard.conf", - "description": "ThingsBoard is an open-source IoT platform for data collection, processing, visualization, and device management. 
It enables device connectivity via industry standard IoT protocols - MQTT, CoAP and HTTP and supports both cloud and on-premises deployments.", - "install_methods": [ - { - "type": "default", - "script": "ct/thingsboard.sh", - "resources": { - "cpu": 4, - "ram": 4096, - "hdd": 10, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "sysadmin@thingsboard.org", - "password": "sysadmin" - }, - "notes": [ - { - "text": "Please allow up to 90 seconds for the Web UI to start", - "type": "info" - }, - { - "text": "Additional demo accounts: tenant@thingsboard.org / tenant and customer@thingsboard.org / customer", - "type": "info" - }, - { - "text": "Change passwords for all accounts in the account profile page after first login", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/threadfin.json b/frontend/public/json/threadfin.json deleted file mode 100644 index c168d6a36..000000000 --- a/frontend/public/json/threadfin.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Threadfin", - "slug": "threadfin", - "categories": [ - 13 - ], - "date_created": "2024-06-12", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 34400, - "documentation": "https://github.com/xteve-project/xTeVe-Documentation/blob/master/en/configuration.md", - "website": "https://github.com/Threadfin/Threadfin", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/threadfin.webp", - "config_path": "", - "description": "Threadfin is a M3U proxy for Kernel, Plex, Jellyfin, or Emby, based on xTeVe.", - "install_methods": [ - { - "type": "default", - "script": "ct/threadfin.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/tianji.json b/frontend/public/json/tianji.json deleted file mode 100644 index 3fcca7705..000000000 --- 
a/frontend/public/json/tianji.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Tianji", - "slug": "tianji", - "categories": [ - 9 - ], - "date_created": "2024-09-14", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 12345, - "documentation": "https://tianji.dev/docs/intro", - "website": "https://tianji.msgbyte.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/tianji.webp", - "config_path": "/opt/tianji/src/server/.env", - "description": "Tianji is an open-source tool for website analytics, uptime monitoring, and server status tracking, all in one. It\u2019s lightweight, privacy-focused, and helps teams monitor web traffic, server health, and gather user interaction data", - "install_methods": [ - { - "type": "default", - "script": "ct/tianji.sh", - "resources": { - "cpu": 4, - "ram": 4096, - "hdd": 12, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "admin" - }, - "notes": [] -} diff --git a/frontend/public/json/tinyauth.json b/frontend/public/json/tinyauth.json deleted file mode 100644 index 21b909a22..000000000 --- a/frontend/public/json/tinyauth.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "name": "Tinyauth", - "slug": "tinyauth", - "categories": [ - 6 - ], - "date_created": "2026-03-03", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://tinyauth.app", - "config_path": "/opt/tinyauth/.env", - "website": "https://tinyauth.app", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/tinyauth.webp", - "description": "Tinyauth is a simple authentication middleware that adds simple username/password login or OAuth with Google, Github and any generic provider to all of your docker apps.", - "install_methods": [ - { - "type": "default", - "script": "ct/tinyauth.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "debian", - "version": "13" - } - }, - { - "type": 
"alpine", - "script": "ct/alpine-tinyauth.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 2, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "The default credentials are located in `/opt/tinyauth/credentials.txt`.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/traccar.json b/frontend/public/json/traccar.json deleted file mode 100644 index bd817c2fb..000000000 --- a/frontend/public/json/traccar.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Traccar", - "slug": "traccar", - "categories": [ - 0 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8082, - "documentation": "https://www.traccar.org/documentation/", - "website": "https://www.traccar.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/traccar.webp", - "config_path": "/opt/traccar/conf/traccar.xml", - "description": "Traccar is an open source GPS tracking system. 
It supports more than 200 GPS protocols and more than 2000 models of GPS tracking devices.", - "install_methods": [ - { - "type": "default", - "script": "ct/traccar.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/tracearr.json b/frontend/public/json/tracearr.json deleted file mode 100644 index 1f65ae049..000000000 --- a/frontend/public/json/tracearr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Tracearr", - "slug": "tracearr", - "categories": [ - 14 - ], - "date_created": "2026-01-23", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://github.com/connorgallopo/Tracearr#readme", - "config_path": "", - "website": "https://github.com/connorgallopo/Tracearr", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/tracearr.webp", - "description": "Tracearr is a streaming access manager for Plex, Jellyfin and Emby servers. 
It answers the question every server owner eventually asks: \"Who's actually using my server, and are they sharing their login?\"", - "install_methods": [ - { - "type": "default", - "script": "ct/tracearr.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 10, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/tracktor.json b/frontend/public/json/tracktor.json deleted file mode 100644 index 5e337c388..000000000 --- a/frontend/public/json/tracktor.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Tracktor", - "slug": "tracktor", - "categories": [ - 9 - ], - "date_created": "2025-08-26", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://github.com/javedh-dev/tracktor/tree/main/docs", - "config_path": "/opt/tracktor.env", - "website": "https://github.com/javedh-dev/tracktor", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/tracktor.webp", - "description": "Tracktor is an open-source web application for comprehensive vehicle management.\nEasily track fuel consumption, maintenance, insurance, and regulatory documents for all your vehicles in one place.", - "install_methods": [ - { - "type": "default", - "script": "ct/tracktor.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 6, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": "123456" - }, - "notes": [ - { - "text": "Please check and update the '/opt/tracktor.env' file if using behind reverse proxy.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/traefik.json b/frontend/public/json/traefik.json deleted file mode 100644 index 61d470f66..000000000 --- a/frontend/public/json/traefik.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "name": "Traefik", - "slug": "traefik", - "categories": [ - 21 - ], - "date_created": "2024-05-20", - "type": "ct", 
- "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://doc.traefik.io/", - "website": "https://traefik.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/traefik.webp", - "config_path": "/etc/traefik/traefik.yaml", - "description": "Traefik (pronounced traffic) is an open-source edge router and reverse proxy that simplifies managing microservices. It automatically discovers services, dynamically updates routing rules without downtime, provides load balancing, handles SSL termination, and supports various middleware for added functionality. Ideal for cloud-native environments, it integrates seamlessly with platforms like Docker and Kubernetes.", - "install_methods": [ - { - "type": "default", - "script": "ct/traefik.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-traefik.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 1, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/transmission.json b/frontend/public/json/transmission.json deleted file mode 100644 index f7ba23f00..000000000 --- a/frontend/public/json/transmission.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "name": "Transmission", - "slug": "transmission", - "categories": [ - 11 - ], - "date_created": "2025-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9091, - "documentation": "https://github.com/transmission/transmission/blob/main/docs/README.md", - "website": "https://transmissionbt.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/transmission.webp", - "config_path": "Debian `/etc/transmission-daemon/settings.json` | Alpine `/var/lib/transmission/config/settings.json`", - "description": "Transmission is a free, open-source BitTorrent client known for its 
fast download speeds and ease of use. It supports various platforms such as Windows, Linux, and macOS and has features like web interface, peer exchange, and encrypted transfers.", - "install_methods": [ - { - "type": "default", - "script": "ct/transmission.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-transmission.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 1, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "type": "info", - "text": "Script disables whitelisting by default. Change config to suit your needs." - }, - { - "type": "info", - "text": "Alpine script sets initial disk size to 1GB. Please adjust for your needs after installation ends." - } - ] -} diff --git a/frontend/public/json/trilium.json b/frontend/public/json/trilium.json deleted file mode 100644 index e3f2fac79..000000000 --- a/frontend/public/json/trilium.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Trilium Notes", - "slug": "trilium", - "categories": [ - 12 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://github.com/TriliumNext/trilium/wiki", - "website": "https://github.com/TriliumNext/trilium", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/trilium-notes.webp", - "config_path": "/root/trilium-data/config.ini", - "description": "Trilium Notes is the latest and officially maintained version of the powerful, self-hosted note-taking and personal knowledge management application. It enables users to organize information in a hierarchical tree structure and supports rich text editing, internal linking, images, attachments, and powerful scripting capabilities. 
This version reflects the most current development efforts under the TriliumNext organization and replaces all prior forks or legacy variants. Trilium is ideal for building personal wikis, structured documentation, and long-term knowledge archives, giving users full local control and privacy.", - "install_methods": [ - { - "type": "default", - "script": "ct/trilium.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/trip.json b/frontend/public/json/trip.json deleted file mode 100644 index f23fe7bd1..000000000 --- a/frontend/public/json/trip.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "TRIP", - "slug": "trip", - "categories": [ - 21 - ], - "date_created": "2026-01-17", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8000, - "documentation": "https://itskovacs.github.io/trip/docs/intro", - "website": "https://github.com/itskovacs/TRIP", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/trip.webp", - "config_path": "/opt/trip.env", - "description": "Minimalist POI Map Tracker and Trip Planner. 
Self-hosted.", - "install_methods": [ - { - "type": "default", - "script": "ct/trip.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/truenas-vm.json b/frontend/public/json/truenas-vm.json deleted file mode 100644 index 67fe24f8a..000000000 --- a/frontend/public/json/truenas-vm.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "TrueNAS Community Edition", - "slug": "truenas-vm", - "categories": [ - 2 - ], - "date_created": "2026-02-19", - "type": "vm", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": "https://www.truenas.com/docs/", - "website": "https://www.truenas.com/truenas-community-edition/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/truenas-core.webp", - "config_path": "", - "description": "TrueNAS Community Edition is the world's most deployed storage software. Free, flexible and build on OpenZFS with Docker.", - "install_methods": [ - { - "type": "default", - "script": "vm/truenas-vm.sh", - "resources": { - "cpu": 2, - "ram": 8192, - "hdd": 16, - "os": "Debian", - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Once the script finishes, proceed with the OS installation via the console. 
For more details, please refer to this discussion: `https://github.com/community-scripts/ProxmoxVE/discussions/11344`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/tududi.json b/frontend/public/json/tududi.json deleted file mode 100644 index 01f7c26c9..000000000 --- a/frontend/public/json/tududi.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Tududi", - "slug": "tududi", - "categories": [ - 12 - ], - "date_created": "2025-08-04", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3002, - "documentation": "https://github.com/chrisvel/tududi#-getting-started", - "config_path": "/opt/tududi/backend/.env", - "website": "https://tududi.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/tududi.webp", - "description": "Self-hosted task management with functional programming architecture, hierarchical organization, and multi-language support.", - "install_methods": [ - { - "type": "default", - "script": "ct/tududi.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Create users like this: `cd /opt/tududi` => `npm run user:create `", - "type": "info" - }, - { - "text": "Database location: `/opt/tududi-db`. 
Uploads: `/opt/tududi-uploads`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/tunarr.json b/frontend/public/json/tunarr.json deleted file mode 100644 index a5059567b..000000000 --- a/frontend/public/json/tunarr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Tunarr", - "slug": "tunarr", - "categories": [ - 13 - ], - "date_created": "2025-09-19", - "type": "ct", - "updateable": true, - "privileged": false, - "config_path": "/opt/tunarr/.env", - "interface_port": 8000, - "documentation": "https://tunarr.com/", - "website": "https://tunarr.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/tunarr.webp", - "description": "Create a classic TV experience using your own media - IPTV backed by Plex/Jellyfin/Emby.", - "install_methods": [ - { - "type": "default", - "script": "ct/tunarr.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 5, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/turnkey.json b/frontend/public/json/turnkey.json deleted file mode 100644 index c515c09eb..000000000 --- a/frontend/public/json/turnkey.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "TurnKey", - "slug": "turnkey", - "categories": [ - 2 - ], - "date_created": "2024-05-02", - "type": "turnkey", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": "https://www.turnkeylinux.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/turnkey-linux.webp", - "config_path": "", - "description": "TurnKey LXC Appliances is an open-source project that provides a collection of free, ready-to-use virtual appliances and installation images for various software applications and services. These appliances are pre-configured and come with all the necessary software and settings to simplify deployment and management. 
The goal of TurnKey Linux is to make it easier for users to set up and run popular software applications without the need for extensive manual configuration.", - "install_methods": [ - { - "type": "default", - "script": "turnkey/turnkey.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "The script creates a `*.creds` file in the Proxmox root directory with the password of the newly created TurnKey LXC Appliance.", - "type": "info" - }, - { - "text": "Retrieve Password: `cat turnkey-name.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/twingate-connector.json b/frontend/public/json/twingate-connector.json deleted file mode 100644 index f60c19af7..000000000 --- a/frontend/public/json/twingate-connector.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "twingate-connector", - "slug": "twingate-connector", - "categories": [ - 4 - ], - "date_created": "2025-08-18", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://www.twingate.com/docs/", - "config_path": "/etc/twingate/connector.conf", - "website": "https://www.twingate.com", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/twingate.webp", - "description": "Twingate Connectors are lightweight software components that establish secure, least-privileged access between private network resources and authorized users without exposing the network to the internet. 
They act as outbound-only bridges between your protected resources and the Twingate infrastructure, ensuring zero-trust access without the need for a VPN.", - "install_methods": [ - { - "type": "default", - "script": "ct/twingate-connector.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 3, - "os": "Ubuntu", - "version": "24.04" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "You can get your Twingate access or refresh tokens from the Twingate Admin Console. `https://auth.twingate.com/signup-v2`", - "type": "info" - }, - { - "text": "If you need to update your access or refresh tokens, they can be found in /etc/twingate/connector.conf", - "type": "info" - } - ] -} diff --git a/frontend/public/json/typesense.json b/frontend/public/json/typesense.json deleted file mode 100644 index cb5e05926..000000000 --- a/frontend/public/json/typesense.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "TypeSense", - "slug": "typesense", - "categories": [ - 20 - ], - "date_created": "2025-01-06", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://typesense.org/docs/", - "website": "https://typesense.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/typesense.webp", - "config_path": "/etc/typesense/typesense-server.ini", - "description": "Typesense is an open-source, fast, and lightweight search engine optimized for delivering instant, relevant, and typo-tolerant search results. Designed for ease of use and high performance, it offers features like real-time indexing, fuzzy matching, customizable relevance ranking, and a simple API for integration. Typesense is particularly well-suited for applications requiring instant search capabilities, such as e-commerce, documentation, or any content-rich websites. 
It is often compared to tools like Elasticsearch but is more developer-friendly and less resource-intensive.", - "install_methods": [ - { - "type": "default", - "script": "ct/typesense.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "This script requires some extra steps after the installation, Please checkout the 'documentation' Button", - "type": "info" - } - ] -} diff --git a/frontend/public/json/ubuntu.json b/frontend/public/json/ubuntu.json deleted file mode 100644 index aa3696cc4..000000000 --- a/frontend/public/json/ubuntu.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Ubuntu", - "slug": "ubuntu", - "categories": [ - 2 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": "https://ubuntu.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ubuntu.webp", - "config_path": "", - "description": "Ubuntu is a distribution based on Debian, designed to have regular releases and a consistent user experience.", - "install_methods": [ - { - "type": "default", - "script": "ct/ubuntu.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "ubuntu", - "version": "24.04" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/ubuntu2204-vm.json b/frontend/public/json/ubuntu2204-vm.json deleted file mode 100644 index 8194ce758..000000000 --- a/frontend/public/json/ubuntu2204-vm.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Ubuntu 22.04", - "slug": "ubuntu2204-vm", - "categories": [ - 2 - ], - "date_created": "2024-05-02", - "type": "vm", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": "https://ubuntu.com/", - "logo": 
"https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ubuntu.webp", - "config_path": "", - "description": "Ubuntu is a distribution based on Debian, designed to have regular releases and a consistent user experience.", - "install_methods": [ - { - "type": "default", - "script": "vm/ubuntu2204-vm.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 5, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "This VM uses Cloud-init, for more information see cloud-init discussion: `https://github.com/community-scripts/ProxmoxVE/discussions/272`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/ubuntu2404-vm.json b/frontend/public/json/ubuntu2404-vm.json deleted file mode 100644 index a91507fcd..000000000 --- a/frontend/public/json/ubuntu2404-vm.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Ubuntu 24.04", - "slug": "ubuntu2404-vm", - "categories": [ - 2 - ], - "date_created": "2024-05-02", - "type": "vm", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": "https://ubuntu.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ubuntu.webp", - "config_path": "", - "description": "Ubuntu is a distribution based on Debian, designed to have regular releases and a consistent user experience.", - "install_methods": [ - { - "type": "default", - "script": "vm/ubuntu2404-vm.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 7, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "This VM uses Cloud-init, for more information see cloud-init discussion: `https://github.com/community-scripts/ProxmoxVE/discussions/272`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/ubuntu2504-vm.json b/frontend/public/json/ubuntu2504-vm.json deleted file mode 100644 index 698f1f50f..000000000 --- 
a/frontend/public/json/ubuntu2504-vm.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Ubuntu 25.04", - "slug": "ubuntu2504-vm", - "categories": [ - 2 - ], - "date_created": "2025-06-19", - "type": "vm", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": "https://ubuntu.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ubuntu.webp", - "config_path": "", - "description": "Ubuntu is a distribution based on Debian, designed to have regular releases and a consistent user experience.", - "install_methods": [ - { - "type": "default", - "script": "vm/ubuntu2504-vm.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "This VM uses Cloud-init, for more information see cloud-init discussion: `https://github.com/community-scripts/ProxmoxVE/discussions/272`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/uhf.json b/frontend/public/json/uhf.json deleted file mode 100644 index 2baa99309..000000000 --- a/frontend/public/json/uhf.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "UHF Server", - "slug": "uhf", - "categories": [ - 13 - ], - "date_created": "2025-09-12", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 7568, - "documentation": "https://www.uhfapp.com/server", - "website": "https://www.uhfapp.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/uhf.webp", - "config_path": "/etc/uhf-server/", - "description": "UHF Server is a powerful companion app that lets you seamlessly schedule and record your favorite shows from the UHF app.", - "install_methods": [ - { - "type": "default", - "script": "ct/uhf.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - 
"notes": [] -} diff --git a/frontend/public/json/umami.json b/frontend/public/json/umami.json deleted file mode 100644 index dfb6f1a18..000000000 --- a/frontend/public/json/umami.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Umami", - "slug": "umami", - "categories": [ - 9 - ], - "date_created": "2024-05-09", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://umami.is/docs", - "website": "https://umami.is/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/umami.webp", - "config_path": "/opt/umami/.env", - "description": "Umami makes it easy to collect, analyze, and understand your web data while maintaining visitor privacy and data ownership.", - "install_methods": [ - { - "type": "default", - "script": "ct/umami.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 12, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "umami" - }, - "notes": [ - { - "text": "To view the database credentials : `cat umami.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/umbrel-os-vm.json b/frontend/public/json/umbrel-os-vm.json deleted file mode 100644 index ca8af8134..000000000 --- a/frontend/public/json/umbrel-os-vm.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Umbrel OS", - "slug": "umbrel-os-vm", - "categories": [ - 2 - ], - "date_created": "2025-05-28", - "type": "vm", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://community.umbrel.com/c/guides/", - "website": "https://umbrel.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/umbrelos.webp", - "config_path": "", - "description": "Take control of your digital life with Umbrel — a private, personal server that lets you self-host powerful open source apps with ease. 
Whether you want to run a Bitcoin or Lightning node, manage your files with Nextcloud, stream media, block ads across your network, or explore self-hosted AI tools, Umbrel gives you full ownership and privacy. All through a beautiful, user-friendly interface — no cloud, no tracking, just your data under your control.", - "install_methods": [ - { - "type": "default", - "script": "vm/umbrel-os-vm.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 32, - "os": "debian", - "version": "12" - } - } - ], - "default_credentials": { - "username": "umbrel", - "password": "umbrel" - }, - "notes": [ - { - "text": "This VM use as base a Debian 12 OS with a custom kernel and some additional packages to run Umbrel OS. It is not a standard Debian VM, but it is optimized for Umbrel OS.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/umlautadaptarr.json b/frontend/public/json/umlautadaptarr.json deleted file mode 100644 index 4d67ced4a..000000000 --- a/frontend/public/json/umlautadaptarr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "UmlautAdaptarr", - "slug": "umlautadaptarr", - "categories": [ - 14 - ], - "date_created": "2025-04-28", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5005, - "documentation": "https://github.com/PCJones/UmlautAdaptarr/blob/master/README.md", - "website": "https://github.com/PCJones/UmlautAdaptarr", - "logo": "https://raw.githubusercontent.com/community-scripts/ProxmoxVE/refs/heads/main/misc/images/logo.png", - "config_path": "/opt/UmlautAdaptarr/appsettings.json", - "description": "UmlautAdaptarr is a tool that fixes issues with how Sonarr, Radarr, Lidarr, and Readarr handle foreign languages. 
It sits between these apps and the indexers, changing searches and results, and renaming releases so the apps recognize them correctly.", - "install_methods": [ - { - "type": "default", - "script": "ct/umlautadaptarr.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/unbound.json b/frontend/public/json/unbound.json deleted file mode 100644 index 95212cfb7..000000000 --- a/frontend/public/json/unbound.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Unbound", - "slug": "unbound", - "categories": [ - 5 - ], - "date_created": "2024-12-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5335, - "documentation": "https://unbound.docs.nlnetlabs.nl/en/latest/", - "website": "https://www.nlnetlabs.nl/projects/unbound/about/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/unbound.webp", - "config_path": "/etc/unbound/unbound.conf.d/unbound.conf", - "description": "Unbound is a validating, recursive, caching DNS resolver. 
It is designed to be fast and lean and incorporates modern features based on open standards.", - "install_methods": [ - { - "type": "default", - "script": "ct/unbound.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Unbound Configuration Path: `/etc/unbound/unbound.conf.d/unbound.conf`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/unifi-os-server.json b/frontend/public/json/unifi-os-server.json deleted file mode 100644 index e8ea081d5..000000000 --- a/frontend/public/json/unifi-os-server.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Unifi OS Server", - "slug": "unifi-os-server", - "categories": [ - 4 - ], - "date_created": "2026-01-16", - "type": "ct", - "updateable": true, - "privileged": true, - "interface_port": 11443, - "documentation": "https://help.ui.com/hc/en-us", - "website": "https://www.ui.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/ubiquiti-unifi.webp", - "config_path": "", - "description": "Unifi OS Server is the operating system that powers Ubiquiti's UniFi line of network devices. 
It provides a centralized platform for managing and monitoring UniFi access points, switches, and security gateways, offering features such as network configuration, device provisioning, and performance analytics.", - "install_methods": [ - { - "type": "default", - "script": "ct/unifi-os-server.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 20, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/unmanic.json b/frontend/public/json/unmanic.json deleted file mode 100644 index 3d5921ccc..000000000 --- a/frontend/public/json/unmanic.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Unmanic", - "slug": "unmanic", - "categories": [ - 0 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": true, - "interface_port": 8888, - "documentation": "https://docs.unmanic.app/docs/", - "website": "https://docs.unmanic.app/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/unmanic.webp", - "config_path": "", - "description": "Unmanic is a simple tool for optimising your file library. 
You can use it to convert your files into a single, uniform format, manage file movements based on timestamps, or execute custom commands against a file based on its file size.", - "install_methods": [ - { - "type": "default", - "script": "ct/unmanic.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/update-apps.json b/frontend/public/json/update-apps.json deleted file mode 100644 index c92e5801c..000000000 --- a/frontend/public/json/update-apps.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "name": "PVE LXC Apps Updater", - "slug": "update-apps", - "categories": [ - 1 - ], - "date_created": "2026-02-06", - "type": "pve", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://github.com/community-scripts/ProxmoxVE/discussions/11532", - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/proxmox.webp", - "config_path": "", - "description": "This script updates community-scripts managed LXC containers on a Proxmox VE node. It detects the installed service, verifies available update scripts, and applies updates interactively or unattended. Optionally, containers can be backed up before the update process. If additional build resources (CPU/RAM) are required, the script adjusts container resources temporarily and restores them after the update. 
Containers requiring a reboot will be listed at the end of the process.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/update-apps.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within the Proxmox shell.", - "type": "info" - }, - { - "text": "Full Guide can be found here: `https://github.com/community-scripts/ProxmoxVE/discussions/11532`", - "type": "info" - }, - { - "text": "By default, only containers with `community-script` or `proxmox-helper-scripts` tags are listed for update. You can override this by using `var_tags='custom-tag|othertag'`.", - "type": "info" - }, - { - "text": "Optionally performs a vzdump backup before updating containers.", - "type": "warning" - }, - { - "text": "If required, the script will temporarily increase container CPU/RAM resources for the build process and restore them after completion.", - "type": "info" - }, - { - "text": "At the end of the update, containers requiring a reboot will be listed, and you may choose to reboot them directly.", - "type": "info" - }, - { - "text": "Use `var_backup=yes|no` to enable/disable backup (skip prompt).", - "type": "info" - }, - { - "text": "Use `var_backup_storage=` to set backup storage location.", - "type": "info" - }, - { - "text": "Use `var_container=all|all_running|all_stopped|101,102,...` to select containers.", - "type": "info" - }, - { - "text": "Use `var_unattended=yes|no` to run updates without interaction.", - "type": "info" - }, - { - "text": "Use `var_skip_confirm=yes` to skip initial confirmation dialog.", - "type": "info" - }, - { - "text": "Use `var_auto_reboot=yes|no` to auto-reboot containers after update.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/update-lxcs.json b/frontend/public/json/update-lxcs.json deleted file mode 100644 index e7043a007..000000000 --- 
a/frontend/public/json/update-lxcs.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "PVE LXC Updater", - "slug": "update-lxcs", - "categories": [ - 1 - ], - "date_created": "2024-04-29", - "type": "pve", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/linuxcontainers.webp", - "config_path": "", - "description": "This script has been created to simplify and speed up the process of updating the operating system running inside LXC containers across various Linux distributions, such as Ubuntu, Debian, Devuan, Alpine Linux, CentOS-Rocky-Alma, Fedora, and ArchLinux. It's designed to automatically skip templates and specific containers during the update, enhancing its convenience and usability.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/update-lxcs.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within the Proxmox shell", - "type": "info" - }, - { - "text": "The script updates only the operating system of the LXC container. 
It DOES NOT update the application installed within the container!", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/update-repo.json b/frontend/public/json/update-repo.json deleted file mode 100644 index d522ccf36..000000000 --- a/frontend/public/json/update-repo.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "PVE Update Repositories", - "slug": "update-repo", - "categories": [ - 1 - ], - "date_created": "2024-11-04", - "type": "pve", - "updateable": false, - "privileged": false, - "interface_port": null, - "documentation": null, - "website": null, - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/linuxcontainers.webp", - "config_path": "", - "description": "This script updates repository links in LXC containers, replacing old links from the tteck repository with links to the new community-scripts repository to fix issues related to updating scripts.", - "install_methods": [ - { - "type": "default", - "script": "tools/pve/update-repo.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Execute within the Proxmox shell", - "type": "info" - } - ] -} diff --git a/frontend/public/json/upgopher.json b/frontend/public/json/upgopher.json deleted file mode 100644 index 9e752d983..000000000 --- a/frontend/public/json/upgopher.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Upgopher", - "slug": "upgopher", - "categories": [ - 11 - ], - "date_created": "2025-11-22", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 9090, - "documentation": "https://github.com/wanetty/upgopher#readme", - "config_path": "", - "website": "https://github.com/wanetty/upgopher", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/upgopher.webp", - "description": "A simple Go web server for file upload, download, and browsing. 
Cross-platform alternative to Python-based file servers with no library dependencies. Features file upload via web interface, directory navigation, URL copying to clipboard, optional basic authentication, HTTPS support, and hidden files toggle.", - "install_methods": [ - { - "type": "default", - "script": "ct/upgopher.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Default uploads directory: `/opt/upgopher/uploads`", - "type": "info" - }, - { - "text": "Set application startup options by editing `/etc/systemd/system/upgopher.service`. Read documentation for available options.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/upsnap.json b/frontend/public/json/upsnap.json deleted file mode 100644 index 3e3fba805..000000000 --- a/frontend/public/json/upsnap.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "UpSnap", - "slug": "upsnap", - "categories": [ - 4 - ], - "date_created": "2025-09-23", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8090, - "documentation": "https://github.com/seriousm4x/UpSnap/wiki", - "config_path": "", - "website": "https://github.com/seriousm4x/UpSnap", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/upsnap.webp", - "description": "UpSnap is a self-hosted web app that lets you wake up, manage and monitor devices on your network with ease. 
Built with SvelteKit, Go and PocketBase, it offers a clean dashboard, scheduled wake-ups, device discovery and secure user management.", - "install_methods": [ - { - "type": "default", - "script": "ct/upsnap.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "The first user you register will be the admin user.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/uptimekuma.json b/frontend/public/json/uptimekuma.json deleted file mode 100644 index 01cfa445b..000000000 --- a/frontend/public/json/uptimekuma.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Uptime Kuma", - "slug": "uptimekuma", - "categories": [ - 9 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3001, - "documentation": "https://github.com/louislam/uptime-kuma/wiki", - "website": "https://github.com/louislam/uptime-kuma#uptime-kuma", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/uptime-kuma.webp", - "config_path": "", - "description": "Uptime Kuma is a monitoring and alerting system that tracks the availability and performance of servers, websites, and other internet-connected devices. 
It can be self-hosted and is open-source, offering a visually appealing interface for monitoring and receiving notifications about downtime events.", - "install_methods": [ - { - "type": "default", - "script": "ct/uptimekuma.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/urbackupserver.json b/frontend/public/json/urbackupserver.json deleted file mode 100644 index 0d0751394..000000000 --- a/frontend/public/json/urbackupserver.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "UrBackup Server", - "slug": "urbackupserver", - "categories": [ - 7 - ], - "date_created": "2025-01-18", - "type": "ct", - "updateable": true, - "privileged": true, - "interface_port": 55414, - "documentation": "https://www.urbackup.org/documentation.html", - "website": "https://www.urbackup.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/urbackup.webp", - "config_path": "", - "description": "URBackup is an open-source backup software designed for creating reliable and efficient backups of both files and system images. It supports client-server architecture, allowing you to back up multiple computers to a central server. It offers features such as incremental backups, real-time file backup, and scheduling, ensuring minimal data loss and quick recovery", - "install_methods": [ - { - "type": "default", - "script": "ct/urbackupserver.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 16, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "You probably want to drastically extend the storage space to fit whatever clients you want to back up", - "type": "info" - }, - { - "text": "Directory `/opt/urbackup/backups` is set as initial backup path. 
Change it to your liking", - "type": "info" - } - ] -} diff --git a/frontend/public/json/valkey.json b/frontend/public/json/valkey.json deleted file mode 100644 index ba7f276a1..000000000 --- a/frontend/public/json/valkey.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "name": "Valkey", - "slug": "valkey", - "categories": [ - 8 - ], - "date_created": "2025-11-29", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 6379, - "documentation": "https://valkey.io/docs/", - "config_path": "/etc/valkey/valkey.conf", - "website": "https://valkey.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/valkey.webp", - "description": "Valkey is an open source (BSD) high-performance key/value datastore that supports a variety of workloads such as caching, message queues, and can act as a primary database. The project is backed by the Linux Foundation, ensuring it will remain open source forever.", - "install_methods": [ - { - "type": "default", - "script": "ct/valkey.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "Debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-valkey.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 1, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Show Login Credentials, type `cat ~/valkey.creds` in the LXC console", - "type": "info" - }, - { - "text": "Alpines Valkey package is compiled without TLS support. 
For TLS, use the Debian-based valkey script instead.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/vaultwarden.json b/frontend/public/json/vaultwarden.json deleted file mode 100644 index 4cc199397..000000000 --- a/frontend/public/json/vaultwarden.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "name": "Vaultwarden", - "slug": "vaultwarden", - "categories": [ - 6 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8000, - "documentation": "https://github.com/dani-garcia/vaultwarden/wiki", - "website": "https://github.com/dani-garcia/vaultwarden/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/vaultwarden.webp", - "config_path": "/opt/vaultwarden/.env", - "description": "Vaultwarden is a self-hosted password manager which provides secure and encrypted password storage. It uses client-side encryption and provides access to passwords through a web interface and mobile apps.", - "install_methods": [ - { - "type": "default", - "script": "ct/vaultwarden.sh", - "resources": { - "cpu": 4, - "ram": 6144, - "hdd": 20, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-vaultwarden.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 1, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Application uses self-signed certificate for HTTPS to work and is enabled by default. 
If you need a different setup, please read the documentation.", - "type": "warning" - }, - { - "text": "To set the Admin Token, run the command below (or type update) in the LXC Console.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/verdaccio.json b/frontend/public/json/verdaccio.json deleted file mode 100644 index 5861df1d7..000000000 --- a/frontend/public/json/verdaccio.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Verdaccio", - "slug": "verdaccio", - "categories": [ - 20 - ], - "date_created": "2025-09-29", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 4873, - "documentation": "https://verdaccio.org/docs/what-is-verdaccio", - "website": "https://verdaccio.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/verdaccio.webp", - "config_path": "/opt/verdaccio/config/config.yaml", - "description": "Verdaccio is a lightweight private npm proxy registry built with Node.js. It allows you to host your own npm registry with minimal configuration, providing a private npm repository for your projects. Verdaccio supports npm, yarn, and pnpm, and can cache packages from the public npm registry, allowing for faster installs and protection against npm registry outages. 
It includes a web interface for browsing packages, authentication and authorization features, and can be easily integrated into your development workflow.", - "install_methods": [ - { - "type": "default", - "script": "ct/verdaccio.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "To create the first user, run: npm adduser --registry http://:4873", - "type": "info" - } - ] -} diff --git a/frontend/public/json/versions.json b/frontend/public/json/versions.json deleted file mode 100644 index fe51488c7..000000000 --- a/frontend/public/json/versions.json +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/frontend/public/json/victoriametrics.json b/frontend/public/json/victoriametrics.json deleted file mode 100644 index de848b5ab..000000000 --- a/frontend/public/json/victoriametrics.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "VictoriaMetrics", - "slug": "victoriametrics", - "categories": [ - 8 - ], - "date_created": "2025-02-24", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8428, - "documentation": "https://docs.victoriametrics.com/", - "website": "https://victoriametrics.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/victoriametrics.webp", - "config_path": "", - "description": "VictoriaMetrics is a fast, cost-saving, and scalable solution for monitoring and managing time series data. 
It delivers high performance and reliability, making it an ideal choice for businesses of all sizes.", - "install_methods": [ - { - "type": "default", - "script": "ct/victoriametrics.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 16, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Included option to install VictoriaLogs.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/vikunja.json b/frontend/public/json/vikunja.json deleted file mode 100644 index 31ce13a8e..000000000 --- a/frontend/public/json/vikunja.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Vikunja", - "slug": "vikunja", - "categories": [ - 12 - ], - "date_created": "2024-11-05", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3456, - "documentation": "https://vikunja.io/docs/", - "website": "https://vikunja.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/vikunja.webp", - "config_path": "/etc/vikunja/config.yml", - "description": "Vikunja is a powerful self-hosted todo app. It allows you to create and manage to-do lists. You can plan tasks, set priorities and collaborate with others. The best part is that your data is safe with you and you can customize the app to your liking. 
It's like a personal assistant that helps you stay organized.", - "install_methods": [ - { - "type": "default", - "script": "ct/vikunja.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/wallabag.json b/frontend/public/json/wallabag.json deleted file mode 100644 index df0ad7d5a..000000000 --- a/frontend/public/json/wallabag.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Wallabag", - "slug": "wallabag", - "categories": [ - 12 - ], - "date_created": "2025-12-12", - "type": "ct", - "updateable": true, - "privileged": false, - "config_path": "/opt/wallabag/app/config/parameters.yml", - "interface_port": 8000, - "documentation": "https://doc.wallabag.org/", - "website": "https://wallabag.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/wallabag.webp", - "description": "Wallabag is a self-hosted read-it-later application. Save web articles to read them later, offline, in a clean and distraction-free interface. 
Features tagging, full-text search, and browser/mobile app integrations.", - "install_methods": [ - { - "type": "default", - "script": "ct/wallabag.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 6, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Database credentials are stored in ~/wallabag.creds", - "type": "info" - } - ] -} diff --git a/frontend/public/json/wallos.json b/frontend/public/json/wallos.json deleted file mode 100644 index 02a5c9abf..000000000 --- a/frontend/public/json/wallos.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Wallos", - "slug": "wallos", - "categories": [ - 23 - ], - "date_created": "2024-10-24", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://github.com/ellite/wallos", - "website": "https://wallosapp.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/wallos.webp", - "config_path": "", - "description": "Wallos is a personal finance and budgeting tool that provides an intuitive interface for tracking expenses, managing subscriptions, and monitoring financial health. It features APIs for categories, notifications, payments, and user settings, making it suitable for automation and custom integrations. 
Additionally, it supports multi-language functionality.", - "install_methods": [ - { - "type": "default", - "script": "ct/wallos.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 5, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/wanderer.json b/frontend/public/json/wanderer.json deleted file mode 100644 index 0e4e16687..000000000 --- a/frontend/public/json/wanderer.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Wanderer", - "slug": "wanderer", - "categories": [ - 24 - ], - "date_created": "2025-12-01", - "type": "ct", - "updateable": true, - "privileged": false, - "config_path": "/opt/wanderer/.env", - "interface_port": 3000, - "documentation": "https://wanderer.to/welcome", - "website": "https://wanderer.to", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/wanderer-light.webp", - "description": "Wanderer is a decentralized, self-hosted trail database. 
You can upload your recorded GPS tracks or create new ones and add various metadata to build an easily searchable catalogue.", - "install_methods": [ - { - "type": "default", - "script": "ct/wanderer.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/warracker.json b/frontend/public/json/warracker.json deleted file mode 100644 index 21e105c70..000000000 --- a/frontend/public/json/warracker.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Warracker", - "slug": "warracker", - "categories": [ - 12 - ], - "date_created": "2025-09-29", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": null, - "config_path": "/opt/.env", - "website": "https://warracker.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/warracker.webp", - "description": "Warracker is an open source, self-hostable warranty tracker to monitor expirations, store receipts, files. 
You own the data, your rules!", - "install_methods": [ - { - "type": "default", - "script": "ct/warracker.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "The first user you register will be the admin user.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/wastebin.json b/frontend/public/json/wastebin.json deleted file mode 100644 index 7665aff0d..000000000 --- a/frontend/public/json/wastebin.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Wastebin", - "slug": "wastebin", - "categories": [ - 12 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8088, - "documentation": "https://github.com/matze/wastebin/blob/a297749b932ed9ff32569f3af7ee8e4a5b499834/README.md", - "website": "https://github.com/matze/wastebin", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/wastebin.webp", - "config_path": "/opt/wastebin-data/.env", - "description": "Wastebin is a minimal pastebin with a design shamelessly copied from bin.", - "install_methods": [ - { - "type": "default", - "script": "ct/wastebin.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/watcharr.json b/frontend/public/json/watcharr.json deleted file mode 100644 index d3f7e1ce7..000000000 --- a/frontend/public/json/watcharr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Watcharr", - "slug": "watcharr", - "categories": [ - 13 - ], - "date_created": "2025-02-12", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3080, - "documentation": "https://watcharr.app/docs/introduction", - "website": "https://watcharr.app/", - "logo": 
"https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/watcharr.webp", - "config_path": "", - "description": "Open source, self-hostable watched list for all your content with user authentication, modern and clean UI and a very simple setup.", - "install_methods": [ - { - "type": "default", - "script": "ct/watcharr.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/watchyourlan.json b/frontend/public/json/watchyourlan.json deleted file mode 100644 index 4a2b1ff3c..000000000 --- a/frontend/public/json/watchyourlan.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "WatchYourLAN", - "slug": "watchyourlan", - "categories": [ - 4 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8840, - "documentation": "https://github.com/aceberg/WatchYourLAN/blob/main/README.md", - "website": "https://github.com/aceberg/WatchYourLAN", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/watchyourlan.webp", - "config_path": "/data/config.yaml", - "description": "WatchYourLAN is a lightweight network IP scanner with web GUI.", - "install_methods": [ - { - "type": "default", - "script": "ct/watchyourlan.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/wavelog.json b/frontend/public/json/wavelog.json deleted file mode 100644 index b3030de1d..000000000 --- a/frontend/public/json/wavelog.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Wavelog", - "slug": "wavelog", - "categories": [ - 24 - ], - "date_created": "2024-11-12", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": 
"https://github.com/wavelog/wavelog/wiki", - "website": "https://www.wavelog.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/wavelog.webp", - "config_path": "/opt/wavelog/application/config/config.php", - "description": "Wavelog is a self-hosted PHP application that allows you to log your amateur radio contacts anywhere. All you need is a web browser and active internet connection.", - "install_methods": [ - { - "type": "default", - "script": "ct/wavelog.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 2, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "Database credentials: `cat ~/wavelog.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/wazuh.json b/frontend/public/json/wazuh.json deleted file mode 100644 index a1123ed10..000000000 --- a/frontend/public/json/wazuh.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Wazuh", - "slug": "wazuh", - "categories": [ - 9 - ], - "date_created": "2025-03-24", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 443, - "documentation": "https://documentation.wazuh.com/", - "website": "https://wazuh.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/wazuh.webp", - "config_path": "", - "description": "Wazuh is an open-source security monitoring solution that provides endpoint protection, network monitoring, and log analysis capabilities.", - "install_methods": [ - { - "type": "default", - "script": "ct/wazuh.sh", - "resources": { - "cpu": 4, - "ram": 4096, - "hdd": 25, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "root", - "password": null - }, - "notes": [ - { - "text": "WARNING: Installation sources scripts outside of Community Scripts repo. 
Please check the source before installing.", - "type": "warning" - }, - { - "text": "Show password: `cat ~/wazuh.creds`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/wealthfolio.json b/frontend/public/json/wealthfolio.json deleted file mode 100644 index 8c5508b54..000000000 --- a/frontend/public/json/wealthfolio.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Wealthfolio", - "slug": "wealthfolio", - "categories": [ - 23 - ], - "date_created": "2026-02-03", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://wealthfolio.app/docs/introduction/", - "config_path": "/opt/wealthfolio/.env", - "website": "https://wealthfolio.app/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/wealthfolio.webp", - "description": "Wealthfolio is a beautiful, privacy-focused investment tracker with local data storage. Track your portfolio across multiple accounts and asset types with detailed performance analytics, goal planning, and multi-currency support.", - "install_methods": [ - { - "type": "default", - "script": "ct/wealthfolio.sh", - "resources": { - "cpu": 4, - "ram": 4096, - "hdd": 10, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": "See ~/wealthfolio.creds" - }, - "notes": [ - { - "text": "Login password is stored in ~/wealthfolio.creds", - "type": "info" - } - ] -} diff --git a/frontend/public/json/web-check.json b/frontend/public/json/web-check.json deleted file mode 100644 index b6bc8efbf..000000000 --- a/frontend/public/json/web-check.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Web-Check", - "slug": "web-check", - "categories": [ - 4 - ], - "date_created": "2025-02-27", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://github.com/Lissy93/web-check/blob/master/.github/README.md", - "website": "https://github.com/lissy93/web-check", - "logo": 
"https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/web-check.webp", - "config_path": "/opt/web-check/.env", - "description": "Get an insight into the inner-workings of a given website: uncover potential attack vectors, analyse server architecture, view security configurations, and learn what technologies a site is using.", - "install_methods": [ - { - "type": "default", - "script": "ct/web-check.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 12, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "This script uses a fork located here: `https://github.com/CrazyWolf13/web-check/` as the original Repository does not provide tagged Github Releases, an issues is open for this: `https://github.com/Lissy93/web-check/issues/248`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/webmin.json b/frontend/public/json/webmin.json deleted file mode 100644 index 3b4c611a2..000000000 --- a/frontend/public/json/webmin.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Webmin System Administration", - "slug": "webmin", - "categories": [ - 1 - ], - "date_created": "2024-05-02", - "type": "addon", - "updateable": false, - "privileged": false, - "interface_port": 10000, - "documentation": "https://webmin.com/docs/", - "website": "https://webmin.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/webmin.webp", - "config_path": "", - "description": "Webmin provides a graphical user interface (GUI) for tasks such as user account management, package management, file system configuration, network configuration, and more.", - "install_methods": [ - { - "type": "default", - "script": "tools/addon/webmin.sh", - "resources": { - "cpu": null, - "ram": null, - "hdd": null, - "os": null, - "version": null - } - } - ], - "default_credentials": { - "username": "root", - "password": "root" - }, - "notes": [ - { - "text": "Execute within an existing LXC Console", - 
"type": "info" - } - ] -} diff --git a/frontend/public/json/wger.json b/frontend/public/json/wger.json deleted file mode 100644 index 3dc82bfb4..000000000 --- a/frontend/public/json/wger.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "wger", - "slug": "wger", - "categories": [ - 24 - ], - "date_created": "2025-02-24", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://wger.readthedocs.io/en/latest/index.html#", - "website": "https://wger.de", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/wger.webp", - "config_path": "/opt/wikijs/config.yml", - "description": "wger (ˈvɛɡɐ) Workout Manager is a free, open source web application that helps you manage your personal workouts, weight and diet plans and can also be used as a simple gym management utility. It offers a REST API as well, for easy integration with other projects and tools.", - "install_methods": [ - { - "type": "default", - "script": "ct/wger.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "adminadmin" - }, - "notes": [ - { - "text": "This LXC also runs Celery and Redis to synchronize workouts and ingredients", - "type": "info" - } - ] -} diff --git a/frontend/public/json/whisparr.json b/frontend/public/json/whisparr.json deleted file mode 100644 index d1109e864..000000000 --- a/frontend/public/json/whisparr.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Whisparr", - "slug": "whisparr", - "categories": [ - 14 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 6969, - "documentation": "https://wiki.servarr.com/en/whisparr", - "website": "https://github.com/Whisparr/Whisparr", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/whisparr.webp", - "config_path": "", - "description": "Whisparr is an adult movie collection manager 
for Usenet and BitTorrent users.", - "install_methods": [ - { - "type": "default", - "script": "ct/whisparr.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/wikijs.json b/frontend/public/json/wikijs.json deleted file mode 100644 index 2898f18fe..000000000 --- a/frontend/public/json/wikijs.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Wiki.js", - "slug": "wikijs", - "categories": [ - 12 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://docs.requarks.io/", - "website": "https://js.wiki/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/wiki-js.webp", - "config_path": "/opt/wikijs/config.yml", - "description": "Wiki.js is a free, open-source, and modern wiki application built using Node.js. It is designed to be fast, easy to use, and flexible, with a range of features for collaboration, knowledge management, and content creation. Wiki.js supports Markdown syntax for editing pages, and includes features such as version control, page history, and access control, making it easy to manage content and collaborate with others. The software is fully customizable, with a range of themes and extensions available, and can be deployed on a local server or in the cloud, making it an ideal choice for small teams and organizations looking to create and manage a wiki. 
Wiki.js provides a modern, user-friendly interface, and supports a range of data sources, including local file systems, databases, and cloud storage services.", - "install_methods": [ - { - "type": "default", - "script": "ct/wikijs.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 7, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/wireguard.json b/frontend/public/json/wireguard.json deleted file mode 100644 index 1ad423320..000000000 --- a/frontend/public/json/wireguard.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "name": "WireGuard", - "slug": "wireguard", - "categories": [ - 4 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 10086, - "documentation": "https://www.wireguard.com/quickstart/", - "website": "https://www.wireguard.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/wireguard.webp", - "config_path": "/etc/wireguard/wg0.conf", - "description": "WireGuard is a free and open-source virtual private network (VPN) software that uses modern cryptography to secure the data transmitted over a network. It is designed to be fast, secure, and easy to use. WireGuard supports various operating systems, including Linux, Windows, macOS, Android, and iOS. It operates at the network layer and is capable of being used with a wide range of protocols and configurations. Unlike other VPN protocols, WireGuard is designed to be simple and fast, with a focus on security and speed. 
It is known for its ease of setup and configuration, making it a popular choice for personal and commercial use.", - "install_methods": [ - { - "type": "default", - "script": "ct/wireguard.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-wireguard.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 1, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "admin" - }, - "notes": [ - { - "text": "Wireguard and WGDashboard are not the same. More info: `https://wgdashboard.dev/`", - "type": "info" - }, - { - "text": "WGDashboard installation is optional.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/wishlist.json b/frontend/public/json/wishlist.json deleted file mode 100644 index 324356731..000000000 --- a/frontend/public/json/wishlist.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Wishlist", - "slug": "wishlist", - "categories": [ - 12 - ], - "date_created": "2026-02-04", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3280, - "documentation": "https://github.com/cmintey/wishlist/blob/main/README.md#getting-started", - "website": "https://github.com/cmintey/wishlist", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/cmintey-wishlist.webp", - "config_path": "/opt/wishlist/.env", - "description": "Wishlist is a self-hosted wishlist application that you can share with your friends and family. 
You no longer have to wonder what to get your family for the holidays, simply check their wishlist and claim any available item!", - "install_methods": [ - { - "type": "default", - "script": "ct/wishlist.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 5, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "When using a reverse proxy with this script, please edit the`ORIGIN` value in `/opt/wishlist/.env` to point to your new URL, otherwise creating an admin account or logging in will not work.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/wizarr.json b/frontend/public/json/wizarr.json deleted file mode 100644 index 817e850a6..000000000 --- a/frontend/public/json/wizarr.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "name": "Wizarr", - "slug": "wizarr", - "categories": [ - 14, - 13 - ], - "date_created": "2025-06-19", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 5690, - "documentation": "https://docs.wizarr.dev/", - "website": "https://docs.wizarr.dev/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/wizarr.webp", - "config_path": "/opt/wizarr/.env", - "description": "Wizarr is an automatic user invitation system for Plex, Jellyfin and Emby. 
Create a unique link and share it to a user and they will automatically be invited to your media Server", - "install_methods": [ - { - "type": "default", - "script": "ct/wizarr.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/wordpress.json b/frontend/public/json/wordpress.json deleted file mode 100644 index c8214f19d..000000000 --- a/frontend/public/json/wordpress.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Wordpress", - "slug": "wordpress", - "categories": [ - 21 - ], - "date_created": "2025-01-14", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://wordpress.org/documentation/", - "website": "https://wordpress.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/wordpress.webp", - "config_path": "/var/www/html/wordpress/wp-config.php", - "description": "WordPress is the simplest, most popular way to create your own website or blog. In fact, WordPress powers over 43.6% of all the websites on the Internet. 
Yes – more than one in four websites that you visit are likely powered by WordPress.\n\nOn a slightly more technical level, WordPress is an open-source content management system licensed under GPLv2, which means that anyone can use or modify the WordPress software for free.", - "install_methods": [ - { - "type": "default", - "script": "ct/wordpress.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 5, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/writefreely.json b/frontend/public/json/writefreely.json deleted file mode 100644 index 9c4b07378..000000000 --- a/frontend/public/json/writefreely.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "WriteFreely", - "slug": "writefreely", - "categories": [ - 12 - ], - "date_created": "2026-02-04", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://writefreely.org/docs", - "config_path": "/opt/writefreely/config.ini", - "website": "https://writefreely.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/writefreely-light.webp", - "description": "WriteFreely is free and open source software for easily publishing writing on the web with support for the ActivityPub protocol. 
Use it to start a personal blog — or an entire community.", - "install_methods": [ - { - "type": "default", - "script": "ct/writefreely.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "After installation execute `writefreely user create --admin :` to create your user.", - "type": "info" - } - ] -} diff --git a/frontend/public/json/yt-dlp-webui.json b/frontend/public/json/yt-dlp-webui.json deleted file mode 100644 index 526480a07..000000000 --- a/frontend/public/json/yt-dlp-webui.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "name": "yt-dlp-webui", - "slug": "yt-dlp-webui", - "categories": [ - 11 - ], - "date_created": "2025-03-24", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3033, - "documentation": "https://github.com/marcopiovanello/yt-dlp-web-ui", - "website": "https://github.com/marcopiovanello/yt-dlp-web-ui", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/youtube-dl.webp", - "config_path": "/opt/yt-dlp-webui/config.conf", - "description": "A not so terrible web ui for yt-dlp.\nHigh performance extendeable web ui and RPC server for yt-dlp with low impact on resources.\nCreated for the only purpose of fetching videos from my server/nas and monitor upcoming livestreams.", - "install_methods": [ - { - "type": "default", - "script": "ct/yt-dlp-webui.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": null - }, - "notes": [ - { - "text": "Show password: `cat ~/yt-dlp-webui.creds`", - "type": "info" - }, - { - "text": "The config file is located in `/opt/yt-dlp-webui/config.conf`", - "type": "info" - }, - { - "text": "Make sure to either mount an external path or increase the Disk space afterwards", - "type": "info" - } - ] -} diff --git 
a/frontend/public/json/yubal.json b/frontend/public/json/yubal.json deleted file mode 100644 index d3498659b..000000000 --- a/frontend/public/json/yubal.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Yubal", - "slug": "yubal", - "categories": [ - 13 - ], - "date_created": "2026-01-19", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8000, - "documentation": "https://github.com/guillevc/yubal/blob/master/README.md", - "website": "https://github.com/guillevc/yubal", - "config_path": "/opt/yubal.env", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/yubal.webp", - "description": "YouTube Music album downloader with Spotify metadata auto-tagging.", - "install_methods": [ - { - "type": "default", - "script": "ct/yubal.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 15, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/yunohost.json b/frontend/public/json/yunohost.json deleted file mode 100644 index 1b548d24c..000000000 --- a/frontend/public/json/yunohost.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "YunoHost", - "slug": "yunohost", - "categories": [ - 2 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 80, - "documentation": "https://doc.yunohost.org/", - "website": "https://yunohost.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/yunohost.webp", - "config_path": "", - "description": "YunoHost is an operating system aiming for the simplest administration of a server, and therefore democratize self-hosting, while making sure it stays reliable, secure, ethical and lightweight.", - "install_methods": [ - { - "type": "default", - "script": "ct/yunohost.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 20, - "os": "debian", - "version": "12" - } - } - ], - "default_credentials": { - "username": 
null, - "password": null - }, - "notes": [ - { - "text": "WARNING: Installation sources scripts outside of Community Scripts repo. Please check the source before installing.", - "type": "warning" - } - ] -} diff --git a/frontend/public/json/zabbix.json b/frontend/public/json/zabbix.json deleted file mode 100644 index 0c295ea56..000000000 --- a/frontend/public/json/zabbix.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "name": "Zabbix", - "slug": "zabbix", - "categories": [ - 9 - ], - "date_created": "2024-06-12", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://www.zabbix.com/documentation/current/en/manual", - "website": "https://www.zabbix.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/zabbix.webp", - "config_path": "/etc/zabbix/zabbix_server.conf", - "description": "Zabbix is an all-in-one monitoring solution with a variety of enterprise-grade features available right out of the box.", - "install_methods": [ - { - "type": "default", - "script": "ct/zabbix.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 6, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "Admin", - "password": "zabbix" - }, - "notes": [ - { - "text": "Database credentials: `cat ~/zabbix.creds`", - "type": "info" - }, - { - "text": "You can choose between Zabbix agent (classic) and agent2 (modern) during installation", - "type": "info" - }, - { - "text": "For agent2 the PostgreSQL plugin is installed by default; all plugins are optional", - "type": "info" - }, - { - "text": "If agent2 with NVIDIA plugin is installed in an environment without GPU, the installer disables it automatically", - "type": "info" - } - ] -} diff --git a/frontend/public/json/zammad.json b/frontend/public/json/zammad.json deleted file mode 100644 index c034faa73..000000000 --- a/frontend/public/json/zammad.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Zammad", - "slug": "zammad", - 
"categories": [ - 25 - ], - "date_created": "2024-12-18", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": null, - "documentation": "https://docs.zammad.org/en/latest/", - "website": "https://zammad.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/zammad.webp", - "config_path": "/etc/nginx/sites-available/zammad.conf", - "description": "Zammad is a web based open source helpdesk/customer support system with many features to manage customer communication via several channels like telephone, facebook, twitter, chat and emails. It is distributed under version 3 of the GNU AFFERO General Public License (GNU AGPLv3).", - "install_methods": [ - { - "type": "default", - "script": "ct/zammad.sh", - "resources": { - "cpu": 2, - "ram": 4096, - "hdd": 8, - "os": "debian", - "version": "12" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/zerobyte.json b/frontend/public/json/zerobyte.json deleted file mode 100644 index 220804750..000000000 --- a/frontend/public/json/zerobyte.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "name": "Zerobyte", - "slug": "zerobyte", - "categories": [ - 7 - ], - "date_created": "2026-02-25", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 4096, - "documentation": "https://github.com/nicotsx/zerobyte#readme", - "website": "https://github.com/nicotsx/zerobyte", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/zerobyte.webp", - "config_path": "/opt/zerobyte/.env", - "description": "Zerobyte is a backup automation tool built on top of Restic that provides a modern web interface to schedule, manage, and monitor encrypted backups across multiple storage backends including NFS, SMB, WebDAV, SFTP, S3, and local directories.", - "install_methods": [ - { - "type": "default", - "script": "ct/zerobyte.sh", - "resources": { - "cpu": 2, - "ram": 6144, - "hdd": 10, - "os": "Debian", - 
"version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "For remote mount support (NFS, SMB, WebDAV, SFTP), enable FUSE device passthrough on the LXC container. (FUSE is pre-configured)", - "type": "info" - } - ] -} diff --git a/frontend/public/json/zerotier-one.json b/frontend/public/json/zerotier-one.json deleted file mode 100644 index f74d4199d..000000000 --- a/frontend/public/json/zerotier-one.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Zerotier-One", - "slug": "zerotier-one", - "categories": [ - 4 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3443, - "documentation": "https://docs.zerotier.com/", - "website": "https://www.zerotier.com/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/zerotier.webp", - "config_path": "/opt/key-networks/ztncui/.env", - "description": "ZeroTier is a secure network overlay that allows you to manage all of your network resources as if they were on the same LAN. The software-defined solution can be deployed in minutes from anywhere. 
No matter how many devices you need to connect, or where they are in the world, ZeroTier makes global networking simple.", - "install_methods": [ - { - "type": "default", - "script": "ct/zerotier-one.sh", - "resources": { - "cpu": 1, - "ram": 512, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "admin", - "password": "password" - }, - "notes": [] -} diff --git a/frontend/public/json/zigbee2mqtt.json b/frontend/public/json/zigbee2mqtt.json deleted file mode 100644 index 6bbfee764..000000000 --- a/frontend/public/json/zigbee2mqtt.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "name": "Zigbee2MQTT", - "slug": "zigbee2mqtt", - "categories": [ - 17 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": true, - "interface_port": 9442, - "documentation": "https://www.zigbee2mqtt.io/guide/getting-started/", - "website": "https://www.zigbee2mqtt.io/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/zigbee2mqtt.webp", - "config_path": "debian: /opt/zigbee2mqtt/data/configuration.yaml | alpine: /var/lib/zigbee2mqtt/configuration.yaml", - "description": "Zigbee2MQTT is an open-source software project that allows you to use Zigbee-based smart home devices (such as those sold under the Philips Hue and Ikea Tradfri brands) with MQTT-based home automation systems, like Home Assistant, Node-RED, and others. 
The software acts as a bridge between your Zigbee devices and MQTT, allowing you to control and monitor these devices from your home automation system.", - "install_methods": [ - { - "type": "default", - "script": "ct/zigbee2mqtt.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 5, - "os": "debian", - "version": "13" - } - }, - { - "type": "alpine", - "script": "ct/alpine-zigbee2mqtt.sh", - "resources": { - "cpu": 1, - "ram": 256, - "hdd": 1, - "os": "alpine", - "version": "3.23" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [ - { - "text": "You can find the post-install guide here: `https://github.com/community-scripts/ProxmoxVE/discussions/410`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/zipline.json b/frontend/public/json/zipline.json deleted file mode 100644 index a6c9fe221..000000000 --- a/frontend/public/json/zipline.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Zipline", - "slug": "zipline", - "categories": [ - 11 - ], - "date_created": "2024-09-16", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 3000, - "documentation": "https://zipline.diced.sh/docs/get-started", - "website": "https://zipline.diced.sh/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/zipline.webp", - "config_path": "/opt/zipline/.env", - "description": "Zipline is a file-sharing and URL-shortening server designed for easy setup and extensive features. It allows users to upload files, organize them into folders, create shortened URLs, and manage uploads through a user-friendly dashboard. Additional features include image compression, video thumbnails, password protection, 2FA, OAuth2 registration, and API access for custom control. 
It supports integrations with platforms like Discord.", - "install_methods": [ - { - "type": "default", - "script": "ct/zipline.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 5, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "administrator", - "password": "password" - }, - "notes": [] -} diff --git a/frontend/public/json/zitadel.json b/frontend/public/json/zitadel.json deleted file mode 100644 index 7dd65be04..000000000 --- a/frontend/public/json/zitadel.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "Zitadel", - "slug": "zitadel", - "categories": [ - 6 - ], - "date_created": "2025-02-10", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://zitadel.com/docs/guides/overview", - "website": "https://zitadel.com", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/zitadel.webp", - "config_path": "/opt/zitadel/config.yaml", - "description": "Zitadel is an open-source identity and access management (IAM) solution designed to provide secure authentication, authorization, and user management for modern applications and services. Built with a focus on flexibility, scalability, and security, Zitadel offers a comprehensive set of features for developers and organizations looking to implement robust identity management.", - "install_methods": [ - { - "type": "default", - "script": "ct/zitadel.sh", - "resources": { - "cpu": 1, - "ram": 1024, - "hdd": 8, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": "zitadel-admin@zitadel.localhost", - "password": "Password1!" 
- }, - "notes": [ - { - "text": "Application credentials: `cat ~/zitadel.creds`", - "type": "info" - }, - { - "text": "Change the ExternalDomain value in `/opt/zitadel/config.yaml` to your domain/hostname/IP and run `bash zitadel-rerun.sh`", - "type": "info" - } - ] -} diff --git a/frontend/public/json/zoraxy.json b/frontend/public/json/zoraxy.json deleted file mode 100644 index 0262e6710..000000000 --- a/frontend/public/json/zoraxy.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Zoraxy", - "slug": "zoraxy", - "categories": [ - 21 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8000, - "documentation": "https://github.com/tobychui/zoraxy/wiki", - "website": "https://zoraxy.aroz.org/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/zoraxy.webp", - "config_path": "", - "description": "Zoraxy is an all in one homelab network routing solution.", - "install_methods": [ - { - "type": "default", - "script": "ct/zoraxy.sh", - "resources": { - "cpu": 2, - "ram": 2048, - "hdd": 6, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/zot-registry.json b/frontend/public/json/zot-registry.json deleted file mode 100644 index fec885245..000000000 --- a/frontend/public/json/zot-registry.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Zot Registry", - "slug": "zot", - "categories": [ - 3 - ], - "date_created": "2025-06-06", - "type": "ct", - "updateable": true, - "privileged": false, - "interface_port": 8080, - "documentation": "https://zotregistry.dev/docs/intro/", - "website": "https://zotregistry.dev/", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/zot-registry.webp", - "config_path": "/etc/zot/config.json", - "description": "Zot is a cloud-native OCI image registry focused on extensibility, maintainability, and performance. 
It supports advanced features such as Web UI, security scanning, authentication via htpasswd and OIDC, and more.", - "install_methods": [ - { - "type": "default", - "script": "ct/zot-registry.sh", - "resources": { - "cpu": 1, - "ram": 4096, - "hdd": 5, - "os": "Debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/json/zwave-js-ui.json b/frontend/public/json/zwave-js-ui.json deleted file mode 100644 index 4fb6b1357..000000000 --- a/frontend/public/json/zwave-js-ui.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "name": "Z-Wave JS UI", - "slug": "zwave-js-ui", - "categories": [ - 17 - ], - "date_created": "2024-05-02", - "type": "ct", - "updateable": true, - "privileged": true, - "interface_port": 8091, - "documentation": "https://zwave-js.github.io/zwave-js-ui/#/", - "website": "https://github.com/zwave-js/zwave-js-ui#", - "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/z-wave-js-ui.webp", - "config_path": "/opt/.env", - "description": "Z-Wave JS UI is an open-source software that serves as a gateway between Z-Wave devices and MQTT (Message Queuing Telemetry Transport) protocol, allowing users to control and monitor their Z-Wave devices via a user interface. 
The software provides a configurable platform to manage Z-Wave networks and integrate with other smart home systems through MQTT.", - "install_methods": [ - { - "type": "default", - "script": "ct/zwave-js-ui.sh", - "resources": { - "cpu": 2, - "ram": 1024, - "hdd": 4, - "os": "debian", - "version": "13" - } - } - ], - "default_credentials": { - "username": null, - "password": null - }, - "notes": [] -} diff --git a/frontend/public/logo.png b/frontend/public/logo.png deleted file mode 100644 index 1d2186daa..000000000 Binary files a/frontend/public/logo.png and /dev/null differ diff --git a/frontend/src/app/api/categories/route.ts b/frontend/src/app/api/categories/route.ts deleted file mode 100644 index a2c5f7775..000000000 --- a/frontend/src/app/api/categories/route.ts +++ /dev/null @@ -1,63 +0,0 @@ -import { NextResponse } from "next/server"; -import { promises as fs } from "node:fs"; -import path from "node:path"; - -import type { Metadata, Script } from "@/lib/types"; - -export const dynamic = "force-static"; - -const jsonDir = "public/json"; -const metadataFileName = "metadata.json"; -const versionFileName = "version.json"; -const encoding = "utf-8"; - -async function getMetadata() { - const filePath = path.resolve(jsonDir, metadataFileName); - const fileContent = await fs.readFile(filePath, encoding); - const metadata: Metadata = JSON.parse(fileContent); - return metadata; -} - -async function getScripts() { - const filePaths = (await fs.readdir(jsonDir)) - .filter(fileName => - fileName.endsWith(".json") - && fileName !== metadataFileName - && fileName !== versionFileName, - ) - .map(fileName => path.resolve(jsonDir, fileName)); - - const scripts = await Promise.all( - filePaths.map(async (filePath) => { - const fileContent = await fs.readFile(filePath, encoding); - const script: Script = JSON.parse(fileContent); - return script; - }), - ); - return scripts; -} - -export async function GET() { - try { - const metadata = await getMetadata(); - const scripts = 
await getScripts(); - - const categories = metadata.categories - .map((category) => { - category.scripts = scripts.filter(script => - script.categories?.includes(category.id), - ); - return category; - }) - .sort((a, b) => a.sort_order - b.sort_order); - - return NextResponse.json(categories); - } - catch (error) { - console.error(error as Error); - return NextResponse.json( - { error: "Failed to fetch categories" }, - { status: 500 }, - ); - } -} diff --git a/frontend/src/app/api/github-versions/route.ts b/frontend/src/app/api/github-versions/route.ts deleted file mode 100644 index b24327607..000000000 --- a/frontend/src/app/api/github-versions/route.ts +++ /dev/null @@ -1,36 +0,0 @@ -import { NextResponse } from "next/server"; -import { promises as fs } from "node:fs"; -import path from "node:path"; - -import type { GitHubVersionsResponse } from "@/lib/types"; - -export const dynamic = "force-static"; - -const jsonDir = "public/json"; -const versionsFileName = "github-versions.json"; -const encoding = "utf-8"; - -async function getVersions(): Promise { - const filePath = path.resolve(jsonDir, versionsFileName); - const fileContent = await fs.readFile(filePath, encoding); - const data: GitHubVersionsResponse = JSON.parse(fileContent); - return data; -} - -export async function GET() { - try { - const versions = await getVersions(); - return NextResponse.json(versions); - } - catch (error) { - console.error(error); - const err = error as globalThis.Error; - return NextResponse.json({ - generated: "", - versions: [], - error: err.message || "An unexpected error occurred", - }, { - status: 500, - }); - } -} diff --git a/frontend/src/app/api/versions/route.ts b/frontend/src/app/api/versions/route.ts deleted file mode 100644 index ca9e19758..000000000 --- a/frontend/src/app/api/versions/route.ts +++ /dev/null @@ -1,48 +0,0 @@ -// import Error from "next/error"; -import { NextResponse } from "next/server"; -import { promises as fs } from "node:fs"; -import path from 
"node:path"; - -export const dynamic = "force-static"; - -const jsonDir = "public/json"; -const versionsFileName = "versions.json"; -const encoding = "utf-8"; - -interface LegacyVersion { - name: string; - version: string; - date: string; -} - -async function getVersions() { - const filePath = path.resolve(jsonDir, versionsFileName); - const fileContent = await fs.readFile(filePath, encoding); - const versions: LegacyVersion[] = JSON.parse(fileContent); - - const modifiedVersions = versions.map((version) => { - let newName = version.name; - newName = newName.toLowerCase().replace(/[^a-z0-9/]/g, ""); - return { ...version, name: newName, date: new Date(version.date) }; - }); - - return modifiedVersions; -} - -export async function GET() { - try { - const versions = await getVersions(); - return NextResponse.json(versions); - } - catch (error) { - console.error(error); - const err = error as globalThis.Error; - return NextResponse.json({ - name: err.name, - message: err.message || "An unexpected error occurred", - version: "No version found - Error", - }, { - status: 500, - }); - } -} diff --git a/frontend/src/app/data/page.tsx b/frontend/src/app/data/page.tsx deleted file mode 100644 index 374aac0da..000000000 --- a/frontend/src/app/data/page.tsx +++ /dev/null @@ -1,509 +0,0 @@ -"use client"; - -import { - ArrowUpDown, - Box, - CheckCircle2, - ChevronLeft, - ChevronRight, - List, - Loader2, - Trophy, - XCircle, -} from "lucide-react"; -import { useEffect, useMemo, useState } from "react"; -import { Bar, BarChart, CartesianGrid, Cell, LabelList, XAxis } from "recharts"; - -import type { ChartConfig } from "@/components/ui/chart"; - -import { formattedBadge } from "@/components/command-menu"; -import { Badge } from "@/components/ui/badge"; -import { Button } from "@/components/ui/button"; -import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card"; -import { ChartContainer, ChartTooltip, ChartTooltipContent } from 
"@/components/ui/chart"; -import { - Dialog, - DialogContent, - DialogDescription, - DialogHeader, - DialogTitle, - DialogTrigger, -} from "@/components/ui/dialog"; -import { ScrollArea } from "@/components/ui/scroll-area"; -import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select"; -import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from "@/components/ui/table"; -import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip"; - -type DataModel = { - id: number; - ct_type: number; - disk_size: number; - core_count: number; - ram_size: number; - os_type: string; - os_version: string; - disableip6: string; - nsapp: string; - created_at: string; - method: string; - pve_version: string; - status: string; - error: string; - type: string; - [key: string]: any; -}; - -type SummaryData = { - total_entries: number; - status_count: Record; - nsapp_count: Record; -}; - -// Chart colors optimized for both light and dark modes -// Medium-toned colors that are visible and not too flashy in both themes -const CHART_COLORS = [ - "#5B8DEF", // blue - medium tone - "#4ECDC4", // teal - medium tone - "#FF8C42", // orange - medium tone - "#A78BFA", // purple - medium tone - "#F472B6", // pink - medium tone - "#38BDF8", // cyan - medium tone - "#4ADE80", // green - medium tone - "#FBBF24", // yellow - medium tone - "#818CF8", // indigo - medium tone - "#FB7185", // rose - medium tone - "#2DD4BF", // turquoise - medium tone - "#C084FC", // violet - medium tone - "#60A5FA", // sky blue - medium tone - "#84CC16", // lime - medium tone - "#F59E0B", // amber - medium tone - "#A855F7", // purple - medium tone - "#10B981", // emerald - medium tone - "#EAB308", // gold - medium tone - "#3B82F6", // royal blue - medium tone - "#EF4444", // red - medium tone -]; - -const chartConfigApps = { - count: { - label: "Installations", - color: "hsl(var(--chart-1))", - }, -} satisfies ChartConfig; - 
-export default function DataPage() { - const [data, setData] = useState([]); - const [summary, setSummary] = useState(null); - const [summaryLoading, setSummaryLoading] = useState(true); - const [dataLoading, setDataLoading] = useState(true); - const [error, setError] = useState(null); - const [currentPage, setCurrentPage] = useState(1); - const [itemsPerPage, setItemsPerPage] = useState(25); - const [sortConfig, setSortConfig] = useState<{ - key: string; - direction: "ascending" | "descending"; - } | null>(null); - - const nf = new Intl.NumberFormat("en-US", { maximumFractionDigits: 0 }); - - // Fetch summary only once on mount - useEffect(() => { - const fetchSummary = async () => { - try { - const summaryRes = await fetch("https://api.htl-braunau.at/data/summary"); - if (!summaryRes.ok) { - throw new Error(`Failed to fetch summary: ${summaryRes.statusText}`); - } - const summaryData: SummaryData = await summaryRes.json(); - setSummary(summaryData); - } catch (err) { - setError((err as Error).message); - } finally { - setSummaryLoading(false); - } - }; - - fetchSummary(); - }, []); - - useEffect(() => { - const fetchData = async () => { - setDataLoading(true); - try { - const dataRes = await fetch(`https://api.htl-braunau.at/data/paginated?page=${currentPage}&limit=${itemsPerPage}`); - if (!dataRes.ok) { - throw new Error(`Failed to fetch data: ${dataRes.statusText}`); - } - const pageData: DataModel[] = await dataRes.json(); - setData(pageData); - } catch (err) { - setError((err as Error).message); - } finally { - setDataLoading(false); - } - }; - - fetchData(); - }, [currentPage, itemsPerPage]); - - const sortedData = useMemo(() => { - if (!sortConfig) return data; - return [...data].sort((a, b) => { - if (a[sortConfig.key] < b[sortConfig.key]) { - return sortConfig.direction === "ascending" ? -1 : 1; - } - if (a[sortConfig.key] > b[sortConfig.key]) { - return sortConfig.direction === "ascending" ? 
1 : -1; - } - return 0; - }); - }, [data, sortConfig]); - - const requestSort = (key: string) => { - let direction: "ascending" | "descending" = "ascending"; - if (sortConfig && sortConfig.key === key && sortConfig.direction === "ascending") { - direction = "descending"; - } - setSortConfig({ key, direction }); - }; - - const formatDate = (dateString: string): string => { - const date = new Date(dateString); - return new Intl.DateTimeFormat("en-US", { - dateStyle: "medium", - timeStyle: "short", - }).format(date); - }; - - const getTypeBadge = (type: string) => { - if (type === "lxc") return formattedBadge("ct"); - if (type === "vm") return formattedBadge("vm"); - return null; - }; - - // Stats calculations - const successCount = summary?.status_count.done ?? 0; - const failureCount = summary?.status_count.failed ?? 0; - const totalCount = summary?.total_entries ?? 0; - const successRate = totalCount > 0 ? (successCount / totalCount) * 100 : 0; - - const allApps = useMemo(() => { - if (!summary?.nsapp_count) return []; - return Object.entries(summary.nsapp_count).sort(([, a], [, b]) => b - a); - }, [summary]); - - const topApps = useMemo(() => { - return allApps.slice(0, 15); - }, [allApps]); - - const mostPopularApp = topApps[0]; - - // Chart Data - const appsChartData = topApps.map(([name, count], index) => ({ - app: name, - count, - fill: CHART_COLORS[index % CHART_COLORS.length], - })); - - if (error) { - return ( -
-

- Error loading data: - {error} -

-
- ); - } - - return ( -
-
-
- {/* Header */} -
-

Analytics

-

Overview of container installations and system statistics.

-
- - {/* Widgets */} -
- - - Total Created - - - -
{nf.format(totalCount)}
-

Total LXC/VM entries found

-
-
- - - - Success Rate - - - -
{successRate.toFixed(1)}%
-

{nf.format(successCount)} successful installations

-
-
- - - - Failures - - - -
{nf.format(failureCount)}
-

Installations encountered errors

-
-
- - - - Most Popular - - - -
{mostPopularApp ? mostPopularApp[0] : "N/A"}
-

- {mostPopularApp ? nf.format(mostPopularApp[1]) : 0} installations -

-
-
-
- - {/* Graphs */} - - -
- Top Applications - The most frequently installed applications. -
- - - - - - - Application Statistics - Installation counts for all {allApps.length} applications. - - -
- {allApps.map(([name, count], index) => ( -
-
- {index + 1}. - {name} -
- {nf.format(count)} -
- ))} -
-
-
-
-
- -
- {summaryLoading ? ( -
- -
- ) : ( - - - - (value.length > 8 ? `${value.slice(0, 8)}...` : value)} - /> - } /> - - {appsChartData.map((entry, index) => ( - - ))} - - - - - )} -
-
-
- - {/* Data Table */} - - -
- Installation Log - Detailed records of all container creation attempts. -
-
- -
-
- -
- - - - requestSort("status")}> - Status - {sortConfig?.key === "status" && } - - requestSort("type")}> - Type - {sortConfig?.key === "type" && } - - requestSort("nsapp")}> - Application - {sortConfig?.key === "nsapp" && } - - requestSort("os_type")}> - OS - {sortConfig?.key === "os_type" && } - - requestSort("disk_size")} - > - Disk Size - {sortConfig?.key === "disk_size" && } - - requestSort("core_count")} - > - Core Count - {sortConfig?.key === "core_count" && } - - requestSort("ram_size")} - > - RAM Size - {sortConfig?.key === "ram_size" && } - - requestSort("created_at")}> - Created At - {sortConfig?.key === "created_at" && } - - - - - {dataLoading ? ( - - -
- Loading data... -
-
-
- ) : sortedData.length > 0 ? ( - sortedData.map((item, idx) => ( - - - {item.status === "done" ? ( - Success - ) : item.status === "failed" ? ( - - - - Failed - - -

Error:

-

{item.error || "Unknown error"}

-
-
-
- ) : item.status === "installing" ? ( - Installing - ) : ( - {item.status} - )} -
- - {getTypeBadge(item.type) || {item.type}} - - {item.nsapp} - - {item.os_type} {item.os_version} - - - {item.disk_size} - GB - - {item.core_count} - - {item.ram_size} - MB - - {formatDate(item.created_at)} -
- )) - ) : ( - - - No results found. - - - )} -
-
-
- -
- -
Page {currentPage}
- -
-
-
-
-
-
- ); -} diff --git a/frontend/src/app/favicon.ico b/frontend/src/app/favicon.ico deleted file mode 100644 index 2ef326ee6..000000000 Binary files a/frontend/src/app/favicon.ico and /dev/null differ diff --git a/frontend/src/app/json-editor/_components/categories.tsx b/frontend/src/app/json-editor/_components/categories.tsx deleted file mode 100644 index 3d8b9f0dd..000000000 --- a/frontend/src/app/json-editor/_components/categories.tsx +++ /dev/null @@ -1,125 +0,0 @@ -import type { z } from "zod"; - -import { memo } from "react"; - -import type { Category } from "@/lib/types"; - -import { - Select, - SelectContent, - SelectItem, - SelectTrigger, - SelectValue, -} from "@/components/ui/select"; -import { Label } from "@/components/ui/label"; -import { cn } from "@/lib/utils"; - -import type { Script } from "../_schemas/schemas"; - -type CategoryProps = { - script: Script; - setScript: (script: Script) => void; - setIsValid: (isValid: boolean) => void; - setZodErrors: (zodErrors: z.ZodError | null) => void; - categories: Category[]; -}; - -const CategoryTag = memo(({ - category, - onRemove, -}: { - category: Category; - onRemove: () => void; -}) => ( - - {category.name} - - -)); - -CategoryTag.displayName = "CategoryTag"; - -function Categories({ - script, - setScript, - categories, -}: Omit) { - const addCategory = (categoryId: number) => { - setScript({ - ...script, - categories: [...new Set([...script.categories, categoryId])], - }); - }; - - const removeCategory = (categoryId: number) => { - setScript({ - ...script, - categories: script.categories.filter((id: number) => id !== categoryId), - }); - }; - - const categoryMap = new Map(categories.map(c => [c.id, c])); - - return ( -
- - -
- {script.categories.map((categoryId) => { - const category = categoryMap.get(categoryId); - return category - ? ( - removeCategory(categoryId)} - /> - ) - : null; - })} -
-
- ); -} - -export default memo(Categories); diff --git a/frontend/src/app/json-editor/_components/install-method.tsx b/frontend/src/app/json-editor/_components/install-method.tsx deleted file mode 100644 index b7bf9439b..000000000 --- a/frontend/src/app/json-editor/_components/install-method.tsx +++ /dev/null @@ -1,233 +0,0 @@ -import type { z } from "zod"; - -import { PlusCircle, Trash2 } from "lucide-react"; -import { memo, useCallback, useRef } from "react"; - -import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select"; -import { OperatingSystems } from "@/config/site-config"; -import { Button } from "@/components/ui/button"; -import { Input } from "@/components/ui/input"; - -import type { Script } from "../_schemas/schemas"; - -import { InstallMethodSchema, ScriptSchema } from "../_schemas/schemas"; - -type InstallMethodProps = { - script: Script; - setScript: (value: Script | ((prevState: Script) => Script)) => void; - setIsValid: (isValid: boolean) => void; - setZodErrors: (zodErrors: z.ZodError | null) => void; -}; - -function InstallMethod({ script, setScript, setIsValid, setZodErrors }: InstallMethodProps) { - const cpuRefs = useRef<(HTMLInputElement | null)[]>([]); - const ramRefs = useRef<(HTMLInputElement | null)[]>([]); - const hddRefs = useRef<(HTMLInputElement | null)[]>([]); - - const addInstallMethod = useCallback(() => { - setScript((prev) => { - const { type, slug } = prev; - const newMethodType = "default"; - - let scriptPath = ""; - - if (type === "pve") { - scriptPath = `tools/pve/${slug}.sh`; - } - else if (type === "addon") { - scriptPath = `tools/addon/${slug}.sh`; - } - else { - scriptPath = `${type}/${slug}.sh`; - } - - const method = InstallMethodSchema.parse({ - type: newMethodType, - script: scriptPath, - resources: { - cpu: null, - ram: null, - hdd: null, - os: null, - version: null, - }, - }); - - return { - ...prev, - install_methods: [...prev.install_methods, method], - }; - }); - }, 
[setScript]); - - const updateInstallMethod = useCallback( - ( - index: number, - key: keyof Script["install_methods"][number], - value: Script["install_methods"][number][keyof Script["install_methods"][number]], - ) => { - setScript((prev) => { - const updatedMethods = prev.install_methods.map((method, i) => { - if (i === index) { - const updatedMethod = { ...method, [key]: value }; - - if (key === "type") { - updatedMethod.script - = value === "alpine" ? `${prev.type}/alpine-${prev.slug}.sh` : `${prev.type}/${prev.slug}.sh`; - - // Set OS to Alpine and reset version if type is alpine - if (value === "alpine") { - updatedMethod.resources.os = "Alpine"; - updatedMethod.resources.version = null; - } - } - - return updatedMethod; - } - return method; - }); - - const updated = { - ...prev, - install_methods: updatedMethods, - }; - - const result = ScriptSchema.safeParse(updated); - setIsValid(result.success); - if (!result.success) { - setZodErrors(result.error); - } - else { - setZodErrors(null); - } - return updated; - }); - }, - [setScript, setIsValid, setZodErrors], - ); - - const removeInstallMethod = useCallback( - (index: number) => { - setScript(prev => ({ - ...prev, - install_methods: prev.install_methods.filter((_, i) => i !== index), - })); - }, - [setScript], - ); - - return ( - <> -

Install Methods

- {script.install_methods.map((method, index) => ( -
- -
- { - cpuRefs.current[index] = el; - }} - placeholder="CPU in Cores" - type="number" - value={method.resources.cpu || ""} - onChange={e => - updateInstallMethod(index, "resources", { - ...method.resources, - cpu: e.target.value ? Number(e.target.value) : null, - })} - /> - { - ramRefs.current[index] = el; - }} - placeholder="RAM in MB" - type="number" - value={method.resources.ram || ""} - onChange={e => - updateInstallMethod(index, "resources", { - ...method.resources, - ram: e.target.value ? Number(e.target.value) : null, - })} - /> - { - hddRefs.current[index] = el; - }} - placeholder="HDD in GB" - type="number" - value={method.resources.hdd || ""} - onChange={e => - updateInstallMethod(index, "resources", { - ...method.resources, - hdd: e.target.value ? Number(e.target.value) : null, - })} - /> -
-
- - -
- -
- ))} - - - ); -} - -export default memo(InstallMethod); diff --git a/frontend/src/app/json-editor/_components/note.tsx b/frontend/src/app/json-editor/_components/note.tsx deleted file mode 100644 index 4bb2b20ec..000000000 --- a/frontend/src/app/json-editor/_components/note.tsx +++ /dev/null @@ -1,159 +0,0 @@ -import type { z } from "zod"; - -import { PlusCircle, Trash2 } from "lucide-react"; -import { memo, useCallback, useRef } from "react"; - -import { - Select, - SelectContent, - SelectItem, - SelectTrigger, - SelectValue, -} from "@/components/ui/select"; -import { AlertColors } from "@/config/site-config"; -import { Button } from "@/components/ui/button"; -import { Input } from "@/components/ui/input"; -import { cn } from "@/lib/utils"; - -import type { Script } from "../_schemas/schemas"; - -import { ScriptSchema } from "../_schemas/schemas"; - -const NoteItem = memo( - ({ - note, - index, - updateNote, - removeNote, - }: { - note: Script["notes"][number]; - index: number; - updateNote: (index: number, key: keyof Script["notes"][number], value: string) => void; - removeNote: (index: number) => void; - }) => { - const inputRef = useRef(null); - - const handleTextChange = useCallback((e: React.ChangeEvent) => { - updateNote(index, "text", e.target.value); - setTimeout(() => { - inputRef.current?.focus(); - }, 0); - }, [index, updateNote]); - - return ( -
- - - -
- ); - }, -); - -type NoteProps = { - script: Script; - setScript: (script: Script) => void; - setIsValid: (isValid: boolean) => void; - setZodErrors: (zodErrors: z.ZodError | null) => void; -}; - -function Note({ - script, - setScript, - setIsValid, - setZodErrors, -}: NoteProps) { - const inputRefs = useRef<(HTMLInputElement | null)[]>([]); - - const addNote = useCallback(() => { - setScript({ - ...script, - notes: [...script.notes, { text: "", type: "info" }], - }); - }, [script, setScript]); - - const updateNote = useCallback(( - index: number, - key: keyof Script["notes"][number], - value: string, - ) => { - const updated: Script = { - ...script, - notes: script.notes.map((note, i) => - i === index ? { ...note, [key]: value } : note, - ), - }; - const result = ScriptSchema.safeParse(updated); - setIsValid(result.success); - setZodErrors(result.success ? null : result.error); - setScript(updated); - // Restore focus after state update - if (key === "text") { - setTimeout(() => { - inputRefs.current[index]?.focus(); - }, 0); - } - }, [script, setScript, setIsValid, setZodErrors]); - - const removeNote = useCallback((index: number) => { - setScript({ - ...script, - notes: script.notes.filter((_, i) => i !== index), - }); - }, [script, setScript]); - - return ( - <> -

Notes

- {script.notes.map((note, index) => ( - - ))} - - - ); -} - -NoteItem.displayName = "NoteItem"; - -export default memo(Note); diff --git a/frontend/src/app/json-editor/_schemas/schemas.ts b/frontend/src/app/json-editor/_schemas/schemas.ts deleted file mode 100644 index 8b803d2b9..000000000 --- a/frontend/src/app/json-editor/_schemas/schemas.ts +++ /dev/null @@ -1,59 +0,0 @@ -import { z } from "zod"; -import { AlertColors } from "@/config/site-config"; - -export const InstallMethodSchema = z.object({ - type: z.enum(["default", "alpine"], { - message: "Type must be either 'default' or 'alpine'", - }), - script: z.string().min(1, "Script content cannot be empty"), - resources: z.object({ - cpu: z.number().nullable(), - ram: z.number().nullable(), - hdd: z.number().nullable(), - os: z.string().nullable(), - version: z.string().nullable(), - }), -}); - -const NoteSchema = z.object({ - text: z.string().min(1, "Note text cannot be empty"), - type: z.enum(Object.keys(AlertColors) as [keyof typeof AlertColors, ...(keyof typeof AlertColors)[]], { - message: `Type must be one of: ${Object.keys(AlertColors).join(", ")}`, - }), -}); - -export const ScriptSchema = z.object({ - name: z.string().min(1, "Name is required"), - slug: z.string().min(1, "Slug is required"), - categories: z.array(z.number()), - date_created: z.string().regex(/^\d{4}-\d{2}-\d{2}$/, "Date must be in YYYY-MM-DD format").min(1, "Date is required"), - type: z.enum(["vm", "ct", "pve", "addon", "turnkey"], { - message: "Type must be either 'vm', 'ct', 'pve', 'addon' or 'turnkey'", - }), - updateable: z.boolean(), - privileged: z.boolean(), - interface_port: z.number().nullable(), - documentation: z.string().nullable(), - website: z.url().nullable(), - logo: z.url().nullable(), - config_path: z.string(), - description: z.string().min(1, "Description is required"), - disable: z.boolean().optional(), - disable_description: z.string().optional(), - install_methods: z.array(InstallMethodSchema).min(1, "At least 
one install method is required"), - default_credentials: z.object({ - username: z.string().nullable(), - password: z.string().nullable(), - }), - notes: z.array(NoteSchema).optional().default([]), -}).refine((data) => { - if (data.disable === true && !data.disable_description) { - return false; - } - return true; -}, { - message: "disable_description is required when disable is true", - path: ["disable_description"], -}); - -export type Script = z.infer; diff --git a/frontend/src/app/json-editor/page.tsx b/frontend/src/app/json-editor/page.tsx deleted file mode 100644 index 48e6f2a8f..000000000 --- a/frontend/src/app/json-editor/page.tsx +++ /dev/null @@ -1,590 +0,0 @@ -"use client"; - -import type { z } from "zod"; - -import { githubGist, nord } from "react-syntax-highlighter/dist/esm/styles/hljs"; -import { CalendarIcon, Check, Clipboard, Download } from "lucide-react"; -import { useCallback, useEffect, useMemo, useState } from "react"; -import SyntaxHighlighter from "react-syntax-highlighter"; -import { useTheme } from "next-themes"; -import { format } from "date-fns"; -import { toast } from "sonner"; -import Image from "next/image"; - -import type { Category } from "@/lib/types"; - -import { DropdownMenu, DropdownMenuContent, DropdownMenuGroup, DropdownMenuItem, DropdownMenuTrigger } from "@/components/ui/dropdown-menu"; -import { Dialog, DialogContent, DialogDescription, DialogHeader, DialogTitle, DialogTrigger } from "@/components/ui/dialog"; -import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select"; -import { Popover, PopoverContent, PopoverTrigger } from "@/components/ui/popover"; -import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"; -import { Alert, AlertDescription, AlertTitle } from "@/components/ui/alert"; -import { Calendar } from "@/components/ui/calendar"; -import { Textarea } from "@/components/ui/textarea"; -import { Button } from "@/components/ui/button"; -import { Switch } 
from "@/components/ui/switch"; -import { basePath } from "@/config/site-config"; -import { Input } from "@/components/ui/input"; -import { Label } from "@/components/ui/label"; -import { fetchCategories } from "@/lib/data"; -import { cn } from "@/lib/utils"; - -import type { Script } from "./_schemas/schemas"; - -import { ScriptItem } from "../scripts/_components/script-item"; -import InstallMethod from "./_components/install-method"; -import { ScriptSchema } from "./_schemas/schemas"; -import Categories from "./_components/categories"; -import Note from "./_components/note"; - -function search(scripts: Script[], query: string): Script[] { - const queryLower = query.toLowerCase().trim(); - const searchWords = queryLower.split(/\s+/).filter(Boolean); - - return scripts - .map((script) => { - const nameLower = script.name.toLowerCase(); - const descriptionLower = (script.description || "").toLowerCase(); - - let score = 0; - - for (const word of searchWords) { - if (nameLower.includes(word)) { - score += 10; - } - if (descriptionLower.includes(word)) { - score += 5; - } - } - - return { script, score }; - }) - .filter(({ score }) => score > 0) - .sort((a, b) => b.score - a.score) - .slice(0, 20) - .map(({ script }) => script); -} - -const initialScript: Script = { - name: "", - slug: "", - categories: [], - date_created: format(new Date(), "yyyy-MM-dd"), - type: "ct", - updateable: false, - privileged: false, - interface_port: null, - documentation: null, - config_path: "", - website: null, - logo: null, - description: "", - disable: undefined, - disable_description: undefined, - install_methods: [], - default_credentials: { - username: null, - password: null, - }, - notes: [], -}; - -export default function JSONGenerator() { - const { theme } = useTheme(); - const [script, setScript] = useState' /opt/drawdb/dist/index.html +msg_ok "Applied Polyfill" + +msg_info "Configuring Nginx" +cat </etc/nginx/conf.d/drawdb.conf +server { + listen 3000; + server_name _; + root 
/opt/drawdb/dist; + + location / { + try_files \$uri /index.html; + } +} +EOF +rm -f /etc/nginx/sites-enabled/default +systemctl enable -q --now nginx +systemctl reload nginx +msg_ok "Configured Nginx" + +motd_ssh +customize +cleanup_lxc diff --git a/install/elementsynapse-install.sh b/install/elementsynapse-install.sh index 2d4523589..226b965b3 100644 --- a/install/elementsynapse-install.sh +++ b/install/elementsynapse-install.sh @@ -43,6 +43,24 @@ SECRET=$(openssl rand -hex 32) ADMIN_PASS="$(openssl rand -base64 18 | cut -c1-13)" echo "enable_registration_without_verification: true" >>/etc/matrix-synapse/homeserver.yaml echo "registration_shared_secret: ${SECRET}" >>/etc/matrix-synapse/homeserver.yaml + +cat <>/etc/matrix-synapse/homeserver.yaml + +# MatrixRTC / Element Call configuration +experimental_features: + msc3266_enabled: true + msc4222_enabled: true + +max_event_delay_duration: 24h + +rc_message: + per_second: 0.5 + burst_count: 30 + +rc_delayed_event_mgmt: + per_second: 1 + burst_count: 20 +EOF systemctl enable -q --now matrix-synapse $STD register_new_matrix_user -a --user admin --password "$ADMIN_PASS" --config /etc/matrix-synapse/homeserver.yaml { diff --git a/install/emby-install.sh b/install/emby-install.sh index 73ed6143c..e7c29fbac 100644 --- a/install/emby-install.sh +++ b/install/emby-install.sh @@ -13,17 +13,9 @@ setting_up_container network_check update_os -setup_hwaccel - fetch_and_deploy_gh_release "emby" "MediaBrowser/Emby.Releases" "binary" -msg_info "Configuring Emby" -if [[ "$CTTYPE" == "0" ]]; then - sed -i -e 's/^ssl-cert:x:104:$/render:x:104:root,emby/' -e 's/^render:x:108:root,emby$/ssl-cert:x:108:/' /etc/group -else - sed -i -e 's/^ssl-cert:x:104:$/render:x:104:emby/' -e 's/^render:x:108:emby$/ssl-cert:x:108:/' /etc/group -fi -msg_ok "Configured Emby" +setup_hwaccel "emby" motd_ssh customize diff --git a/install/endurain-install.sh b/install/endurain-install.sh index b375880e6..2ec89049c 100644 --- a/install/endurain-install.sh 
+++ b/install/endurain-install.sh @@ -3,7 +3,7 @@ # Copyright (c) 2021-2026 community-scripts ORG # Author: johanngrobe # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://github.com/joaovitoriasilva/endurain +# Source: https://codeberg.org/endurain-project/endurain source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" color @@ -21,7 +21,7 @@ PYTHON_VERSION="3.13" setup_uv NODE_VERSION="24" setup_nodejs PG_VERSION="17" PG_MODULES="postgis" setup_postgresql PG_DB_NAME="enduraindb" PG_DB_USER="endurain" setup_postgresql_db -fetch_and_deploy_gh_release "endurain" "endurain-project/endurain" "tarball" "latest" "/opt/endurain" +fetch_and_deploy_codeberg_release "endurain" "endurain-project/endurain" "tarball" "latest" "/opt/endurain" msg_info "Setting up Endurain" cd /opt/endurain diff --git a/install/erpnext-install.sh b/install/erpnext-install.sh new file mode 100644 index 000000000..757ef95fc --- /dev/null +++ b/install/erpnext-install.sh @@ -0,0 +1,113 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/frappe/erpnext + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y \ + git \ + build-essential \ + python3-dev \ + libffi-dev \ + libssl-dev \ + redis-server \ + nginx \ + supervisor \ + fail2ban \ + xvfb \ + libfontconfig1 \ + libxrender1 \ + fontconfig \ + libjpeg-dev \ + libmariadb-dev \ + python3-pip +msg_ok "Installed Dependencies" + +NODE_VERSION="24" NODE_MODULE="yarn" setup_nodejs +UV_PYTHON="3.13" setup_uv +setup_mariadb + +msg_info "Configuring MariaDB for ERPNext" +cat </etc/mysql/mariadb.conf.d/50-erpnext.cnf +[mysqld] +character-set-server=utf8mb4 +collation-server=utf8mb4_unicode_ci + +[client] +default-character-set=utf8mb4 
+EOF +$STD systemctl restart mariadb +msg_ok "Configured MariaDB for ERPNext" + +msg_info "Installing wkhtmltopdf" +WKHTMLTOPDF_URL="https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6.1-3/wkhtmltox_0.12.6.1-3.bookworm_amd64.deb" +$STD curl -fsSL -o /tmp/wkhtmltox.deb "$WKHTMLTOPDF_URL" +$STD apt install -y /tmp/wkhtmltox.deb +rm -f /tmp/wkhtmltox.deb +msg_ok "Installed wkhtmltopdf" + +msg_info "Installing Frappe Bench" +useradd -m -s /bin/bash frappe +chown frappe:frappe /opt +echo "frappe ALL=(ALL) NOPASSWD:ALL" >/etc/sudoers.d/frappe +$STD sudo -u frappe bash -c 'export PATH="$HOME/.local/bin:$PATH"; uv tool install frappe-bench' +msg_ok "Installed Frappe Bench" + +msg_info "Initializing Frappe Bench" +ADMIN_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13) +DB_ROOT_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13) +mysql -u root -e "ALTER USER 'root'@'localhost' IDENTIFIED BY '${DB_ROOT_PASS}'; FLUSH PRIVILEGES;" +$STD sudo -u frappe bash -c 'export PATH="$HOME/.local/bin:$PATH"; cd /opt && bench init --frappe-branch version-15 frappe-bench' +$STD sudo -u frappe bash -c 'export PATH="$HOME/.local/bin:$PATH"; cd /opt/frappe-bench && bench get-app erpnext --branch version-15' + +msg_info "Starting Redis Services for Site Setup" +$STD sudo -u frappe bash -c 'redis-server /opt/frappe-bench/config/redis_queue.conf --daemonize yes' +$STD sudo -u frappe bash -c 'redis-server /opt/frappe-bench/config/redis_cache.conf --daemonize yes' +sleep 3 +msg_ok "Started Redis Services for Site Setup" + +$STD sudo -u frappe bash -c "export PATH=\"\$HOME/.local/bin:\$PATH\"; cd /opt/frappe-bench && bench new-site site1.local --db-root-username root --db-root-password \"$DB_ROOT_PASS\" --admin-password \"$ADMIN_PASS\" --install-app erpnext --set-default" +msg_ok "Initialized Frappe Bench" + +msg_info "Configuring ERPNext" +cat </opt/frappe-bench/.env +ADMIN_PASSWORD=${ADMIN_PASS} +DB_ROOT_PASSWORD=${DB_ROOT_PASS} +SITE_NAME=site1.local 
+EOF +{ + echo "ERPNext Credentials" + echo "==================" + echo "Admin Username: Administrator" + echo "Admin Password: ${ADMIN_PASS}" + echo "DB Root Password: ${DB_ROOT_PASS}" + echo "Site Name: site1.local" +} >~/erpnext.creds +$STD systemctl enable --now redis-server +msg_ok "Configured ERPNext" + +msg_info "Setting up Production" +BENCH_PY="/home/frappe/.local/share/uv/tools/frappe-bench/bin/python" +$STD sudo -u frappe bash -c "curl -fsSL https://bootstrap.pypa.io/get-pip.py | \"${BENCH_PY}\"" +$STD sudo -u frappe bash -c 'export PATH="$HOME/.local/bin:$PATH"; uv tool install ansible' +ln -sf /home/frappe/.local/bin/ansible* /usr/local/bin/ +$STD bash -c 'export PATH="/home/frappe/.local/bin:$PATH"; cd /opt/frappe-bench && bench setup production frappe --yes' +ln -sf /opt/frappe-bench/config/supervisor.conf /etc/supervisor/conf.d/frappe-bench.conf +$STD supervisorctl reread +$STD supervisorctl update +$STD systemctl enable --now supervisor +msg_ok "Set up Production" + +motd_ssh +customize +cleanup_lxc diff --git a/install/fileflows-install.sh b/install/fileflows-install.sh index 0043a0edd..b7b2aceef 100644 --- a/install/fileflows-install.sh +++ b/install/fileflows-install.sh @@ -5,7 +5,6 @@ # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE # Source: https://fileflows.com/ -# Import Functions und Setup source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" color verb_ip6 @@ -17,6 +16,7 @@ update_os msg_info "Installing Dependencies" $STD apt install -y \ ffmpeg \ + pciutils \ imagemagick msg_ok "Installed Dependencies" @@ -33,13 +33,26 @@ msg_ok "Installed ASP.NET Core Runtime" fetch_and_deploy_from_url "https://fileflows.com/downloads/zip" "/opt/fileflows" -msg_info "Setup FileFlows" $STD ln -svf /usr/bin/ffmpeg /usr/local/bin/ffmpeg $STD ln -svf /usr/bin/ffprobe /usr/local/bin/ffprobe -cd /opt/fileflows/Server -dotnet FileFlows.Server.dll --systemd install --root true -systemctl enable -q --now fileflows -msg_ok "Setup FileFlows" 
+$STD rm -rf /opt/fileflows/Server/runtimes/win-* + +read -r -p "${TAB3}Do you want to install FileFlows Server or Node? (S/N): " install_server + +if [[ "$install_server" =~ ^[Ss]$ ]]; then + msg_info "Installing FileFlows Server" + cd /opt/fileflows/Server + $STD dotnet FileFlows.Server.dll --systemd install --root true + systemctl enable -q --now fileflows + msg_ok "Installed FileFlows Server" +else + msg_info "Installing FileFlows Node" + cd /opt/fileflows/Node + $STD dotnet FileFlows.Node.dll + $STD dotnet FileFlows.Node.dll --systemd install --root true + systemctl enable -q --now fileflows-node + msg_ok "Installed FileFlows Node" +fi motd_ssh customize diff --git a/install/fireshare-install.sh b/install/fireshare-install.sh new file mode 100644 index 000000000..982679c6e --- /dev/null +++ b/install/fireshare-install.sh @@ -0,0 +1,174 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: Slaviša Arežina (tremor021) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/ShaneIsrael/fireshare + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os +setup_hwaccel + +msg_info "Installing Dependencies" +$STD apt install -y \ + git \ + build-essential \ + cmake \ + pkg-config \ + yasm \ + nasm \ + libx264-dev \ + libx265-dev \ + libvpx-dev \ + libaom-dev \ + libopus-dev \ + libvorbis-dev \ + libass-dev \ + libfreetype6-dev \ + libmp3lame-dev \ + nginx-extras \ + supervisor \ + libldap2-dev \ + libsasl2-dev \ + libssl-dev \ + libffi-dev \ + libc-dev +msg_ok "Installed Dependencies" + +NODE_VERSION=24 setup_nodejs +PYTHON_VERSION=3.14 setup_uv + +fetch_and_deploy_gh_release "fireshare" "ShaneIsrael/fireshare" "tarball" + +msg_info "Compiling SVT-AV1 (Patience)" +cd /tmp +$STD git clone --depth 1 --branch v1.8.0 https://gitlab.com/AOMediaCodec/SVT-AV1.git +cd SVT-AV1/Build +$STD cmake .. 
-G "Unix Makefiles" -DCMAKE_BUILD_TYPE=Release +$STD make -j$(nproc) +$STD make install +msg_ok "Compiled SVT-AV1" + +msg_info "Installing NVDEC headers" +cd /tmp +$STD git clone --depth 1 --branch n12.1.14.0 https://github.com/FFmpeg/nv-codec-headers.git +cd nv-codec-headers +$STD make install +$STD ldconfig +msg_ok "Installed NVDEC headers" + +msg_info "Compiling ffmpeg (Patience)" +cd /tmp +curl -fsSL https://ffmpeg.org/releases/ffmpeg-6.1.tar.xz -o "ffmpeg-6.1.tar.xz" +$STD tar -xf ffmpeg-6.1.tar.xz +cd ffmpeg-6.1 +$STD ./configure \ + --prefix=/usr/local \ + --enable-gpl \ + --enable-version3 \ + --enable-nonfree \ + --enable-ffnvcodec \ + --enable-libx264 \ + --enable-libx265 \ + --enable-libvpx \ + --enable-libaom \ + --enable-libopus \ + --enable-libvorbis \ + --enable-libmp3lame \ + --enable-libass \ + --enable-libfreetype \ + --enable-libsvtav1 \ + --disable-debug \ + --disable-doc +$STD make -j$(nproc) +$STD make install +$STD ldconfig +msg_ok "Compiled ffmpeg" + +msg_info "Configuring Fireshare (Patience)" +ADMIN_PASSWORD=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13) +SECRET=$(openssl rand -base64 48) +mkdir -p /opt/fireshare-{data,videos,images,processed} +cd /opt/fireshare +$STD uv venv +$STD .venv/bin/python -m ensurepip --upgrade +$STD .venv/bin/python -m pip install --upgrade --break-system-packages pip +ln -sf /usr/local/bin/ffmpeg /usr/bin/ffmpeg +ln -sf /usr/local/bin/ffprobe /usr/bin/ffprobe +echo "/usr/local/lib" >/etc/ld.so.conf.d/usr-local.conf +echo "/usr/local/cuda/lib64" >>/etc/ld.so.conf.d/usr-local.conf +echo "/usr/local/nvidia/lib" >>/etc/ld.so.conf.d/nvidia.conf +echo "/usr/local/nvidia/lib64" >>/etc/ld.so.conf.d/nvidia.conf +ldconfig +$STD .venv/bin/python -m pip install --no-cache-dir --break-system-packages --ignore-installed app/server +cp .venv/bin/fireshare /usr/local/bin/fireshare +export FLASK_APP="/opt/fireshare/app/server/fireshare:create_app()" +export DATA_DIRECTORY=/opt/fireshare-data +export 
IMAGE_DIRECTORY=/opt/fireshare-images +export VIDEO_DIRECTORY=/opt/fireshare-videos +export PROCESSED_DIRECTORY=/opt/fireshare-processed +$STD uv run flask db upgrade + +cat </opt/fireshare/fireshare.env +FLASK_APP="/opt/fireshare/app/server/fireshare:create_app()" +DOMAIN= +ENVIRONMENT=production +DATA_DIRECTORY=/opt/fireshare-data +IMAGE_DIRECTORY=/opt/fireshare-images +VIDEO_DIRECTORY=/opt/fireshare-videos +PROCESSED_DIRECTORY=/opt/fireshare-processed +TEMPLATE_PATH=/opt/fireshare/app/server/fireshare/templates +SECRET_KEY=${SECRET} +ADMIN_PASSWORD=${ADMIN_PASSWORD} +TZ=UTC +LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/local/lib:/usr/local/cuda/lib64:\$LD_LIBRARY_PATH +PATH=/usr/local/bin:$PATH +ENABLE_TRANSCODING= +TRANSCODE_GPU= +NVIDIA_DRIVER_CAPABILITIES= +EOF + +cd /opt/fireshare/app/client +$STD npm install +$STD npm run build +systemctl stop nginx +cp /opt/fireshare/app/nginx/prod.conf /etc/nginx/nginx.conf +sed -i 's|root /processed/|root /opt/fireshare-processed/|g' /etc/nginx/nginx.conf +sed -i 's/^user[[:space:]]\+nginx;/user root;/' /etc/nginx/nginx.conf +sed -i 's|root[[:space:]]\+/app/build;|root /opt/fireshare/app/client/build;|' /etc/nginx/nginx.conf +systemctl start nginx + +cat <~/fireshare.creds +Fireshare Admin Credentials +======================== +Username: admin +Password: ${ADMIN_PASSWORD} +EOF +msg_ok "Configured Fireshare" + +msg_info "Creating services" +cat </etc/systemd/system/fireshare.service +[Unit] +Description=Fireshare Service +After=network.target + +[Service] +WorkingDirectory=/opt/fireshare/app/server +ExecStart=/opt/fireshare/.venv/bin/gunicorn --bind=127.0.0.1:5000 "fireshare:create_app(init_schedule=True)" --workers 3 --threads 3 --preload +Restart=always +EnvironmentFile=/opt/fireshare/fireshare.env + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now fireshare +msg_ok "Created services" + +motd_ssh +customize +cleanup_lxc diff --git a/install/flaresolverr-install.sh 
b/install/flaresolverr-install.sh index da6ff0287..556427526 100644 --- a/install/flaresolverr-install.sh +++ b/install/flaresolverr-install.sh @@ -29,7 +29,9 @@ setup_deb822_repo \ $STD apt update $STD apt install -y google-chrome-stable # remove google-chrome.list added by google-chrome-stable -rm /etc/apt/sources.list.d/google-chrome.list +if [ -f /etc/apt/sources.list.d/google-chrome.list ]; then + rm /etc/apt/sources.list.d/google-chrome.list +fi msg_ok "Installed Chrome" fetch_and_deploy_gh_release "flaresolverr" "FlareSolverr/FlareSolverr" "prebuild" "latest" "/opt/flaresolverr" "flaresolverr_linux_x64.tar.gz" diff --git a/install/flowiseai-install.sh b/install/flowiseai-install.sh index bf47ab06e..81c40b827 100644 --- a/install/flowiseai-install.sh +++ b/install/flowiseai-install.sh @@ -13,10 +13,10 @@ setting_up_container network_check update_os -NODE_VERSION="20" setup_nodejs +NODE_VERSION="20" NODE_MODULE="pnpm" setup_nodejs msg_info "Installing FlowiseAI (Patience)" -$STD npm install -g flowise \ +$STD pnpm add -g flowise \ @opentelemetry/exporter-trace-otlp-grpc \ @opentelemetry/exporter-trace-otlp-proto \ @opentelemetry/sdk-trace-node \ @@ -33,7 +33,7 @@ After=network.target [Service] EnvironmentFile=/opt/flowiseai/.env -ExecStart=npx flowise start +ExecStart=flowise start Restart=always [Install] diff --git a/install/foldergram-install.sh b/install/foldergram-install.sh new file mode 100644 index 000000000..895a619a8 --- /dev/null +++ b/install/foldergram-install.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: Slaviša Arežina (tremor021) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/foldergram/foldergram + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y --no-install-recommends ffmpeg +msg_ok 
"Installed Dependencies" + +NODE_VERSION=25 NODE_MODULE="corepack" setup_nodejs + +fetch_and_deploy_gh_release "foldergram" "foldergram/foldergram" "tarball" + +msg_info "Configuring Foldergram" +export COREPACK_ENABLE_DOWNLOAD_PROMPT=0 +$STD corepack enable +cd /opt/foldergram +$STD pnpm install +$STD pnpm run build +mkdir -p /opt/foldergram_media +cat </opt/foldergram_media/foldergram.env +NODE_ENV=production +SERVER_PORT=4141 +DATA_ROOT=/opt/foldergram_media +GALLERY_ROOT=/opt/foldergram_media/gallery +DB_DIR=/opt/foldergram_media/db +THUMBNAILS_DIR=/opt/foldergram_media/thumbnails +PREVIEWS_DIR=/opt/foldergram_media/previews +IMAGE_DETAIL_SOURCE=preview +DERIVATIVE_MODE=eager +GALLERY_EXCLUDED_FOLDERS= +EOF +msg_ok "Configured Foldergram" + +msg_info "Creating services" +cat </etc/systemd/system/foldergram.service +[Unit] +Description=Foldergram Service +After=network.target + +[Service] +WorkingDirectory=/opt/foldergram +ExecStart=/usr/bin/pnpm start +Restart=always +EnvironmentFile=/opt/foldergram_media/foldergram.env + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now foldergram +msg_ok "Created services" + +motd_ssh +customize +cleanup_lxc diff --git a/install/frigate-install.sh b/install/frigate-install.sh index 231258384..777ea893c 100644 --- a/install/frigate-install.sh +++ b/install/frigate-install.sh @@ -110,7 +110,7 @@ export AUTOGRAPH_VERBOSITY=0 export GLOG_minloglevel=3 export GLOG_logtostderr=0 -fetch_and_deploy_gh_release "frigate" "blakeblackshear/frigate" "tarball" "v0.17.0" "/opt/frigate" +fetch_and_deploy_gh_release "frigate" "blakeblackshear/frigate" "tarball" "v0.17.1" "/opt/frigate" msg_info "Building Nginx" $STD bash /opt/frigate/docker/main/build_nginx.sh @@ -146,7 +146,7 @@ ldconfig msg_ok "Built libUSB" msg_info "Bootstrapping pip" -wget -q https://bootstrap.pypa.io/get-pip.py -O /tmp/get-pip.py +curl_with_retry "https://bootstrap.pypa.io/get-pip.py" "/tmp/get-pip.py" sed -i 
's/args.append("setuptools")/args.append("setuptools==77.0.3")/' /tmp/get-pip.py $STD python3 /tmp/get-pip.py "pip" rm -f /tmp/get-pip.py @@ -169,26 +169,54 @@ NODE_VERSION="20" setup_nodejs msg_info "Downloading Inference Models" mkdir -p /models /openvino-model -wget -q -O /edgetpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite -wget -q -O /models/cpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite +curl_with_retry "https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite" "/edgetpu_model.tflite" +curl_with_retry "https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite" "/models/cpu_model.tflite" cp /opt/frigate/labelmap.txt /labelmap.txt msg_ok "Downloaded Inference Models" msg_info "Downloading Audio Model" -wget -q -O /tmp/yamnet.tar.gz https://www.kaggle.com/api/v1/models/google/yamnet/tfLite/classification-tflite/1/download +curl_with_retry "https://www.kaggle.com/api/v1/models/google/yamnet/tfLite/classification-tflite/1/download" "/tmp/yamnet.tar.gz" $STD tar xzf /tmp/yamnet.tar.gz -C / mv /1.tflite /cpu_audio_model.tflite cp /opt/frigate/audio-labelmap.txt /audio-labelmap.txt rm -f /tmp/yamnet.tar.gz msg_ok "Downloaded Audio Model" +msg_info "Installing OpenVino" +$STD pip3 install -r /opt/frigate/docker/main/requirements-ov.txt +msg_ok "Installed OpenVino" + +msg_info "Building OpenVino Model" +cd /models +curl_with_retry "http://download.tensorflow.org/models/object_detection/ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz" "ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz" +$STD tar -zxf ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz --no-same-owner +if python3 /opt/frigate/docker/main/build_ov_model.py &>/dev/null; then + mkdir -p /openvino-model + cp /models/ssdlite_mobilenet_v2.xml 
/openvino-model/ + cp /models/ssdlite_mobilenet_v2.bin /openvino-model/ + OV_LABELS=$(python3 -c "import omz_tools; import os; print(os.path.join(omz_tools.__path__[0], 'data/dataset_classes/coco_91cl_bkgr.txt'))" 2>/dev/null) + if [[ -n "$OV_LABELS" && -f "$OV_LABELS" ]]; then + ln -sf "$OV_LABELS" /openvino-model/coco_91cl_bkgr.txt + else + OV_LABELS=$(find /usr/local/lib -name "coco_91cl_bkgr.txt" 2>/dev/null | head -1) + if [[ -n "$OV_LABELS" ]]; then + ln -sf "$OV_LABELS" /openvino-model/coco_91cl_bkgr.txt + else + curl_with_retry "https://raw.githubusercontent.com/openvinotoolkit/open_model_zoo/master/data/dataset_classes/coco_91cl_bkgr.txt" "/openvino-model/coco_91cl_bkgr.txt" + fi + fi + sed -i 's/truck/car/g' /openvino-model/coco_91cl_bkgr.txt + msg_ok "Built OpenVino Model" +else + msg_warn "OpenVino build failed (CPU may not support required instructions). Frigate will use CPU model." +fi + msg_info "Installing HailoRT Runtime" $STD bash /opt/frigate/docker/main/install_hailort.sh cp -a /opt/frigate/docker/main/rootfs/. 
/ sed -i '/^.*unset DEBIAN_FRONTEND.*$/d' /opt/frigate/docker/main/install_deps.sh echo "libedgetpu1-max libedgetpu/accepted-eula boolean true" | debconf-set-selections echo "libedgetpu1-max libedgetpu/install-confirm-max boolean true" | debconf-set-selections -# Allow Frigate's Intel media packages to overwrite files from system GPU driver packages echo 'force-overwrite' >/etc/dpkg/dpkg.cfg.d/force-overwrite $STD bash /opt/frigate/docker/main/install_deps.sh rm -f /etc/dpkg/dpkg.cfg.d/force-overwrite @@ -200,24 +228,6 @@ msg_info "Installing MemryX Runtime" $STD bash /opt/frigate/docker/main/install_memryx.sh msg_ok "Installed MemryX Runtime" -msg_info "Installing OpenVino" -$STD pip3 install -r /opt/frigate/docker/main/requirements-ov.txt -msg_ok "Installed OpenVino" - -msg_info "Building OpenVino Model" -cd /models -wget -q http://download.tensorflow.org/models/object_detection/ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz -$STD tar -zxf ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz --no-same-owner -if python3 /opt/frigate/docker/main/build_ov_model.py &>/dev/null; then - cp /models/ssdlite_mobilenet_v2.xml /openvino-model/ - cp /models/ssdlite_mobilenet_v2.bin /openvino-model/ - wget -q https://github.com/openvinotoolkit/open_model_zoo/raw/master/data/dataset_classes/coco_91cl_bkgr.txt -O /openvino-model/coco_91cl_bkgr.txt - sed -i 's/truck/car/g' /openvino-model/coco_91cl_bkgr.txt - msg_ok "Built OpenVino Model" -else - msg_warn "OpenVino build failed (CPU may not support required instructions). Frigate will use CPU model." -fi - msg_info "Building Frigate Application (Patience)" cd /opt/frigate $STD pip3 install -r /opt/frigate/docker/main/requirements-dev.txt @@ -236,7 +246,7 @@ msg_info "Configuring Frigate" mkdir -p /config /media/frigate cp -r /opt/frigate/config/. 
/config -curl -fsSL "https://github.com/intel-iot-devkit/sample-videos/raw/master/person-bicycle-car-detection.mp4" -o "/media/frigate/person-bicycle-car-detection.mp4" +curl_with_retry "https://github.com/intel-iot-devkit/sample-videos/raw/master/person-bicycle-car-detection.mp4" "/media/frigate/person-bicycle-car-detection.mp4" echo "tmpfs /tmp/cache tmpfs defaults 0 0" >>/etc/fstab @@ -279,7 +289,7 @@ detect: enabled: false EOF -if grep -q -o -m1 -E 'avx[^ ]*|sse4_2' /proc/cpuinfo; then +if grep -q -o -m1 -E 'avx[^ ]*|sse4_2' /proc/cpuinfo && [[ -f /openvino-model/ssdlite_mobilenet_v2.xml ]] && [[ -f /openvino-model/coco_91cl_bkgr.txt ]]; then cat <>/config/config.yml ffmpeg: hwaccel_args: auto @@ -300,7 +310,7 @@ else ffmpeg: hwaccel_args: auto model: - path: /cpu_model.tflite + path: /models/cpu_model.tflite EOF fi msg_ok "Configured Frigate" diff --git a/install/geopulse-install.sh b/install/geopulse-install.sh new file mode 100644 index 000000000..96ee07fbd --- /dev/null +++ b/install/geopulse-install.sh @@ -0,0 +1,205 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: CrazyWolf13 +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/tess1o/geopulse + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y \ + openssl \ + nginx +msg_ok "Installed Dependencies" + +PG_VERSION="17" PG_MODULES="postgis" setup_postgresql +PG_DB_NAME="geopulse" PG_DB_USER="geopulse" PG_DB_EXTENSIONS="postgis,postgis_topology" setup_postgresql_db + +msg_info "Generating Security Keys" +mkdir -p /opt/geopulse/{backend,keys} +mkdir -p /etc/geopulse /var/www/geopulse /var/lib/geopulse/dumps +mkdir -p /var/log/geopulse/{backend,nginx} +openssl genpkey -algorithm RSA -out /opt/geopulse/keys/jwt-private-key.pem 2>/dev/null +openssl rsa -pubout -in 
/opt/geopulse/keys/jwt-private-key.pem -out /opt/geopulse/keys/jwt-public-key.pem 2>/dev/null +openssl rand -base64 32 >/opt/geopulse/keys/ai-encryption-key.txt +chmod 640 /opt/geopulse/keys/jwt-private-key.pem /opt/geopulse/keys/jwt-public-key.pem /opt/geopulse/keys/ai-encryption-key.txt +msg_ok "Generated Security Keys" + +if [[ "$(uname -m)" == "aarch64" ]]; then + if grep -qi "raspberry\|bcm" /proc/cpuinfo 2>/dev/null; then + BINARY_PATTERN="geopulse-backend-native-arm64-compat-*" + else + BINARY_PATTERN="geopulse-backend-native-arm64-[!c]*" + fi +else + if grep -q avx2 /proc/cpuinfo && grep -q bmi2 /proc/cpuinfo && grep -q fma /proc/cpuinfo; then + BINARY_PATTERN="geopulse-backend-native-amd64-[!c]*" + else + BINARY_PATTERN="geopulse-backend-native-amd64-compat-*" + fi +fi + +fetch_and_deploy_gh_release "geopulse-backend" "tess1o/geopulse" "singlefile" "latest" "/opt/geopulse/backend" "${BINARY_PATTERN}" +fetch_and_deploy_gh_release "geopulse-frontend" "tess1o/geopulse" "prebuild" "latest" "/var/www/geopulse" "geopulse-frontend-*.tar.gz" + +msg_info "Configuring GeoPulse" +cat </etc/geopulse/geopulse.env +GEOPULSE_PUBLIC_BASE_URL=http://${LOCAL_IP} +GEOPULSE_UI_URL=http://${LOCAL_IP} +GEOPULSE_CORS_ENABLED=false +GEOPULSE_CORS_ORIGINS= +QUARKUS_HTTP_PORT=8080 +GEOPULSE_POSTGRES_URL=jdbc:postgresql://localhost:5432/${PG_DB_NAME} +GEOPULSE_POSTGRES_HOST=localhost +GEOPULSE_POSTGRES_PORT=5432 +GEOPULSE_POSTGRES_DB=${PG_DB_NAME} +GEOPULSE_POSTGRES_USERNAME=${PG_DB_USER} +GEOPULSE_POSTGRES_PASSWORD=${PG_DB_PASS} +GEOPULSE_JWT_PRIVATE_KEY_LOCATION=file:/opt/geopulse/keys/jwt-private-key.pem +GEOPULSE_JWT_PUBLIC_KEY_LOCATION=file:/opt/geopulse/keys/jwt-public-key.pem +GEOPULSE_AI_ENCRYPTION_KEY_LOCATION=file:/opt/geopulse/keys/ai-encryption-key.txt +QUARKUS_LOG_FILE_ENABLE=true +QUARKUS_LOG_FILE_PATH=/var/log/geopulse/backend/geopulse.log +QUARKUS_LOG_FILE_ROTATION_MAX_FILE_SIZE=10M +QUARKUS_LOG_FILE_ROTATION_MAX_BACKUP_INDEX=5 +EOF +chmod 640 
/etc/geopulse/geopulse.env +msg_ok "Configured GeoPulse" + +msg_info "Creating Service" +cat </etc/systemd/system/geopulse-backend.service +[Unit] +Description=GeoPulse Backend +After=network.target postgresql.service +Wants=postgresql.service + +[Service] +Type=simple +User=root +WorkingDirectory=/opt/geopulse/backend +EnvironmentFile=/etc/geopulse/geopulse.env +ExecStart=/opt/geopulse/backend/geopulse-backend -Dquarkus.http.host=0.0.0.0 -XX:MaximumHeapSizePercent=70 -XX:MaximumYoungGenerationSizePercent=15 +Restart=on-failure +RestartSec=10 +StandardOutput=append:/var/log/geopulse/backend/geopulse-stdout.log +StandardError=append:/var/log/geopulse/backend/geopulse-stderr.log + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now geopulse-backend +msg_ok "Created Service" + +msg_info "Configuring Nginx" +mkdir -p /var/cache/nginx/osm_tiles +cat <<'EOF' >/etc/nginx/sites-available/geopulse.conf +proxy_cache_path /var/cache/nginx/osm_tiles levels=1:2 keys_zone=osm_cache:100m max_size=10g inactive=30d use_temp_path=off; + +map $uri $osm_subdomain { + ~^/osm/tiles/a/ "a"; + ~^/osm/tiles/b/ "b"; + ~^/osm/tiles/c/ "c"; + default "a"; +} + +server { + listen 80; + server_name _; + + root /var/www/geopulse; + index index.html; + + client_max_body_size 100M; + + gzip on; + gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript; + gzip_comp_level 6; + gzip_min_length 1000; + + location ~* ^/(?!osm/).*\.(jpg|jpeg|png|gif|ico|css|js)$ { + expires 1y; + add_header Cache-Control "public, max-age=31536000"; + } + + location ^~ /osm/tiles/ { + resolver 8.8.8.8 valid=300s; + resolver_timeout 10s; + rewrite ^/osm/tiles/[abc]/(.*)$ /$1 break; + proxy_pass https://$osm_subdomain.tile.openstreetmap.org; + proxy_cache osm_cache; + proxy_cache_key "$scheme$proxy_host$uri"; + proxy_cache_valid 200 30d; + proxy_cache_valid 404 1m; + proxy_cache_valid 502 503 504 1m; + proxy_ignore_headers 
Cache-Control Expires Set-Cookie; + proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504; + proxy_cache_background_update on; + proxy_cache_lock on; + proxy_set_header Cookie ""; + proxy_set_header Authorization ""; + proxy_set_header User-Agent "GeoPulse/1.0"; + proxy_set_header Host $osm_subdomain.tile.openstreetmap.org; + proxy_http_version 1.1; + proxy_set_header Connection ""; + proxy_connect_timeout 10s; + proxy_read_timeout 10s; + expires 30d; + add_header Cache-Control "public, immutable"; + add_header X-Cache-Status $upstream_cache_status always; + } + + location /api/ { + proxy_pass http://localhost:8080/api/; + proxy_connect_timeout 3600s; + proxy_send_timeout 3600s; + proxy_read_timeout 3600s; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location / { + try_files $uri $uri/ /index.html; + } + + add_header X-Frame-Options "SAMEORIGIN" always; + add_header X-Content-Type-Options "nosniff" always; + add_header X-XSS-Protection "1; mode=block" always; + + access_log /var/log/geopulse/nginx/access.log; + error_log /var/log/geopulse/nginx/error.log; +} +EOF +ln -sf /etc/nginx/sites-available/geopulse.conf /etc/nginx/sites-enabled/ +rm -f /etc/nginx/sites-enabled/default +systemctl enable -q --now nginx +systemctl reload nginx +msg_ok "Configured Nginx" + +msg_info "Creating Admin Helper" +cat <<'EOF' >/usr/local/bin/create-geopulse-admin +#!/usr/bin/env bash +read -rp "Enter admin email address: " ADMIN_EMAIL +if [[ -z "$ADMIN_EMAIL" ]]; then + echo "No email provided. Aborting." + exit 1 +fi +sed -i '/^GEOPULSE_ADMIN_EMAIL=/d' /etc/geopulse/geopulse.env +echo "GEOPULSE_ADMIN_EMAIL=${ADMIN_EMAIL}" >>/etc/geopulse/geopulse.env +systemctl restart geopulse-backend +echo "Admin email set to '${ADMIN_EMAIL}'. Register with this email in the GeoPulse UI to receive admin privileges." 
+EOF +chmod +x /usr/local/bin/create-geopulse-admin +msg_ok "Created Admin Helper" + +motd_ssh +customize +cleanup_lxc diff --git a/install/ghost-install.sh b/install/ghost-install.sh index cae000bd6..cf3efe5f5 100644 --- a/install/ghost-install.sh +++ b/install/ghost-install.sh @@ -23,7 +23,7 @@ msg_ok "Installed Dependencies" setup_mariadb MARIADB_DB_NAME="ghost" MARIADB_DB_USER="ghostuser" setup_mariadb_db -NODE_VERSION="22" setup_nodejs +NODE_VERSION="22" NODE_MODULE="pnpm" setup_nodejs msg_info "Installing Ghost CLI" $STD npm install ghost-cli@latest -g diff --git a/install/ghostfolio-install.sh b/install/ghostfolio-install.sh index 2461ca3d4..d520e0021 100644 --- a/install/ghostfolio-install.sh +++ b/install/ghostfolio-install.sh @@ -25,25 +25,12 @@ PG_VERSION="17" setup_postgresql NODE_VERSION="24" setup_nodejs msg_info "Setting up Database" -DB_NAME=ghostfolio -DB_USER=ghostfolio -DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13) +PG_DB_NAME="ghostfolio" PG_DB_USER="ghostfolio" PG_DB_SCHEMA_PERMS="true" setup_postgresql_db REDIS_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13) ACCESS_TOKEN_SALT=$(openssl rand -base64 32) JWT_SECRET_KEY=$(openssl rand -base64 32) -$STD sudo -u postgres psql -c "CREATE DATABASE $DB_NAME;" -$STD sudo -u postgres psql -c "CREATE USER $DB_USER WITH ENCRYPTED PASSWORD '$DB_PASS';" -$STD sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE $DB_NAME TO $DB_USER;" -$STD sudo -u postgres psql -c "ALTER USER $DB_USER CREATEDB;" -$STD sudo -u postgres psql -d $DB_NAME -c "GRANT ALL ON SCHEMA public TO $DB_USER;" -$STD sudo -u postgres psql -d $DB_NAME -c "GRANT CREATE ON SCHEMA public TO $DB_USER;" -$STD sudo -u postgres psql -d $DB_NAME -c "ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO $DB_USER;" -$STD sudo -u postgres psql -d $DB_NAME -c "ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO $DB_USER;" { echo "Ghostfolio Credentials" - echo "Database User: 
$DB_USER" - echo "Database Password: $DB_PASS" - echo "Database Name: $DB_NAME" echo "Redis Password: $REDIS_PASS" echo "Access Token Salt: $ACCESS_TOKEN_SALT" echo "JWT Secret Key: $JWT_SECRET_KEY" @@ -69,7 +56,7 @@ read -rp "${TAB3}CoinGecko Pro API key (press Enter to skip): " COINGECKO_PRO_KE msg_info "Setting up Environment" cat </opt/ghostfolio/.env -DATABASE_URL=postgresql://$DB_USER:$DB_PASS@localhost:5432/$DB_NAME?connect_timeout=300&sslmode=prefer +DATABASE_URL=postgresql://$PG_DB_USER:$PG_DB_PASS@localhost:5432/$PG_DB_NAME?connect_timeout=300 REDIS_HOST=localhost REDIS_PORT=6379 REDIS_PASSWORD=$REDIS_PASS diff --git a/install/github-runner-install.sh b/install/github-runner-install.sh new file mode 100644 index 000000000..80a9d24cf --- /dev/null +++ b/install/github-runner-install.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://docs.github.com/en/actions/hosting-your-own-runners + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y \ + git \ + gh +msg_ok "Installed Dependencies" + +NODE_VERSION="24" setup_nodejs + +msg_info "Creating runner user (no sudo)" +useradd -m -s /bin/bash runner +msg_ok "Runner user ready" + +fetch_and_deploy_gh_release "actions-runner" "actions/runner" "prebuild" "latest" "/opt/actions-runner" "actions-runner-linux-x64-*.tar.gz" + +msg_info "Setting ownership for runner user" +chown -R runner:runner /opt/actions-runner +msg_ok "Ownership set" + +msg_info "Creating Service" +cat </etc/systemd/system/actions-runner.service +[Unit] +Description=GitHub Actions self-hosted runner +Documentation=https://docs.github.com/en/actions/hosting-your-own-runners +After=network-online.target +Wants=network-online.target + +[Service] 
+Type=simple +User=runner +WorkingDirectory=/opt/actions-runner +ExecStart=/opt/actions-runner/run.sh +Restart=on-failure +RestartSec=10 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q actions-runner +msg_ok "Created Service" + +motd_ssh +customize +cleanup_lxc diff --git a/install/gluetun-install.sh b/install/gluetun-install.sh new file mode 100644 index 000000000..68e841d93 --- /dev/null +++ b/install/gluetun-install.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/qdm12/gluetun + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y \ + openvpn \ + wireguard-tools \ + iptables +msg_ok "Installed Dependencies" + +msg_info "Configuring iptables" +$STD update-alternatives --set iptables /usr/sbin/iptables-legacy +$STD update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy +ln -sf /usr/sbin/openvpn /usr/sbin/openvpn2.6 +msg_ok "Configured iptables" + +setup_go + +fetch_and_deploy_gh_release "gluetun" "qdm12/gluetun" "tarball" + +msg_info "Building Gluetun" +cd /opt/gluetun +$STD go mod download +CGO_ENABLED=0 $STD go build -trimpath -ldflags="-s -w" -o /usr/local/bin/gluetun ./cmd/gluetun/ +msg_ok "Built Gluetun" + +msg_info "Configuring Gluetun" +mkdir -p /opt/gluetun-data +touch /etc/alpine-release +ln -sf /opt/gluetun-data /gluetun +cat </opt/gluetun-data/.env +VPN_SERVICE_PROVIDER=custom +VPN_TYPE=openvpn +OPENVPN_CUSTOM_CONFIG=/opt/gluetun-data/custom.ovpn +OPENVPN_USER= +OPENVPN_PASSWORD= +OPENVPN_PROCESS_USER=root +PUID=0 +PGID=0 +HTTP_CONTROL_SERVER_ADDRESS=:8000 +HTTPPROXY=off +SHADOWSOCKS=off +PPROF_ENABLED=no +PPROF_BLOCK_PROFILE_RATE=0 +PPROF_MUTEX_PROFILE_RATE=0 +PPROF_HTTP_SERVER_ADDRESS=:6060 
+FIREWALL_ENABLED_DISABLING_IT_SHOOTS_YOU_IN_YOUR_FOOT=on +HEALTH_SERVER_ADDRESS=127.0.0.1:9999 +DNS_UPSTREAM_RESOLVERS=cloudflare +LOG_LEVEL=info +STORAGE_FILEPATH=/gluetun/servers.json +PUBLICIP_FILE=/gluetun/ip +VPN_PORT_FORWARDING_STATUS_FILE=/gluetun/forwarded_port +TZ=UTC +EOF +msg_ok "Configured Gluetun" + +msg_info "Creating Service" +cat </etc/systemd/system/gluetun.service +[Unit] +Description=Gluetun VPN Client +After=network.target + +[Service] +Type=simple +User=root +WorkingDirectory=/opt/gluetun-data +EnvironmentFile=/opt/gluetun-data/.env +UnsetEnvironment=USER +ExecStartPre=/bin/sh -c 'rm -f /etc/openvpn/target.ovpn' +ExecStart=/usr/local/bin/gluetun +Restart=on-failure +RestartSec=5 +AmbientCapabilities=CAP_NET_ADMIN + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now gluetun +msg_ok "Created Service" + +motd_ssh +customize +cleanup_lxc diff --git a/install/go2rtc-install.sh b/install/go2rtc-install.sh index e793486c9..82927161c 100644 --- a/install/go2rtc-install.sh +++ b/install/go2rtc-install.sh @@ -14,6 +14,10 @@ network_check update_os setup_hwaccel +msg_info "Installing Dependencies" +$STD apt install -y ffmpeg +msg_ok "Installed Dependencies" + USE_ORIGINAL_FILENAME="true" fetch_and_deploy_gh_release "go2rtc" "AlexxIT/go2rtc" "singlefile" "latest" "/opt/go2rtc" "go2rtc_linux_amd64" msg_info "Creating Service" diff --git a/install/gogs-install.sh b/install/gogs-install.sh new file mode 100644 index 000000000..e58e1d36c --- /dev/null +++ b/install/gogs-install.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://gogs.io/ + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y git +msg_ok "Installed Dependencies" + 
+fetch_and_deploy_gh_release "gogs" "gogs/gogs" "prebuild" "latest" "/opt/gogs" "gogs_*_linux_amd64.tar.gz" + +msg_info "Setting up Gogs" +mkdir -p /opt/gogs/{custom/conf,data,log} +msg_ok "Set up Gogs" + +msg_info "Creating Service" +cat </etc/systemd/system/gogs.service +[Unit] +Description=Gogs Git Service +After=network.target + +[Service] +Type=simple +User=root +WorkingDirectory=/opt/gogs +ExecStart=/opt/gogs/gogs web +Restart=on-failure +RestartSec=5 +Environment=USER=root +Environment=HOME=/root + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now gogs +msg_ok "Created Service" + +motd_ssh +customize +cleanup_lxc diff --git a/install/gokapi-install.sh b/install/gokapi-install.sh index c7d80cf17..d45fa580b 100644 --- a/install/gokapi-install.sh +++ b/install/gokapi-install.sh @@ -13,11 +13,11 @@ setting_up_container network_check update_os -fetch_and_deploy_gh_release "gokapi" "Forceu/Gokapi" "prebuild" "latest" "/opt/gokapi" "gokapi-linux_amd64.zip" +fetch_and_deploy_gh_release "gokapi" "Forceu/Gokapi" "prebuild" "latest" "/opt/gokapi" "*linux*amd64.zip" msg_info "Configuring Gokapi" mkdir -p /opt/gokapi/{data,config} -chmod +x /opt/gokapi/gokapi-linux_amd64 +chmod +x /opt/gokapi/gokapi msg_ok "Configured Gokapi" msg_info "Creating Service" @@ -29,7 +29,7 @@ Description=gokapi Type=simple Environment=GOKAPI_DATA_DIR=/opt/gokapi/data Environment=GOKAPI_CONFIG_DIR=/opt/gokapi/config -ExecStart=/opt/gokapi/gokapi-linux_amd64 +ExecStart=/opt/gokapi/gokapi [Install] WantedBy=multi-user.target diff --git a/install/graylog-install.sh b/install/graylog-install.sh index ff88a182c..9e00da93f 100644 --- a/install/graylog-install.sh +++ b/install/graylog-install.sh @@ -13,7 +13,7 @@ setting_up_container network_check update_os -MONGO_VERSION="8.0" setup_mongodb +MONGO_VERSION="8.2" setup_mongodb msg_info "Setup Graylog Data Node" PASSWORD_SECRET=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c16) @@ -38,6 +38,8 @@ sed -i "s/password_secret 
=/password_secret = $PASSWORD_SECRET/g" /etc/graylog/s sed -i "s/root_password_sha2 =/root_password_sha2 = $ROOT_PASSWORD/g" /etc/graylog/server/server.conf sed -i 's/#http_bind_address = 127.0.0.1.*/http_bind_address = 0.0.0.0:9000/g' /etc/graylog/server/server.conf systemctl enable -q --now graylog-server +sleep 5 +sed -i "s/0\.0\.0\.0:9000/$LOCAL_IP:9000/g" /var/log/graylog-server/server.log msg_ok "Setup ${APPLICATION}" motd_ssh diff --git a/install/grist-install.sh b/install/grist-install.sh index c86eff323..5ebea6e76 100644 --- a/install/grist-install.sh +++ b/install/grist-install.sh @@ -28,7 +28,6 @@ export CYPRESS_INSTALL_BINARY=0 export NODE_OPTIONS="--max-old-space-size=2048" cd /opt/grist $STD yarn install -$STD yarn run install:ee $STD yarn run build:prod $STD yarn run install:python cat </opt/grist/.env diff --git a/install/healthchecks-install.sh b/install/healthchecks-install.sh index e46959702..1d6cb8664 100644 --- a/install/healthchecks-install.sh +++ b/install/healthchecks-install.sh @@ -35,7 +35,7 @@ PG_DB_NAME="healthchecks_db" PG_DB_USER="hc_user" PG_DB_PASS=$(openssl rand -bas msg_info "Setup Keys (Admin / Secret)" SECRET_KEY="$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | cut -c1-32)" -ADMIN_EMAIL="admin@helper-scripts.local" +ADMIN_EMAIL="admin@community-scripts.org" ADMIN_PASSWORD="$PG_DB_PASS" { echo "healthchecks Admin Email: $ADMIN_EMAIL" diff --git a/install/homarr-install.sh b/install/homarr-install.sh index e8f199ce2..f8e39c3a6 100644 --- a/install/homarr-install.sh +++ b/install/homarr-install.sh @@ -47,6 +47,8 @@ mkdir -p /appdata/redis chown -R redis:redis /appdata/redis chmod 744 /appdata/redis cp /opt/homarr/redis.conf /etc/redis/redis.conf +sed -i -e '$a\' /etc/redis/redis.conf +grep -q '^bind 127.0.0.1 -::1$' /etc/redis/redis.conf || echo "bind 127.0.0.1 -::1" >>/etc/redis/redis.conf rm /etc/nginx/nginx.conf mkdir -p /etc/nginx/templates cp /opt/homarr/nginx.conf /etc/nginx/templates/nginx.conf @@ -80,7 +82,7 @@ chmod +x 
/opt/homarr/run.sh systemctl daemon-reload systemctl enable -q --now redis-server systemctl enable -q --now homarr -systemctl disable -q --now nginx +systemctl disable -q --now nginx msg_ok "Created Services" motd_ssh diff --git a/install/homelable-install.sh b/install/homelable-install.sh new file mode 100644 index 000000000..e356ab5c8 --- /dev/null +++ b/install/homelable-install.sh @@ -0,0 +1,127 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/Pouzor/homelable + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y \ + nmap \ + iputils-ping \ + caddy +msg_ok "Installed Dependencies" + +UV_PYTHON="3.13" setup_uv +NODE_VERSION="20" setup_nodejs +fetch_and_deploy_gh_release "homelable" "Pouzor/homelable" "tarball" "latest" "/opt/homelable" + +msg_info "Setting up Python Backend" +cd /opt/homelable/backend +$STD uv venv /opt/homelable/backend/.venv +$STD uv pip install --python /opt/homelable/backend/.venv/bin/python -r requirements.txt +msg_ok "Set up Python Backend" + +msg_info "Configuring Homelable" +mkdir -p /opt/homelable/data +SECRET_KEY=$(openssl rand -hex 32) +BCRYPT_HASH=$(/opt/homelable/backend/.venv/bin/python -c "from passlib.context import CryptContext; print(CryptContext(schemes=['bcrypt']).hash('admin'))") +cat </opt/homelable/backend/.env +SECRET_KEY=${SECRET_KEY} +SQLITE_PATH=/opt/homelable/data/homelab.db +CORS_ORIGINS=["http://localhost:3000","http://${LOCAL_IP}:3000"] +AUTH_USERNAME=admin +AUTH_PASSWORD_HASH='${BCRYPT_HASH}' +SCANNER_RANGES=["192.168.1.0/24"] +STATUS_CHECKER_INTERVAL=60 +EOF +msg_ok "Configured Homelable" + +msg_info "Creating Password Reset Utility" +cat <<'EOF' >/root/change_password.sh +#!/usr/bin/env bash + +NEW_PASS="" + +while [[ -z 
"$NEW_PASS" ]]; do + read -s -p "Enter new password: " NEW_PASS + echo "" + if [[ -z "$NEW_PASS" ]]; then + echo "Error: Password cannot be blank. Try again." + fi +done + +HASH=$(/opt/homelable/backend/.venv/bin/python -c "from passlib.context import CryptContext; print(CryptContext(schemes=['bcrypt']).hash('${NEW_PASS}'))") + +sed -i "s|^AUTH_PASSWORD_HASH=.*|AUTH_PASSWORD_HASH='${HASH}'|" /opt/homelable/backend/.env + +systemctl restart homelable +echo "Password updated and service restarted successfully!" +EOF +chmod +x /root/change_password.sh +msg_ok "Created Password Reset Utility" + +msg_info "Building Frontend" +cd /opt/homelable/frontend +$STD npm ci +$STD npm run build +msg_ok "Built Frontend" + +msg_info "Creating Service" +cat </etc/systemd/system/homelable.service +[Unit] +Description=Homelable Backend +After=network.target + +[Service] +Type=simple +WorkingDirectory=/opt/homelable/backend +EnvironmentFile=/opt/homelable/backend/.env +ExecStart=/opt/homelable/backend/.venv/bin/uvicorn app.main:app --host 127.0.0.1 --port 8000 +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now homelable +msg_ok "Created Service" + +msg_info "Configuring Caddy" +cat </etc/caddy/Caddyfile +:3000 { + root * /opt/homelable/frontend/dist + file_server + + @websocket path /api/v1/status/ws/* + handle @websocket { + reverse_proxy 127.0.0.1:8000 + } + + handle /ws/* { + reverse_proxy 127.0.0.1:8000 + } + + handle /api/* { + reverse_proxy 127.0.0.1:8000 + } + + handle { + try_files {path} {path}.html /index.html + } +} +EOF +systemctl reload caddy +msg_ok "Configured Caddy" + +motd_ssh +customize +cleanup_lxc diff --git a/install/homepage-install.sh b/install/homepage-install.sh index b1e951fd7..45cba3d82 100644 --- a/install/homepage-install.sh +++ b/install/homepage-install.sh @@ -25,6 +25,7 @@ msg_info "Installing Homepage (Patience)" mkdir -p /opt/homepage/config cd /opt/homepage cp /opt/homepage/src/skeleton/* 
/opt/homepage/config +echo 'onlyBuiltDependencies=*' >> .npmrc $STD pnpm install export NEXT_PUBLIC_VERSION="v$RELEASE" export NEXT_PUBLIC_REVISION="source" diff --git a/install/hoodik-install.sh b/install/hoodik-install.sh new file mode 100644 index 000000000..d524c503e --- /dev/null +++ b/install/hoodik-install.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/hudikhq/hoodik + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +fetch_and_deploy_gh_release "hoodik" "hudikhq/hoodik" "prebuild" "latest" "/opt/hoodik" "*x86_64.tar.gz" + +msg_info "Configuring Hoodik" +mkdir -p /opt/hoodik_data +JWT_SECRET=$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | cut -c1-32) +cat </opt/hoodik/.env +DATA_DIR=/opt/hoodik_data +HTTP_PORT=5443 +HTTP_ADDRESS=0.0.0.0 +JWT_SECRET=${JWT_SECRET} +APP_URL=http://${LOCAL_IP}:5443 +SSL_DISABLED=true +COOKIE_SECURE=false +COOKIE_HTTP_ONLY=false +MAILER_TYPE=none +RUST_LOG=hoodik=info,error=info +EOF +msg_ok "Configured Hoodik" + +msg_info "Creating Service" +cat </etc/systemd/system/hoodik.service +[Unit] +Description=Hoodik - Encrypted File Storage +After=network.target + +[Service] +Type=simple +User=root +WorkingDirectory=/opt/hoodik_data +EnvironmentFile=/opt/hoodik/.env +ExecStart=/opt/hoodik/hoodik +Restart=always +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now hoodik +msg_ok "Created Service" + +motd_ssh +customize +cleanup_lxc diff --git a/install/igotify-install.sh b/install/igotify-install.sh new file mode 100644 index 000000000..66987b1c8 --- /dev/null +++ b/install/igotify-install.sh @@ -0,0 +1,59 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: pfassina +# License: MIT | 
https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/androidseb25/iGotify-Notification-Assistent + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +setup_deb822_repo \ + "microsoft" \ + "https://packages.microsoft.com/keys/microsoft-2025.asc" \ + "https://packages.microsoft.com/debian/13/prod/" \ + "trixie" \ + "main" +$STD apt install -y aspnetcore-runtime-10.0 +msg_ok "Installed Dependencies" + +fetch_and_deploy_gh_release "igotify" "androidseb25/iGotify-Notification-Assistent" "prebuild" "latest" "/opt/igotify" "iGotify-Notification-Service-amd64-v*.zip" + +msg_info "Creating Service" +cat </opt/igotify/.env +ASPNETCORE_URLS=http://0.0.0.0:80 +ASPNETCORE_ENVIRONMENT=Production +GOTIFY_DEFAULTUSER_PASS= +GOTIFY_URLS= +GOTIFY_CLIENT_TOKENS= +SECNTFY_TOKENS= +EOF +cat </etc/systemd/system/igotify.service +[Unit] +Description=iGotify Notification Service +After=network.target + +[Service] +EnvironmentFile=/opt/igotify/.env +WorkingDirectory=/opt/igotify +ExecStart=/usr/bin/dotnet "/opt/igotify/iGotify Notification Assist.dll" +Restart=always +RestartSec=10 +KillSignal=SIGINT +TimeoutStopSec=10 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now igotify +msg_ok "Created Service" + +motd_ssh +customize +cleanup_lxc diff --git a/install/immich-install.sh b/install/immich-install.sh index a95c7cada..45fce2344 100644 --- a/install/immich-install.sh +++ b/install/immich-install.sh @@ -13,41 +13,43 @@ setting_up_container network_check update_os -if [ -d /dev/dri ]; then +if lscpu | grep -q 'GenuineIntel'; then echo "" echo "" - echo -e "🤖 ${BL}Immich Machine Learning Options${CL}" + echo -e "🤖 ${BL}Immich Machine-Learning Options${CL}" echo "─────────────────────────────────────────" echo "Please choose your machine-learning type:" echo "" echo " 1) CPU only (default)" - echo " 2) Intel OpenVINO 
(requires GPU passthrough)" + echo " 2) **NEW** Intel OpenVINO CPU or iGPU" echo "" read -r -p "${TAB3}Select machine-learning type [1]: " ML_TYPE ML_TYPE="${ML_TYPE:-1}" if [[ "$ML_TYPE" == "2" ]]; then - msg_info "Installing OpenVINO dependencies" touch ~/.openvino $STD apt install -y --no-install-recommends patchelf - tmp_dir=$(mktemp -d) - $STD pushd "$tmp_dir" - curl -fsSLO https://raw.githubusercontent.com/immich-app/immich/refs/heads/main/machine-learning/Dockerfile - readarray -t INTEL_URLS < <( - sed -n "/intel-[igc|opencl]/p" ./Dockerfile | awk '{print $3}' - sed -n "/libigdgmm12/p" ./Dockerfile | awk '{print $3}' - ) - for url in "${INTEL_URLS[@]}"; do - curl -fsSLO "$url" - done - $STD apt install -y ./libigdgmm12*.deb - rm ./libigdgmm12*.deb - $STD apt install -y ./*.deb - $STD apt-mark hold libigdgmm12 - $STD popd - rm -rf "$tmp_dir" - dpkg-query -W -f='${Version}\n' intel-opencl-icd >~/.intel_version - msg_ok "Installed OpenVINO dependencies" + if [[ -d /dev/dri ]]; then + msg_info "Installing Intel OpenVINO dependencies" + tmp_dir=$(mktemp -d) + $STD pushd "$tmp_dir" + curl_with_retry "https://raw.githubusercontent.com/immich-app/immich/refs/heads/main/machine-learning/Dockerfile" "Dockerfile" + readarray -t INTEL_URLS < <( + sed -n "/intel-[igc|opencl]/p" ./Dockerfile | awk '{print $3}' + sed -n "/libigdgmm12/p" ./Dockerfile | awk '{print $3}' + ) + for url in "${INTEL_URLS[@]}"; do + curl_with_retry "$url" "$(basename "$url")" + done + $STD apt install -y ./libigdgmm12*.deb + rm ./libigdgmm12*.deb + $STD apt install -y ./*.deb + $STD apt-mark hold libigdgmm12 + $STD popd + rm -rf "$tmp_dir" + dpkg-query -W -f='${Version}\n' intel-opencl-icd >~/.intel_version + msg_ok "Installed Intel OpenVINO dependencies" + fi fi fi @@ -154,6 +156,10 @@ sed -i "s/^#shared_preload.*/shared_preload_libraries = 'vchord.so'/" /etc/postg systemctl restart postgresql.service PG_DB_NAME="immich" PG_DB_USER="immich" PG_DB_GRANT_SUPERUSER="true" 
PG_DB_SKIP_ALTER_ROLE="true" setup_postgresql_db +msg_info "Installing GCC-13 (available as fallback compiler)" +$STD apt install -y gcc-13 g++-13 +msg_ok "Installed GCC-13" + msg_warn "Compiling Custom Photo-processing Libraries (can take anywhere from 15min to 2h)" LD_LIBRARY_PATH=/usr/local/lib export LD_RUN_PATH=/usr/local/lib @@ -169,7 +175,8 @@ cd "$STAGING_DIR" SOURCE=${SOURCE_DIR}/libjxl JPEGLI_LIBJPEG_LIBRARY_SOVERSION="62" JPEGLI_LIBJPEG_LIBRARY_VERSION="62.3.0" -: "${LIBJXL_REVISION:=$(jq -cr '.revision' $BASE_DIR/server/sources/libjxl.json)}" +LIBJXL_REVISION="794a5dcf0d54f9f0b20d288a12e87afb91d20dfc" +# : "${LIBJXL_REVISION:=$(jq -cr '.revision' $BASE_DIR/server/sources/libjxl.json)}" $STD git clone https://github.com/libjxl/libjxl.git "$SOURCE" cd "$SOURCE" $STD git reset --hard "$LIBJXL_REVISION" @@ -206,7 +213,8 @@ msg_ok "(1/5) Compiled libjxl" msg_info "(2/5) Compiling libheif" SOURCE=${SOURCE_DIR}/libheif -: "${LIBHEIF_REVISION:=$(jq -cr '.revision' $BASE_DIR/server/sources/libheif.json)}" +LIBHEIF_REVISION="35dad50a9145332a7bfdf1ff6aef6801fb613d68" +# : "${LIBHEIF_REVISION:=$(jq -cr '.revision' $BASE_DIR/server/sources/libheif.json)}" $STD git clone https://github.com/strukturag/libheif.git "$SOURCE" cd "$SOURCE" $STD git reset --hard "$LIBHEIF_REVISION" @@ -231,7 +239,8 @@ msg_ok "(2/5) Compiled libheif" msg_info "(3/5) Compiling libraw" SOURCE=${SOURCE_DIR}/libraw -: "${LIBRAW_REVISION:=$(jq -cr '.revision' $BASE_DIR/server/sources/libraw.json)}" +LIBRAW_REVISION="0b56545a4f828743f28a4345cdfdd4c49f9f9a2a" +# : "${LIBRAW_REVISION:=$(jq -cr '.revision' $BASE_DIR/server/sources/libraw.json)}" $STD git clone https://github.com/LibRaw/LibRaw.git "$SOURCE" cd "$SOURCE" $STD git reset --hard "$LIBRAW_REVISION" @@ -289,8 +298,8 @@ ML_DIR="${APP_DIR}/machine-learning" GEO_DIR="${INSTALL_DIR}/geodata" mkdir -p {"${APP_DIR}","${UPLOAD_DIR}","${GEO_DIR}","${INSTALL_DIR}"/cache} -fetch_and_deploy_gh_release "Immich" "immich-app/immich" "tarball" "v2.5.6" 
"$SRC_DIR" -PNPM_VERSION="$(jq -r '.packageManager | split("@")[1]' ${SRC_DIR}/package.json)" +fetch_and_deploy_gh_release "Immich" "immich-app/immich" "tarball" "v2.7.5" "$SRC_DIR" +PNPM_VERSION="$(jq -r '.packageManager | split("@")[1] | split("+")[0]' ${SRC_DIR}/package.json)" NODE_VERSION="24" NODE_MODULE="pnpm@${PNPM_VERSION}" setup_nodejs msg_info "Installing Immich (patience)" @@ -306,6 +315,12 @@ $STD pnpm --filter immich --frozen-lockfile build unset SHARP_IGNORE_GLOBAL_LIBVIPS export SHARP_FORCE_GLOBAL_LIBVIPS=true $STD pnpm --filter immich --frozen-lockfile --prod --no-optional deploy "$APP_DIR" + +# Patch helmet.json: disable upgrade-insecure-requests for HTTP access +if [[ -f "$APP_DIR/helmet.json" ]]; then + jq '.contentSecurityPolicy.directives["upgrade-insecure-requests"] = null' "$APP_DIR/helmet.json" >"$APP_DIR/helmet.json.tmp" && mv "$APP_DIR/helmet.json.tmp" "$APP_DIR/helmet.json" +fi + cp "$APP_DIR"/package.json "$APP_DIR"/bin sed -i "s|^start|${APP_DIR}/bin/start|" "$APP_DIR"/bin/immich-admin @@ -338,17 +353,41 @@ msg_ok "Installed Immich Server, Web and Plugin Components" cd "$SRC_DIR"/machine-learning $STD useradd -U -s /usr/sbin/nologin -r -M -d "$INSTALL_DIR" immich -mkdir -p "$ML_DIR" && chown -R immich:immich "$INSTALL_DIR" +mkdir -p "$ML_DIR" +# chown excluding upload dir contents (may be a mount with restricted permissions) +chown immich:immich "$INSTALL_DIR" +find "$INSTALL_DIR" -maxdepth 1 -mindepth 1 ! 
-name upload -exec chown -R immich:immich {} + +chown immich:immich "$UPLOAD_DIR" 2>/dev/null || true export VIRTUAL_ENV="${ML_DIR}/ml-venv" +export UV_HTTP_TIMEOUT=300 if [[ -f ~/.openvino ]]; then - msg_info "Installing HW-accelerated machine-learning" - $STD uv add --no-sync --optional openvino onnxruntime-openvino==1.24.1 --active -n -p python3.13 --managed-python - $STD sudo --preserve-env=VIRTUAL_ENV -nu immich uv sync --extra openvino --no-dev --active --link-mode copy -n -p python3.13 --managed-python + ML_PYTHON="python3.13" + msg_info "Pre-installing Python ${ML_PYTHON} for machine-learning" + for attempt in $(seq 1 3); do + $STD sudo --preserve-env=VIRTUAL_ENV -nu immich uv python install "${ML_PYTHON}" && break + [[ $attempt -lt 3 ]] && msg_warn "Python download attempt $attempt failed, retrying..." && sleep 5 + done + msg_ok "Pre-installed Python ${ML_PYTHON}" + msg_info "Installing Intel OpenVINO machine-learning" + for attempt in $(seq 1 3); do + $STD sudo --preserve-env=VIRTUAL_ENV,UV_HTTP_TIMEOUT -nu immich uv sync --extra openvino --no-dev --active --link-mode copy -n -p "${ML_PYTHON}" --managed-python && break + [[ $attempt -lt 3 ]] && msg_warn "uv sync attempt $attempt failed, retrying..." && sleep 10 + done patchelf --clear-execstack "${VIRTUAL_ENV}/lib/python3.13/site-packages/onnxruntime/capi/onnxruntime_pybind11_state.cpython-313-x86_64-linux-gnu.so" - msg_ok "Installed HW-accelerated machine-learning" + msg_ok "Installed Intel OpenVINO machine-learning" else + ML_PYTHON="python3.11" + msg_info "Pre-installing Python ${ML_PYTHON} for machine-learning" + for attempt in $(seq 1 3); do + $STD sudo --preserve-env=VIRTUAL_ENV -nu immich uv python install "${ML_PYTHON}" && break + [[ $attempt -lt 3 ]] && msg_warn "Python download attempt $attempt failed, retrying..." 
&& sleep 5 + done + msg_ok "Pre-installed Python ${ML_PYTHON}" msg_info "Installing machine-learning" - $STD sudo --preserve-env=VIRTUAL_ENV -nu immich uv sync --extra cpu --no-dev --active --link-mode copy -n -p python3.11 --managed-python + for attempt in $(seq 1 3); do + $STD sudo --preserve-env=VIRTUAL_ENV,UV_HTTP_TIMEOUT -nu immich uv sync --extra cpu --no-dev --active --link-mode copy -n -p "${ML_PYTHON}" --managed-python && break + [[ $attempt -lt 3 ]] && msg_warn "uv sync attempt $attempt failed, retrying..." && sleep 10 + done msg_ok "Installed machine-learning" fi cd "$SRC_DIR" @@ -365,10 +404,10 @@ ln -s "$UPLOAD_DIR" "$ML_DIR"/upload msg_info "Installing GeoNames data" cd "$GEO_DIR" -curl -fsSLZ -O "https://download.geonames.org/export/dump/admin1CodesASCII.txt" \ - -O "https://download.geonames.org/export/dump/admin2Codes.txt" \ - -O "https://download.geonames.org/export/dump/cities500.zip" \ - -O "https://raw.githubusercontent.com/nvkelso/natural-earth-vector/v5.1.2/geojson/ne_10m_admin_0_countries.geojson" +curl_with_retry "https://download.geonames.org/export/dump/admin1CodesASCII.txt" "admin1CodesASCII.txt" +curl_with_retry "https://download.geonames.org/export/dump/admin2Codes.txt" "admin2Codes.txt" +curl_with_retry "https://download.geonames.org/export/dump/cities500.zip" "cities500.zip" +curl_with_retry "https://raw.githubusercontent.com/nvkelso/natural-earth-vector/v5.1.2/geojson/ne_10m_admin_0_countries.geojson" "ne_10m_admin_0_countries.geojson" unzip -q cities500.zip date --iso-8601=seconds | tr -d "\n" >geodata-date.txt rm cities500.zip @@ -389,6 +428,9 @@ IMMICH_VERSION=release NODE_ENV=production IMMICH_ALLOW_SETUP=true +## Change to 'false' to disable CSP +IMMICH_HELMET_FILE=true + DB_HOSTNAME=127.0.0.1 DB_USERNAME=${PG_DB_USER} DB_PASSWORD=${PG_DB_PASS} @@ -443,8 +485,7 @@ User=immich Group=immich UMask=0077 WorkingDirectory=${APP_DIR} -EnvironmentFile=${INSTALL_DIR}/.env -ExecStart=/usr/bin/node ${APP_DIR}/dist/main 
+ExecStart=${APP_DIR}/bin/start.sh Restart=on-failure SyslogIdentifier=immich-web StandardOutput=append:/var/log/immich/web.log @@ -474,7 +515,11 @@ StandardError=append:/var/log/immich/ml.log [Install] WantedBy=multi-user.target EOF -chown -R immich:immich "$INSTALL_DIR" /var/log/immich +chown -R immich:immich /var/log/immich +# chown excluding upload dir contents (may be a mount with restricted permissions) +chown immich:immich "$INSTALL_DIR" +find "$INSTALL_DIR" -maxdepth 1 -mindepth 1 ! -name upload -exec chown -R immich:immich {} + +chown immich:immich "$UPLOAD_DIR" 2>/dev/null || true systemctl enable -q --now immich-ml.service immich-web.service msg_ok "Modified user, created env file, scripts and services" diff --git a/install/immichframe-install.sh b/install/immichframe-install.sh index c90dbeb6e..683ee162c 100644 --- a/install/immichframe-install.sh +++ b/install/immichframe-install.sh @@ -43,8 +43,6 @@ cd /tmp/immichframe/immichFrame.Web $STD npm ci $STD npm run build cp -r build/* /opt/immichframe/wwwroot -$STD apt remove -y dotnet-sdk-8.0 -$STD apt autoremove -y rm -rf /tmp/immichframe mkdir -p /opt/immichframe/Config curl -fsSL "https://raw.githubusercontent.com/immichFrame/ImmichFrame/main/docker/Settings.example.yml" -o /opt/immichframe/Config/Settings.yml diff --git a/install/inspircd-install.sh b/install/inspircd-install.sh index 27a4a6984..b960bfaac 100644 --- a/install/inspircd-install.sh +++ b/install/inspircd-install.sh @@ -17,7 +17,7 @@ fetch_and_deploy_gh_release "inspircd" "inspircd/inspircd" "binary" "latest" "/o msg_info "Configuring InspIRCd" cat </etc/inspircd/inspircd.conf - + /etc/inspircd/inspircd.conf email="irc@&networkDomain;"> EOF +systemctl enable -q --now inspircd msg_ok "Installed InspIRCd" motd_ssh diff --git a/install/investbrain-install.sh b/install/investbrain-install.sh index e626ccb5d..9163166e8 100644 --- a/install/investbrain-install.sh +++ b/install/investbrain-install.sh @@ -90,6 +90,11 @@ MAIL_PORT=2525 
MAIL_FROM_ADDRESS="investbrain@${LOCAL_IP}" VITE_APP_NAME=Investbrain + +# Reverse Proxy Support (uncomment and set APP_URL/ASSET_URL to your domain when using a reverse proxy) +# APP_URL=https://your-domain.com +# ASSET_URL=https://your-domain.com +# TRUSTED_PROXIES=* EOF export COMPOSER_ALLOW_SUPERUSER=1 $STD /usr/local/bin/composer install --no-interaction --no-dev --optimize-autoloader diff --git a/install/iobroker-install.sh b/install/iobroker-install.sh index ec74dceb4..b9f48b713 100644 --- a/install/iobroker-install.sh +++ b/install/iobroker-install.sh @@ -28,7 +28,7 @@ if [[ ! "$CONFIRM" =~ ^([yY][eE][sS]|[yY])$ ]]; then exit 10 fi -NODE_VERSION="22" setup_nodejs +NODE_VERSION="24" setup_nodejs msg_info "Installing ioBroker (Patience)" $STD bash <(curl -fsSL https://iobroker.net/install.sh) diff --git a/install/ironclaw-install.sh b/install/ironclaw-install.sh new file mode 100644 index 000000000..07cbea755 --- /dev/null +++ b/install/ironclaw-install.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/nearai/ironclaw + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y \ + dbus-user-session \ + gnome-keyring \ + libsecret-tools +msg_ok "Installed Dependencies" + +PG_VERSION="17" PG_MODULES="pgvector" setup_postgresql +PG_DB_NAME="ironclaw" PG_DB_USER="ironclaw" PG_DB_EXTENSIONS="vector" setup_postgresql_db + +fetch_and_deploy_gh_release "ironclaw-bin" "nearai/ironclaw" "prebuild" "latest" "/usr/local/bin" \ + "ironclaw-$(uname -m)-unknown-linux-$([[ -f /etc/alpine-release ]] && echo "musl" || echo "gnu").tar.gz" +chmod +x /usr/local/bin/ironclaw + +msg_info "Configuring IronClaw" +mkdir -p /root/.ironclaw +GATEWAY_TOKEN=$(openssl rand -hex 32) +cat 
</root/.ironclaw/.env +DATABASE_URL=postgresql://${PG_DB_USER}:${PG_DB_PASS}@localhost:5432/${PG_DB_NAME}?sslmode=disable +GATEWAY_ENABLED=true +GATEWAY_HOST=0.0.0.0 +GATEWAY_PORT=3000 +GATEWAY_AUTH_TOKEN=${GATEWAY_TOKEN} +CLI_ENABLED=false +AGENT_NAME=ironclaw +RUST_LOG=ironclaw=info,tower_http=info +EOF +chmod 600 /root/.ironclaw/.env +msg_ok "Configured IronClaw" + +msg_info "Creating Service" +cat </etc/systemd/system/ironclaw.service +[Unit] +Description=IronClaw AI Agent +After=network.target postgresql.service + +[Service] +Type=simple +User=root +WorkingDirectory=/root +EnvironmentFile=/root/.ironclaw/.env +ExecStart=/usr/bin/dbus-run-session /usr/local/bin/ironclaw +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q ironclaw +msg_ok "Created Service" + +motd_ssh +customize +cleanup_lxc diff --git a/install/isponsorblocktv-install.sh b/install/isponsorblocktv-install.sh new file mode 100644 index 000000000..fb85518ee --- /dev/null +++ b/install/isponsorblocktv-install.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: Matthew Stern (sternma) | MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/dmunozv04/iSponsorBlockTV + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +if ! 
grep -q ' avx ' /proc/cpuinfo 2>/dev/null; then + msg_error "CPU does not support AVX instructions (required by iSponsorBlockTV/PyApp)" + exit 106 +fi + +fetch_and_deploy_gh_release "isponsorblocktv" "dmunozv04/iSponsorBlockTV" "singlefile" "latest" "/opt/isponsorblocktv" "iSponsorBlockTV-x86_64-linux" + +msg_info "Setting up iSponsorBlockTV" +install -d /var/lib/isponsorblocktv +msg_ok "Set up iSponsorBlockTV" + +msg_info "Creating Service" +cat </etc/systemd/system/isponsorblocktv.service +[Unit] +Description=iSponsorBlockTV +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +User=root +Group=root +Environment=iSPBTV_data_dir=/var/lib/isponsorblocktv +ExecStart=/opt/isponsorblocktv/isponsorblocktv +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q isponsorblocktv +msg_ok "Created Service" + +msg_info "Creating CLI wrapper" +cat <<'EOF' >/usr/local/bin/iSponsorBlockTV +#!/usr/bin/env bash +export iSPBTV_data_dir="/var/lib/isponsorblocktv" + +set +e +/opt/isponsorblocktv/isponsorblocktv "$@" +status=$? 
+set -e + +case "${1:-}" in + setup|setup-cli) + systemctl restart isponsorblocktv >/dev/null 2>&1 || true + ;; +esac + +exit $status +EOF +chmod +x /usr/local/bin/iSponsorBlockTV +ln -sf /usr/local/bin/iSponsorBlockTV /usr/bin/iSponsorBlockTV +msg_ok "Created CLI wrapper" + +motd_ssh +customize +cleanup_lxc diff --git a/install/itsm-ng-install.sh b/install/itsm-ng-install.sh index 8541a3cc0..bbaa14431 100644 --- a/install/itsm-ng-install.sh +++ b/install/itsm-ng-install.sh @@ -14,40 +14,32 @@ network_check update_os setup_mariadb - -msg_info "Setting up database" -DB_NAME=itsmng_db -DB_USER=itsmng -DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13) +msg_info "Loading timezone data" mariadb-tzinfo-to-sql /usr/share/zoneinfo | mariadb mysql -mariadb -u root -e "CREATE DATABASE $DB_NAME;" -mariadb -u root -e "CREATE USER '$DB_USER'@'localhost' IDENTIFIED BY '$DB_PASS';" -mariadb -u root -e "GRANT ALL PRIVILEGES ON $DB_NAME.* TO '$DB_USER'@'localhost';" -mariadb -u root -e "GRANT SELECT ON \`mysql\`.\`time_zone_name\` TO '$DB_USER'@'localhost'; FLUSH PRIVILEGES;" -{ - echo "ITSM-NG Database Credentials" - echo "Database: $DB_NAME" - echo "Username: $DB_USER" - echo "Password: $DB_PASS" -} >>~/itsmng_db.creds -msg_ok "Set up database" +msg_ok "Loaded timezone data" +MARIADB_DB_NAME="itsmng_db" MARIADB_DB_USER="itsmng" MARIADB_DB_EXTRA_GRANTS="GRANT SELECT ON \`mysql\`.\`time_zone_name\`" setup_mariadb_db -msg_info "Setup ITSM-NG Repository" +msg_info "Installing ITSM-NG" setup_deb822_repo \ "itsm-ng" \ "http://deb.itsm-ng.org/pubkey.gpg" \ "http://deb.itsm-ng.org/$(get_os_info id)/" \ "$(get_os_info codename)" -msg_ok "Setup ITSM-NG Repository" - -msg_info "Installing ITSM-NG" $STD apt install -y itsm-ng cd /usr/share/itsm-ng -$STD php bin/console db:install --db-name=$DB_NAME --db-user=$DB_USER --db-password=$DB_PASS --no-interaction +$STD php bin/console db:install --db-name="$MARIADB_DB_NAME" --db-user="$MARIADB_DB_USER" 
--db-password="$MARIADB_DB_PASS" --no-interaction $STD a2dissite 000-default.conf -echo "* * * * * php /usr/share/itsm-ng/front/cron.php" | crontab - +echo "* * * * * www-data php /usr/share/itsm-ng/front/cron.php" | crontab - msg_ok "Installed ITSM-NG" +msg_info "Setting permissions" +chown -R www-data:www-data /var/lib/itsm-ng +mkdir -p /usr/share/itsm-ng/css/palettes +chown -R www-data:www-data /usr/share/itsm-ng/css +chown -R www-data:www-data /usr/share/itsm-ng/css_compiled +chown www-data:www-data /etc/itsm-ng/config_db.php +msg_ok "Set permissions" + msg_info "Configuring PHP" PHP_VERSION=$(ls /etc/php/ | grep -E '^[0-9]+\.[0-9]+$' | head -n 1) PHP_INI="/etc/php/$PHP_VERSION/apache2/php.ini" diff --git a/install/jellyfin-install.sh b/install/jellyfin-install.sh index e32e383c6..a25ac387f 100644 --- a/install/jellyfin-install.sh +++ b/install/jellyfin-install.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash -# Copyright (c) 2021-2026 tteck -# Author: tteck (tteckster) +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE # Source: https://jellyfin.org/ @@ -14,31 +14,31 @@ network_check update_os msg_custom "ℹ️" "${GN}" "If NVIDIA GPU passthrough is detected, you'll be asked whether to install drivers in the container" -setup_hwaccel -msg_info "Installing Jellyfin" -VERSION="$(awk -F'=' '/^VERSION_CODENAME=/{ print $NF }' /etc/os-release)" -if ! dpkg -s libjemalloc2 >/dev/null 2>&1; then - $STD apt install -y libjemalloc2 -fi +msg_info "Installing Dependencies" +ensure_dependencies libjemalloc2 if [[ ! -f /usr/lib/libjemalloc.so ]]; then ln -sf /usr/lib/x86_64-linux-gnu/libjemalloc.so.2 /usr/lib/libjemalloc.so fi -if [[ ! 
-d /etc/apt/keyrings ]]; then - mkdir -p /etc/apt/keyrings -fi -curl -fsSL https://repo.jellyfin.org/jellyfin_team.gpg.key | gpg --dearmor --yes --output /etc/apt/keyrings/jellyfin.gpg -cat </etc/apt/sources.list.d/jellyfin.sources -Types: deb -URIs: https://repo.jellyfin.org/${PCT_OSTYPE} -Suites: ${VERSION} -Components: main -Architectures: amd64 -Signed-By: /etc/apt/keyrings/jellyfin.gpg -EOF +msg_ok "Installed Dependencies" -$STD apt update -$STD apt install -y jellyfin +msg_info "Setting up Jellyfin Repository" +setup_deb822_repo \ + "jellyfin" \ + "https://repo.jellyfin.org/jellyfin_team.gpg.key" \ + "https://repo.jellyfin.org/$(get_os_info id)" \ + "$(get_os_info codename)" +msg_ok "Set up Jellyfin Repository" + +msg_info "Installing Jellyfin" +$STD apt install -y jellyfin jellyfin-ffmpeg7 +ln -sf /usr/lib/jellyfin-ffmpeg/ffmpeg /usr/bin/ffmpeg +ln -sf /usr/lib/jellyfin-ffmpeg/ffprobe /usr/bin/ffprobe +msg_ok "Installed Jellyfin" + +setup_hwaccel "jellyfin" + +msg_info "Configuring Jellyfin" # Configure log rotation to prevent disk fill (keeps fail2ban compatibility) (PR: #1690 / Issue: #11224) cat </etc/logrotate.d/jellyfin /var/log/jellyfin/*.log { @@ -55,12 +55,7 @@ EOF chown -R jellyfin:adm /etc/jellyfin sleep 10 systemctl restart jellyfin -if [[ "$CTTYPE" == "0" ]]; then - sed -i -e 's/^ssl-cert:x:104:$/render:x:104:root,jellyfin/' -e 's/^render:x:108:root,jellyfin$/ssl-cert:x:108:/' /etc/group -else - sed -i -e 's/^ssl-cert:x:104:$/render:x:104:jellyfin/' -e 's/^render:x:108:jellyfin$/ssl-cert:x:108:/' /etc/group -fi -msg_ok "Installed Jellyfin" +msg_ok "Configured Jellyfin" motd_ssh customize diff --git a/install/jitsi-meet-install.sh b/install/jitsi-meet-install.sh new file mode 100644 index 000000000..4d24339d6 --- /dev/null +++ b/install/jitsi-meet-install.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | 
https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://jitsi.org/ + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y nginx +msg_ok "Installed Dependencies" + +source /etc/os-release +setup_deb822_repo "jitsi" \ + "https://download.jitsi.org/jitsi-key.gpg.key" \ + "https://download.jitsi.org" \ + "stable/" \ + "" + +msg_info "Installing Jitsi Meet" +echo "jitsi-videobridge2 jitsi-videobridge/jvb-hostname string ${LOCAL_IP}" | debconf-set-selections +echo "jitsi-meet-web-config jitsi-meet/cert-choice select Generate a new self-signed certificate" | debconf-set-selections +DEBIAN_FRONTEND=noninteractive $STD apt install -y jitsi-meet +msg_ok "Installed Jitsi Meet" + +motd_ssh +customize +cleanup_lxc diff --git a/install/jotty-install.sh b/install/jotty-install.sh index e4f01c94e..725d93d95 100644 --- a/install/jotty-install.sh +++ b/install/jotty-install.sh @@ -17,7 +17,7 @@ NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs fetch_and_deploy_gh_release "jotty" "fccview/jotty" "prebuild" "latest" "/opt/jotty" "jotty_*_prebuild.tar.gz" msg_info "Setup jotty" -mkdir -p data/{users,checklists,notes} +mkdir -p /opt/jotty/data/{users,checklists,notes} cat </opt/jotty/.env NODE_ENV=production diff --git a/install/kasm-install.sh b/install/kasm-install.sh index fb3ad99cc..340a6ae11 100644 --- a/install/kasm-install.sh +++ b/install/kasm-install.sh @@ -18,18 +18,21 @@ $STD sh <(curl -fsSL https://get.docker.com/) msg_ok "Installed Docker" msg_info "Detecting latest Kasm Workspaces release" -KASM_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 'https://kasm-static-content[^"]*kasm_release_[0-9]+\.[0-9]+\.[0-9]+\.[a-z0-9]+\.tar\.gz' | head -n 1) -if [[ -z "$KASM_URL" ]]; then - SERVICE_IMAGE_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 
'https://kasm-static-content[^"]*kasm_release_service_images_amd64_[0-9]+\.[0-9]+\.[0-9]+\.tar\.gz' | head -n 1) - if [[ -n "$SERVICE_IMAGE_URL" ]]; then - KASM_VERSION=$(echo "$SERVICE_IMAGE_URL" | sed -E 's/.*kasm_release_service_images_amd64_([0-9]+\.[0-9]+\.[0-9]+).*/\1/') - KASM_URL="https://kasm-static-content.s3.amazonaws.com/kasm_release_${KASM_VERSION}.tar.gz" - fi -else - KASM_VERSION=$(echo "$KASM_URL" | sed -E 's/.*kasm_release_([0-9]+\.[0-9]+\.[0-9]+).*/\1/') -fi +KASM_VERSION=$(curl -s https://kasm.com/downloads | grep -oP ']*>.*?' | sed -E 's/<\/?h1[^>]*>//g' | grep -oP '\d+\.\d+\.\d+') +KASM_URL="https://kasm-static-content.s3.amazonaws.com/kasm_release_${KASM_VERSION:-var_kasm_version}.tar.gz" + +# KASM_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 'https://kasm-static-content[^"]*kasm_release_[0-9]+\.[0-9]+\.[0-9]+\.[a-z0-9]+\.tar\.gz' | head -n 1) +# if [[ -z "$KASM_URL" ]]; then +# SERVICE_IMAGE_URL=$(curl -fsSL "https://www.kasm.com/downloads" | tr '\n' ' ' | grep -oE 'https://kasm-static-content[^"]*kasm_release_service_images_amd64_[0-9]+\.[0-9]+\.[0-9]+\.tar\.gz' | head -n 1) +# if [[ -n "$SERVICE_IMAGE_URL" ]]; then +# KASM_VERSION=$(echo "$SERVICE_IMAGE_URL" | sed -E 's/.*kasm_release_service_images_amd64_([0-9]+\.[0-9]+\.[0-9]+).*/\1/') +# KASM_URL="https://kasm-static-content.s3.amazonaws.com/kasm_release_${KASM_VERSION}.tar.gz" +# fi +# else +# KASM_VERSION=$(echo "$KASM_URL" | sed -E 's/.*kasm_release_([0-9]+\.[0-9]+\.[0-9]+).*/\1/') +# fi -if [[ -z "$KASM_URL" ]] || [[ -z "$KASM_VERSION" ]]; then +if [[ -z "$KASM_VERSION" ]] || [[ -z "$KASM_URL" ]]; then msg_error "Unable to detect latest Kasm release URL." 
exit 250 fi diff --git a/install/kima-hub-install.sh b/install/kima-hub-install.sh index 54cad2b88..850da5a36 100644 --- a/install/kima-hub-install.sh +++ b/install/kima-hub-install.sh @@ -28,7 +28,7 @@ msg_ok "Installed Dependencies" PG_VERSION="16" PG_MODULES="pgvector" setup_postgresql PG_DB_NAME="kima" PG_DB_USER="kima" PG_DB_GRANT_SUPERUSER="true" setup_postgresql_db -NODE_VERSION="20" setup_nodejs +NODE_VERSION="22" setup_nodejs msg_info "Configuring Redis" systemctl enable -q --now redis-server diff --git a/install/kimai-install.sh b/install/kimai-install.sh index 9e5129893..e555a8489 100644 --- a/install/kimai-install.sh +++ b/install/kimai-install.sh @@ -55,10 +55,10 @@ $STD expect </opt/kitchenowl/kitchenowl.env diff --git a/install/kometa-install.sh b/install/kometa-install.sh index f944985a2..135080bc7 100644 --- a/install/kometa-install.sh +++ b/install/kometa-install.sh @@ -23,12 +23,20 @@ mkdir -p config/assets cp config/config.yml.template config/config.yml msg_ok "Setup Kometa" -read -p "${TAB3}Enter your TMDb API key: " TMDBKEY -read -p "${TAB3}Enter your Plex URL: " PLEXURL -read -p "${TAB3}Enter your Plex token: " PLEXTOKEN -sed -i -e "s#url: http://192.168.1.12:32400#url: $PLEXURL #g" /opt/kometa/config/config.yml -sed -i -e "s/token: ####################/token: $PLEXTOKEN/g" /opt/kometa/config/config.yml -sed -i -e "s/apikey: ################################/apikey: $TMDBKEY/g" /opt/kometa/config/config.yml +read -r -p "${TAB3}Enter your TMDb API key: " TMDBKEY +read -r -p "${TAB3}Enter your Plex URL: " PLEXURL +read -r -p "${TAB3}Enter your Plex token: " PLEXTOKEN +sed -i '/^plex:/,/^[^ ]/{s| url:.*| url: '"$PLEXURL"'|}' /opt/kometa/config/config.yml +sed -i '/^plex:/,/^[^ ]/{s| token:.*| token: '"$PLEXTOKEN"'|}' /opt/kometa/config/config.yml +sed -i '/^tmdb:/,/^[^ ]/{s| apikey:.*| apikey: '"$TMDBKEY"'|}' /opt/kometa/config/config.yml + +fetch_and_deploy_gh_release "kometa-quickstart" "Kometa-Team/Quickstart" "tarball" + +msg_info "Installing 
Kometa Quickstart" +cd /opt/kometa-quickstart +$STD uv venv /opt/kometa-quickstart/.venv +$STD uv pip install -r requirements.txt -p /opt/kometa-quickstart/.venv/bin/python +msg_ok "Installed Kometa Quickstart" msg_info "Creating Service" cat </etc/systemd/system/kometa.service @@ -46,7 +54,22 @@ RestartSec=30 [Install] WantedBy=multi-user.target EOF -systemctl enable -q --now kometa +cat </etc/systemd/system/kometa-quickstart.service +[Unit] +Description=Kometa Quickstart +After=network-online.target + +[Service] +Type=simple +WorkingDirectory=/opt/kometa-quickstart +ExecStart=/opt/kometa-quickstart/.venv/bin/python quickstart.py +Restart=always +RestartSec=10 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now kometa kometa-quickstart msg_ok "Created Service" motd_ssh diff --git a/install/librechat-install.sh b/install/librechat-install.sh new file mode 100644 index 000000000..6108860a2 --- /dev/null +++ b/install/librechat-install.sh @@ -0,0 +1,139 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/danny-avila/LibreChat + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +MONGO_VERSION="8.0" setup_mongodb +setup_meilisearch +PG_VERSION="17" PG_MODULES="pgvector" setup_postgresql +PG_DB_NAME="ragapi" PG_DB_USER="ragapi" PG_DB_EXTENSIONS="vector" setup_postgresql_db +NODE_VERSION="24" setup_nodejs +UV_PYTHON="3.12" setup_uv + +fetch_and_deploy_gh_tag "librechat" "danny-avila/LibreChat" +fetch_and_deploy_gh_release "rag-api" "danny-avila/rag_api" "tarball" + +msg_info "Installing LibreChat Dependencies" +cd /opt/librechat +$STD npm ci +msg_ok "Installed LibreChat Dependencies" + +msg_info "Building Frontend" +$STD npm run frontend +$STD npm prune --production +$STD npm cache clean --force +msg_ok "Built 
Frontend" + +msg_info "Installing RAG API Dependencies" +cd /opt/rag-api +$STD uv venv --python 3.12 --seed .venv +$STD .venv/bin/pip install -r requirements.lite.txt +mkdir -p /opt/rag-api/uploads +msg_ok "Installed RAG API Dependencies" + +msg_info "Configuring LibreChat" +JWT_SECRET=$(openssl rand -hex 32) +JWT_REFRESH_SECRET=$(openssl rand -hex 32) +CREDS_KEY=$(openssl rand -hex 32) +CREDS_IV=$(openssl rand -hex 16) +cat </opt/librechat/.env +HOST=0.0.0.0 +PORT=3080 +MONGO_URI=mongodb://127.0.0.1:27017/LibreChat +DOMAIN_CLIENT=http://${LOCAL_IP}:3080 +DOMAIN_SERVER=http://${LOCAL_IP}:3080 +NO_INDEX=true +TRUST_PROXY=1 +JWT_SECRET=${JWT_SECRET} +JWT_REFRESH_SECRET=${JWT_REFRESH_SECRET} +SESSION_EXPIRY=1000 * 60 * 15 +REFRESH_TOKEN_EXPIRY=(1000 * 60 * 60 * 24) * 7 +CREDS_KEY=${CREDS_KEY} +CREDS_IV=${CREDS_IV} +ALLOW_EMAIL_LOGIN=true +ALLOW_REGISTRATION=true +ALLOW_SOCIAL_LOGIN=false +ALLOW_SOCIAL_REGISTRATION=false +ALLOW_PASSWORD_RESET=false +ALLOW_UNVERIFIED_EMAIL_LOGIN=true +SEARCH=true +MEILI_NO_ANALYTICS=true +MEILI_HOST=http://127.0.0.1:7700 +MEILI_MASTER_KEY=${MEILISEARCH_MASTER_KEY} +RAG_PORT=8000 +RAG_API_URL=http://127.0.0.1:8000 +APP_TITLE=LibreChat +ENDPOINTS=openAI,agents,assistants,anthropic,google +# OPENAI_API_KEY=your-key-here +# OPENAI_MODELS= +# ANTHROPIC_API_KEY=your-key-here +# GOOGLE_KEY=your-key-here +EOF +msg_ok "Configured LibreChat" + +msg_info "Configuring RAG API" +cat </opt/rag-api/.env +VECTOR_DB_TYPE=pgvector +DB_HOST=127.0.0.1 +DB_PORT=5432 +POSTGRES_DB=${PG_DB_NAME} +POSTGRES_USER=${PG_DB_USER} +POSTGRES_PASSWORD=${PG_DB_PASS} +RAG_HOST=0.0.0.0 +RAG_PORT=8000 +JWT_SECRET=${JWT_SECRET} +RAG_UPLOAD_DIR=/opt/rag-api/uploads/ +EOF +msg_ok "Configured RAG API" + +msg_info "Creating Services" +cat </etc/systemd/system/librechat.service +[Unit] +Description=LibreChat +After=network.target mongod.service meilisearch.service rag-api.service + +[Service] +Type=simple +User=root +WorkingDirectory=/opt/librechat 
+EnvironmentFile=/opt/librechat/.env +ExecStart=/usr/bin/npm run backend +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +cat </etc/systemd/system/rag-api.service +[Unit] +Description=LibreChat RAG API +After=network.target postgresql.service + +[Service] +Type=simple +User=root +WorkingDirectory=/opt/rag-api +EnvironmentFile=/opt/rag-api/.env +ExecStart=/opt/rag-api/.venv/bin/uvicorn main:app --host 0.0.0.0 --port 8000 +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now rag-api librechat +msg_ok "Created Services" + +motd_ssh +customize +cleanup_lxc diff --git a/install/linkding-install.sh b/install/linkding-install.sh index a77509d83..88ed1d433 100644 --- a/install/linkding-install.sh +++ b/install/linkding-install.sh @@ -27,7 +27,7 @@ msg_ok "Installed Dependencies" NODE_VERSION="22" setup_nodejs setup_uv -fetch_and_deploy_gh_release "linkding" "sissbruecker/linkding" +fetch_and_deploy_gh_release "linkding" "sissbruecker/linkding" "tarball" msg_info "Building Frontend" cd /opt/linkding diff --git a/install/litellm-install.sh b/install/litellm-install.sh deleted file mode 100644 index f86ce9d79..000000000 --- a/install/litellm-install.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) 2021-2026 community-scripts ORG -# Author: stout01 -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://github.com/BerriAI/litellm - -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -color -verb_ip6 -catch_errors -setting_up_container -network_check -update_os - -msg_info "Installing Dependencies" -$STD apt install -y \ - build-essential \ - python3-dev -msg_ok "Installed Dependencies" - -PG_VERSION="17" setup_postgresql -PG_DB_NAME="litellm_db" PG_DB_USER="litellm" setup_postgresql_db -PYTHON_VERSION="3.13" USE_UVX="YES" setup_uv - -msg_info "Setting up Virtual Environment" -mkdir -p /opt/litellm -cd /opt/litellm -$STD uv venv 
--clear /opt/litellm/.venv -$STD /opt/litellm/.venv/bin/python -m ensurepip --upgrade -$STD /opt/litellm/.venv/bin/python -m pip install --upgrade pip -$STD /opt/litellm/.venv/bin/python -m pip install litellm[proxy] prisma -msg_ok "Installed LiteLLM" - -msg_info "Configuring LiteLLM" -mkdir -p /opt -cat </opt/litellm/litellm.yaml -general_settings: - master_key: sk-1234 - database_url: postgresql://$PG_DB_USER:$PG_DB_PASS@127.0.0.1:5432/$PG_DB_NAME - store_model_in_db: true -EOF -uv --directory=/opt/litellm run litellm --config /opt/litellm/litellm.yaml --use_prisma_db_push --skip_server_startup -msg_ok "Configured LiteLLM" - -msg_info "Creating Service" -cat </etc/systemd/system/litellm.service -[Unit] -Description=LiteLLM - -[Service] -Type=simple -ExecStart=uv --directory=/opt/litellm run litellm --config /opt/litellm/litellm.yaml -Restart=always - -[Install] -WantedBy=multi-user.target -EOF -systemctl enable -q --now litellm -msg_ok "Created Service" - -motd_ssh -customize -cleanup_lxc diff --git a/install/lychee-install.sh b/install/lychee-install.sh new file mode 100644 index 000000000..29cbc7ddd --- /dev/null +++ b/install/lychee-install.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/LycheeOrg/Lychee + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y \ + caddy \ + libimage-exiftool-perl \ + jpegoptim +msg_ok "Installed Dependencies" + +PHP_VERSION="8.4" PHP_FPM="YES" PHP_MODULE="bcmath,ldap,exif,gd,intl,imagick,redis,zip,pdo_pgsql,pcntl" setup_php +PG_VERSION="16" setup_postgresql +PG_DB_NAME="lychee" PG_DB_USER="lychee" setup_postgresql_db +setup_ffmpeg +setup_imagemagick + +fetch_and_deploy_gh_release "lychee" "LycheeOrg/Lychee" "prebuild" 
"latest" "/opt/lychee" "Lychee.zip" + +msg_info "Configuring Application" +cd /opt/lychee +cp .env.example .env +APP_KEY=$($STD php artisan key:generate --show) +sed -i "s|^APP_KEY=.*|APP_KEY=${APP_KEY}|" .env +sed -i "s|^APP_ENV=.*|APP_ENV=production|" .env +sed -i "s|^APP_DEBUG=.*|APP_DEBUG=false|" .env +sed -i "s|^APP_URL=.*|APP_URL=http://${LOCAL_IP}|" .env +sed -i "s|^DB_CONNECTION=.*|DB_CONNECTION=pgsql|" .env +sed -i "s|^DB_HOST=.*|DB_HOST=127.0.0.1|" .env +sed -i "s|^DB_PORT=.*|DB_PORT=5432|" .env +sed -i "s|^#\?DB_DATABASE=.*|DB_DATABASE=${PG_DB_NAME}|" .env +sed -i "s|^DB_USERNAME=.*|DB_USERNAME=${PG_DB_USER}|" .env +sed -i "s|^DB_PASSWORD=.*|DB_PASSWORD=${PG_DB_PASS}|" .env +mkdir -p storage/framework/{cache,sessions,views} storage/logs bootstrap/cache public/dist public/uploads public/sym +touch public/dist/user.css public/dist/custom.js +chmod -R 775 storage bootstrap/cache public/dist public/uploads public/sym +msg_ok "Configured Application" + +msg_info "Running Database Migrations" +cd /opt/lychee +$STD php artisan migrate --force +msg_ok "Ran Database Migrations" + +chown -R www-data:www-data /opt/lychee + +msg_info "Configuring Caddy" +PHP_VER=$(php -r 'echo PHP_MAJOR_VERSION . "." . 
PHP_MINOR_VERSION;') +cat </etc/caddy/Caddyfile +:80 { + root * /opt/lychee/public + php_fastcgi unix//run/php/php${PHP_VER}-fpm.sock + file_server + encode gzip +} +EOF +usermod -aG www-data caddy +msg_ok "Configured Caddy" + +systemctl enable -q --now php${PHP_VER}-fpm +systemctl restart caddy + +motd_ssh +customize +cleanup_lxc diff --git a/install/lyrionmusicserver-install.sh b/install/lyrionmusicserver-install.sh index cc968a443..a348c9fbb 100644 --- a/install/lyrionmusicserver-install.sh +++ b/install/lyrionmusicserver-install.sh @@ -14,10 +14,10 @@ network_check update_os msg_info "Setup Lyrion Music Server" -DEB_URL=$(curl -fsSL 'https://lyrion.org/getting-started/' | grep -oP ']*href="\K[^"]*amd64\.deb(?="[^>]*>)' | head -n 1) +DEB_URL=$(curl_with_retry 'https://lyrion.org/getting-started/' | grep -oP ']*href="\K[^"]*amd64\.deb(?="[^>]*>)' | head -n 1) RELEASE=$(echo "$DEB_URL" | grep -oP 'lyrionmusicserver_\K[0-9.]+(?=_amd64\.deb)') DEB_FILE="/tmp/lyrionmusicserver_${RELEASE}_amd64.deb" -curl -fsSL -o "$DEB_FILE" "$DEB_URL" +curl_with_retry "$DEB_URL" "$DEB_FILE" $STD apt install "$DEB_FILE" -y rm -f "$DEB_FILE" echo "${RELEASE}" >"/opt/lyrion_version.txt" diff --git a/install/mail-archiver-install.sh b/install/mail-archiver-install.sh index 26caab59b..24fb81061 100644 --- a/install/mail-archiver-install.sh +++ b/install/mail-archiver-install.sh @@ -22,7 +22,8 @@ setup_deb822_repo \ "main" $STD apt install -y \ dotnet-sdk-10.0 \ - aspnetcore-runtime-8.0 + aspnetcore-runtime-8.0 \ + libgssapi-krb5-2 msg_ok "Installed Dependencies" PG_VERSION="17" setup_postgresql diff --git a/install/manyfold-install.sh b/install/manyfold-install.sh index fddaf63c2..b766c7fd5 100644 --- a/install/manyfold-install.sh +++ b/install/manyfold-install.sh @@ -101,11 +101,14 @@ server { location /cable { proxy_pass http://127.0.0.1:5000; - proxy_set_header Host \$host; + + proxy_set_header Host \$http_host; + proxy_set_header X-Forwarded-Host \$http_host; + proxy_set_header 
X-Forwarded-Port \$server_port; proxy_http_version 1.1; proxy_set_header Upgrade \$http_upgrade; - proxy_set_header Connection "Upgrade"; + proxy_set_header Connection "upgrade"; proxy_set_header X-Real-IP \$remote_addr; proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for; @@ -118,7 +121,11 @@ server { location @rails { proxy_pass http://127.0.0.1:5000; - proxy_set_header Host \$host; + + proxy_set_header Host \$http_host; + proxy_set_header X-Forwarded-Host \$http_host; + proxy_set_header X-Forwarded-Port \$server_port; + proxy_set_header X-Real-IP \$remote_addr; proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto \$scheme; diff --git a/install/matomo-install.sh b/install/matomo-install.sh new file mode 100644 index 000000000..4e5b8df4e --- /dev/null +++ b/install/matomo-install.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://matomo.org/ + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y caddy +msg_ok "Installed Dependencies" + +mkdir -p /opt/matomo + +PHP_VERSION="8.3" PHP_FPM="YES" PHP_MODULES="pdo_mysql,gd,mbstring,xml,curl,intl,zip,ldap" setup_php +setup_mariadb +MARIADB_DB_NAME="matomo" MARIADB_DB_USER="matomo" setup_mariadb_db + +msg_info "Allowing Local TCP Database Access" +$STD mariadb -u root -e "CREATE USER IF NOT EXISTS '$MARIADB_DB_USER'@'127.0.0.1' IDENTIFIED BY '$MARIADB_DB_PASS';" +$STD mariadb -u root -e "ALTER USER '$MARIADB_DB_USER'@'127.0.0.1' IDENTIFIED BY '$MARIADB_DB_PASS';" +$STD mariadb -u root -e "GRANT ALL ON \`$MARIADB_DB_NAME\`.* TO '$MARIADB_DB_USER'@'127.0.0.1';" +$STD mariadb -u root -e "FLUSH PRIVILEGES;" +msg_ok "Allowed Local TCP Database Access" + 
+fetch_and_deploy_gh_release "matomo" "matomo-org/matomo" "prebuild" "latest" "/opt/matomo" "matomo-*.zip" + +msg_info "Setting up Matomo" +if [[ -d /opt/matomo/matomo ]]; then + rm -rf /opt/matomo/tmp "/opt/matomo/How to install Matomo.html" + find /opt/matomo/matomo -mindepth 1 -maxdepth 1 -exec mv -t /opt/matomo {} + + rm -rf /opt/matomo/matomo +fi +mkdir -p /opt/matomo/tmp +chown -R www-data:www-data /opt/matomo +chmod -R 755 /opt/matomo/tmp +msg_ok "Set up Matomo" + +msg_info "Configuring Caddy" +PHP_VER=$(php -r 'echo PHP_MAJOR_VERSION . "." . PHP_MINOR_VERSION;') +cat </etc/caddy/Caddyfile +:80 { + root * /opt/matomo + @blocked path /config /config/* /tmp /tmp/* /.* /.*/* + respond @blocked 403 + php_fastcgi unix//run/php/php${PHP_VER}-fpm.sock + file_server + encode gzip +} +EOF +usermod -aG www-data caddy +msg_ok "Configured Caddy" + +systemctl enable -q --now php${PHP_VER}-fpm +systemctl restart caddy + +motd_ssh +customize +cleanup_lxc diff --git a/install/matter-server-install.sh b/install/matter-server-install.sh new file mode 100644 index 000000000..4fff7775d --- /dev/null +++ b/install/matter-server-install.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/matter-js/python-matter-server + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y \ + libuv1 \ + libjson-c5 \ + libnl-3-200 \ + libnl-route-3-200 \ + iputils-ping \ + iproute2 +msg_ok "Installed Dependencies" + +UV_PYTHON="3.12" setup_uv + +msg_info "Setting up Matter Server" +mkdir -p /opt/matter-server/data/credentials +if [ -L /data ]; then + rm -f /data +fi +if [ ! 
-e /data ]; then + ln -s /opt/matter-server/data /data +fi +$STD uv venv /opt/matter-server/.venv +MATTER_VERSION=$(get_latest_github_release "matter-js/python-matter-server") +$STD uv pip install --python /opt/matter-server/.venv/bin/python "python-matter-server[server]==${MATTER_VERSION}" +echo "${MATTER_VERSION}" >~/.matter-server +msg_ok "Set up Matter Server" + +fetch_and_deploy_gh_release "chip-ota-provider-app" "home-assistant-libs/matter-linux-ota-provider" "singlefile" "latest" "/usr/local/bin" "chip-ota-provider-app-x86-64" + +msg_info "Configuring Network" +cat </etc/sysctl.d/99-matter.conf +net.ipv4.igmp_max_memberships=1024 +EOF +$STD sysctl -p /etc/sysctl.d/99-matter.conf +msg_ok "Configured Network" + +msg_info "Creating Service" +cat </etc/systemd/system/matter-server.service +[Unit] +Description=Matter Server +After=network.target + +[Service] +Type=simple +User=root +ExecStart=/opt/matter-server/.venv/bin/matter-server --storage-path /data --paa-root-cert-dir /data/credentials +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now matter-server +msg_ok "Created Service" + +motd_ssh +customize +cleanup_lxc diff --git a/install/mealie-install.sh b/install/mealie-install.sh index cae7fac85..f29ea36b4 100644 --- a/install/mealie-install.sh +++ b/install/mealie-install.sh @@ -30,7 +30,7 @@ msg_ok "Installed Dependencies" PYTHON_VERSION="3.12" setup_uv PG_VERSION="16" setup_postgresql NODE_MODULE="yarn" NODE_VERSION="24" setup_nodejs -fetch_and_deploy_gh_release "mealie" "mealie-recipes/mealie" "tarball" "latest" "/opt/mealie" +fetch_and_deploy_gh_release "mealie" "mealie-recipes/mealie" "tarball" PG_DB_NAME="mealie_db" PG_DB_USER="mealie_user" PG_DB_GRANT_SUPERUSER="true" setup_postgresql_db msg_info "Installing Python Dependencies with uv" @@ -42,9 +42,10 @@ msg_info "Building Frontend" MEALIE_VERSION=$(<$HOME/.mealie) export NUXT_TELEMETRY_DISABLED=1 cd /opt/mealie/frontend -$STD sed -i 
"s|https://github.com/mealie-recipes/mealie/commit/|https://github.com/mealie-recipes/mealie/releases/tag/|g" /opt/mealie/frontend/pages/admin/site-settings.vue -$STD sed -i "s|value: data.buildId,|value: \"v${MEALIE_VERSION}\",|g" /opt/mealie/frontend/pages/admin/site-settings.vue -$STD sed -i "s|value: data.production ? i18n.t(\"about.production\") : i18n.t(\"about.development\"),|value: \"bare-metal\",|g" /opt/mealie/frontend/pages/admin/site-settings.vue +SITE_SETTINGS=$(find /opt/mealie/frontend -name "site-settings.vue" -path "*/admin/*" | head -1) +$STD sed -i "s|https://github.com/mealie-recipes/mealie/commit/|https://github.com/mealie-recipes/mealie/releases/tag/|g" "$SITE_SETTINGS" +$STD sed -i "s|value: data.buildId,|value: \"v${MEALIE_VERSION}\",|g" "$SITE_SETTINGS" +$STD sed -i "s|value: data.production ? i18n.t(\"about.production\") : i18n.t(\"about.development\"),|value: \"bare-metal\",|g" "$SITE_SETTINGS" $STD yarn install --prefer-offline --frozen-lockfile --non-interactive --production=false --network-timeout 1000000 $STD yarn generate msg_ok "Built Frontend" @@ -54,11 +55,7 @@ mkdir -p /opt/mealie/mealie/frontend cp -r /opt/mealie/frontend/dist/* /opt/mealie/mealie/frontend/ msg_ok "Copied Frontend" -msg_info "Downloading NLTK Data" -mkdir -p /nltk_data/ -cd /opt/mealie -$STD uv run python -m nltk.downloader -d /nltk_data averaged_perceptron_tagger_eng -msg_ok "Downloaded NLTK Data" +setup_nltk "averaged_perceptron_tagger_eng" "/nltk_data" msg_info "Writing Environment File" SECRET=$(openssl rand -hex 32) diff --git a/install/metube-install.sh b/install/metube-install.sh index ec64699df..734cf583c 100644 --- a/install/metube-install.sh +++ b/install/metube-install.sh @@ -41,6 +41,7 @@ if command -v corepack >/dev/null 2>&1; then $STD corepack enable $STD corepack prepare pnpm --activate || true fi +echo 'onlyBuiltDependencies=*' >> .npmrc $STD pnpm install --frozen-lockfile $STD pnpm run build cd /opt/metube diff --git 
a/install/mini-qr-install.sh b/install/mini-qr-install.sh new file mode 100644 index 000000000..1bfa0728b --- /dev/null +++ b/install/mini-qr-install.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: doge0420 +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/lyqht/mini-qr + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y \ + libharfbuzz0b \ + caddy \ + fontconfig +msg_ok "Installed Dependencies" + +NODE_VERSION="20" setup_nodejs +fetch_and_deploy_gh_release "mini-qr" "lyqht/mini-qr" "tarball" + +msg_info "Building MiniQR" +cd /opt/mini-qr +$STD npm install +$STD npm run build +msg_ok "Built MiniQR" + +msg_info "Configuring Caddy" +cat </etc/caddy/Caddyfile +:80 { + root * /opt/mini-qr/dist + file_server + + # Handle client-side routing + try_files {path} /index.html + + # Cache static assets + @assets { + path /assets/* + } + header @assets Cache-Control "public, immutable, max-age=31536000" + + # Correct MIME types for JS modules + @jsmodules { + path *.js *.mjs + } + header @jsmodules Content-Type "application/javascript" +} +EOF +systemctl enable -q --now caddy +systemctl reload caddy +msg_ok "Configured Caddy" + +motd_ssh +customize +cleanup_lxc diff --git a/install/minthcm-install.sh b/install/minthcm-install.sh new file mode 100644 index 000000000..d9d5071bd --- /dev/null +++ b/install/minthcm-install.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MintHCM +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/minthcm/minthcm +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" + +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +PHP_VERSION="8.2" +PHP_APACHE="YES" 
PHP_MODULE="mysql,redis" PHP_FPM="YES" setup_php +setup_composer +setup_mariadb +$STD mariadb -u root -e "SET GLOBAL sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'" + +fetch_and_deploy_gh_release "MintHCM" "minthcm/minthcm" "tarball" "latest" "/var/www/MintHCM" + +msg_info "Configuring MintHCM" +mkdir -p /etc/php/${PHP_VERSION}/mods-available +cp /var/www/MintHCM/docker/config/000-default.conf /etc/apache2/sites-available/000-default.conf +cp /var/www/MintHCM/docker/config/php-minthcm.ini /etc/php/${PHP_VERSION}/mods-available/php-minthcm.ini +mkdir -p "/etc/php/${PHP_VERSION}/cli/conf.d" "/etc/php/${PHP_VERSION}/apache2/conf.d" +ln -s "/etc/php/${PHP_VERSION}/mods-available/php-minthcm.ini" "/etc/php/${PHP_VERSION}/cli/conf.d/20-minthcm.ini" +ln -s "/etc/php/${PHP_VERSION}/mods-available/php-minthcm.ini" "/etc/php/${PHP_VERSION}/apache2/conf.d/20-minthcm.ini" +chown -R www-data:www-data /var/www/MintHCM +find /var/www/MintHCM -type d -exec chmod 755 {} \; +find /var/www/MintHCM -type f -exec chmod 644 {} \; +mkdir -p /var/www/script +cp /var/www/MintHCM/docker/script/generate_config.php /var/www/script/generate_config.php +cp /var/www/MintHCM/docker/.env /var/www/script/.env +chown -R www-data:www-data /var/www/script +$STD a2enmod rewrite +$STD a2enmod headers +$STD systemctl restart apache2 +msg_ok "Configured MintHCM" + +msg_info "Setting up Elasticsearch" +setup_deb822_repo \ + "elasticsearch" \ + "https://artifacts.elastic.co/GPG-KEY-elasticsearch" \ + "https://artifacts.elastic.co/packages/7.x/apt" \ + "stable" +$STD apt install -y elasticsearch +echo "-Xms2g" >>/etc/elasticsearch/jvm.options +echo "-Xmx2g" >>/etc/elasticsearch/jvm.options +$STD /usr/share/elasticsearch/bin/elasticsearch-plugin install ingest-attachment -b +systemctl enable -q --now elasticsearch +msg_ok "Set up Elasticsearch" + +msg_info "Configuring Database" +DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13) 
+$STD mariadb -u root -e "CREATE USER 'minthcm'@'localhost' IDENTIFIED BY '${DB_PASS}';" +$STD mariadb -u root -e "GRANT ALL ON *.* TO 'minthcm'@'localhost'; FLUSH PRIVILEGES;" +sed -i "s/^DB_HOST=.*/DB_HOST=localhost/" /var/www/script/.env +sed -i "s/^DB_USER=.*/DB_USER=minthcm/" /var/www/script/.env +sed -i "s/^DB_PASS=.*/DB_PASS=$DB_PASS/" /var/www/script/.env +sed -i "s/^ELASTICSEARCH_HOST=.*/ELASTICSEARCH_HOST=localhost/" /var/www/script/.env +msg_ok "Configured Database" + +msg_info "Generating configuration file" +set -a +source /var/www/script/.env +set +a +$STD php /var/www/script/generate_config.php +msg_ok "Generated configuration file" + +msg_info "Installing MintHCM" +cd /var/www/MintHCM +$STD sudo -u www-data php MintCLI install /dev/null 2>&1\n" >/var/spool/cron/crontabs/www-data +service cron start +rm -f /var/www/MintHCM/configMint4 +msg_ok "Installed MintHCM" + +motd_ssh +customize +cleanup_lxc diff --git a/install/monica-install.sh b/install/monica-install.sh index a4d8ed4a7..c09bd0339 100644 --- a/install/monica-install.sh +++ b/install/monica-install.sh @@ -33,7 +33,7 @@ $STD yarn config set ignore-engines true $STD yarn install $STD yarn run production $STD php artisan key:generate -$STD php artisan setup:production --email=admin@helper-scripts.com --password=helper-scripts.com --force +$STD php artisan setup:production --email=admin@community-scripts.org --password=community-scripts.org --force chown -R www-data:www-data /opt/monica chmod -R 775 /opt/monica/storage echo "* * * * * root php /opt/monica/artisan schedule:run >> /dev/null 2>&1" >>/etc/crontab diff --git a/install/motioneye-install.sh b/install/motioneye-install.sh index 362221efb..d3829d572 100644 --- a/install/motioneye-install.sh +++ b/install/motioneye-install.sh @@ -50,6 +50,7 @@ msg_ok "Installed MotionEye" msg_info "Creating Service" curl -fsSL "https://raw.githubusercontent.com/motioneye-project/motioneye/dev/motioneye/extra/motioneye.systemd" -o 
"/etc/systemd/system/motioneye.service" +sed -i 's/^User=.*/User=root/' /etc/systemd/system/motioneye.service systemctl enable -q --now motioneye msg_ok "Created Service" diff --git a/install/myip-install.sh b/install/myip-install.sh index f70847cbb..be8bcb203 100644 --- a/install/myip-install.sh +++ b/install/myip-install.sh @@ -13,7 +13,7 @@ setting_up_container network_check update_os -NODE_VERSION="22" setup_nodejs +NODE_VERSION="24" setup_nodejs fetch_and_deploy_gh_release "myip" "jason5ng32/MyIP" "tarball" msg_info "Configuring MyIP" diff --git a/install/nagios-install.sh b/install/nagios-install.sh new file mode 100644 index 000000000..35f2d86f2 --- /dev/null +++ b/install/nagios-install.sh @@ -0,0 +1,79 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: CanbiZ (MickLesk) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/NagiosEnterprises/nagioscore + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y \ + autoconf \ + automake \ + build-essential \ + bc \ + dc \ + gawk \ + gettext \ + gperf \ + libgd-dev \ + libmcrypt-dev \ + libnet-snmp-perl \ + libssl-dev \ + snmp \ + apache2 \ + apache2-utils +msg_ok "Installed Dependencies" + +PHP_APACHE="YES" setup_php + +fetch_and_deploy_gh_release "nagios" "NagiosEnterprises/nagioscore" "tarball" + +msg_info "Building Nagios Core" +cd /opt/nagios +$STD ./configure --with-httpd-conf=/etc/apache2/sites-enabled +$STD make all +$STD make install-groups-users +usermod -a -G nagios www-data +$STD make install +$STD make install-daemoninit +$STD make install-commandmode +$STD make install-config +$STD make install-webconf +$STD a2enmod rewrite +$STD a2enmod cgi +msg_ok "Built Nagios Core" + +fetch_and_deploy_gh_release "nagios-plugins" "nagios-plugins/nagios-plugins" "tarball" + +msg_info "Building 
Nagios Plugins" +cd /opt/nagios-plugins +$STD ./tools/setup +$STD ./configure +$STD make +$STD make install +setcap cap_net_raw+p /bin/ping +msg_ok "Built Nagios Plugins" + +msg_info "Configuring Web Authentication" +$STD htpasswd -bc /usr/local/nagios/etc/htpasswd.users nagiosadmin nagiosadmin +chown root:www-data /usr/local/nagios/etc/htpasswd.users +chmod 640 /usr/local/nagios/etc/htpasswd.users +msg_ok "Configured Web Authentication" + +msg_info "Starting Services" +systemctl enable -q apache2 +systemctl restart apache2 +systemctl enable -q --now nagios +msg_ok "Started Services" + +motd_ssh +customize +cleanup_lxc diff --git a/install/nametag-install.sh b/install/nametag-install.sh new file mode 100644 index 000000000..8f952153b --- /dev/null +++ b/install/nametag-install.sh @@ -0,0 +1,88 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/mattogodoy/nametag + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +PG_VERSION="16" setup_postgresql +PG_DB_NAME="nametag_db" PG_DB_USER="nametag" setup_postgresql_db +NODE_VERSION="20" setup_nodejs +fetch_and_deploy_gh_release "nametag" "mattogodoy/nametag" "tarball" "latest" "/opt/nametag" + +msg_info "Setting up Application" +cd /opt/nametag +$STD npm ci +DATABASE_URL="postgresql://${PG_DB_USER}:${PG_DB_PASS}@127.0.0.1:5432/${PG_DB_NAME}" $STD npx prisma generate +DATABASE_URL="postgresql://${PG_DB_USER}:${PG_DB_PASS}@127.0.0.1:5432/${PG_DB_NAME}" $STD npx prisma migrate deploy +msg_ok "Set up Application" + +msg_info "Configuring Nametag" +NEXTAUTH_SECRET=$(openssl rand -base64 32) +CRON_SECRET=$(openssl rand -base64 16) +mkdir -p /opt/nametag/data/photos +cat </opt/nametag/.env +DATABASE_URL=postgresql://${PG_DB_USER}:${PG_DB_PASS}@127.0.0.1:5432/${PG_DB_NAME} 
+NEXTAUTH_URL=http://${LOCAL_IP}:3000 +NEXTAUTH_SECRET=${NEXTAUTH_SECRET} +CRON_SECRET=${CRON_SECRET} +PHOTO_STORAGE_PATH=/opt/nametag/data/photos +NODE_ENV=production +EOF +msg_ok "Configured Nametag" + +msg_info "Building Application" +cd /opt/nametag +set -a +source /opt/nametag/.env +set +a +$STD npm run build +cp -r /opt/nametag/.next/static /opt/nametag/.next/standalone/.next/static +cp -r /opt/nametag/public /opt/nametag/.next/standalone/public +msg_ok "Built Application" + +msg_info "Running Production Seed" +cd /opt/nametag +$STD npx esbuild prisma/seed.production.ts --platform=node --format=cjs --outfile=prisma/seed.production.js --bundle --external:@prisma/client --external:pg --minify +$STD node prisma/seed.production.js +msg_ok "Ran Production Seed" + +msg_info "Creating Service" +cat </etc/systemd/system/nametag.service +[Unit] +Description=Nametag - Personal Relationships Manager +After=network.target postgresql.service + +[Service] +Type=simple +WorkingDirectory=/opt/nametag +EnvironmentFile=/opt/nametag/.env +ExecStart=/usr/bin/node /opt/nametag/.next/standalone/server.js +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now nametag +msg_ok "Created Service" + +msg_info "Setting up Cron Jobs" +cat </etc/cron.d/nametag +0 8 * * * root curl -sf -H "Authorization: Bearer ${CRON_SECRET}" http://127.0.0.1:3000/api/cron/send-reminders >/dev/null 2>&1 +0 3 * * * root curl -sf -H "Authorization: Bearer ${CRON_SECRET}" http://127.0.0.1:3000/api/cron/purge-deleted >/dev/null 2>&1 +EOF +chmod 644 /etc/cron.d/nametag +msg_ok "Set up Cron Jobs" + +motd_ssh +customize +cleanup_lxc diff --git a/install/neko-install.sh b/install/neko-install.sh new file mode 100644 index 000000000..3dde5f869 --- /dev/null +++ b/install/neko-install.sh @@ -0,0 +1,255 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: CanbiZ (MickLesk) +# License: MIT | 
https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://neko.m1k1o.net/ + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y \ + supervisor \ + pulseaudio \ + dbus-x11 \ + xserver-xorg-video-dummy \ + xdotool \ + xclip \ + libgtk-3-0 \ + gstreamer1.0-plugins-base \ + gstreamer1.0-plugins-good \ + gstreamer1.0-plugins-bad \ + gstreamer1.0-plugins-ugly \ + gstreamer1.0-pulseaudio \ + openbox \ + firefox-esr \ + fonts-noto-color-emoji \ + fonts-wqy-zenhei +msg_ok "Installed Dependencies" +systemctl disable -q --now supervisor + +msg_info "Installing Build Dependencies" +$STD apt install -y \ + build-essential \ + pkg-config \ + libx11-dev \ + libxrandr-dev \ + libxtst-dev \ + libgtk-3-dev \ + libxcvt-dev \ + libgstreamer1.0-dev \ + libgstreamer-plugins-base1.0-dev +msg_ok "Installed Build Dependencies" + +NODE_VERSION="22" setup_nodejs +setup_go + +fetch_and_deploy_gh_release "neko" "m1k1o/neko" "tarball" + +msg_info "Building Client" +cd /opt/neko/client +$STD npm install +$STD npm run build +mkdir -p /var/www +cp -r /opt/neko/client/dist/* /var/www/ +msg_ok "Built Client" + +msg_info "Building Server" +cd /opt/neko/server +$STD ./build +cp /opt/neko/server/bin/neko /usr/bin/neko +mkdir -p /etc/neko/plugins +cp -r /opt/neko/server/bin/plugins/* /etc/neko/plugins/ 2>/dev/null || true +msg_ok "Built Server" + +msg_info "Setting up Runtime" +useradd -m -s /bin/bash neko +usermod -aG audio,video neko + +mkdir -p /etc/neko/supervisord /var/www /var/log/neko /tmp/.X11-unix /tmp/runtime-neko /home/neko/.config/pulse /home/neko/.local/share/xorg +chmod 1777 /tmp/.X11-unix +chmod 1777 /var/log/neko +chmod 0700 /tmp/runtime-neko +chown neko /tmp/.X11-unix /var/log/neko /tmp/runtime-neko +chown -R neko:neko /home/neko + +cp /opt/neko/runtime/xorg.conf /etc/neko/xorg.conf +# Remove the dummy_touchscreen InputDevice section 
(requires custom "neko" Xorg driver not available bare-metal) +sed -i '/Section "InputDevice"/{N;/dummy_touchscreen/{:l;N;/EndSection/!bl;d}}' /etc/neko/xorg.conf +sed -i '/dummy_touchscreen/d' /etc/neko/xorg.conf +sed -i 's/InputDevice "dummy_mouse"/InputDevice "dummy_mouse" "CorePointer"/' /etc/neko/xorg.conf +cp /opt/neko/runtime/default.pa /etc/pulse/default.pa + +cat </etc/neko/supervisord.conf +[supervisord] +nodaemon=true +user=root +pidfile=/var/run/supervisord.pid +logfile=/dev/null +logfile_maxbytes=0 +loglevel=debug + +[include] +files=/etc/neko/supervisord/*.conf + +[program:x-server] +environment=HOME="/home/neko",USER="neko" +command=/usr/bin/X :99.0 -config /etc/neko/xorg.conf -noreset -nolisten tcp +autorestart=true +priority=300 +user=neko +stdout_logfile=/var/log/neko/xorg.log +stdout_logfile_maxbytes=100MB +stdout_logfile_backups=10 +redirect_stderr=true + +[program:pulseaudio] +environment=HOME="/home/neko",USER="neko",DISPLAY=":99.0" +command=/usr/bin/pulseaudio --log-level=error --disallow-module-loading --disallow-exit --exit-idle-time=-1 +autorestart=true +priority=300 +user=neko +stdout_logfile=/var/log/neko/pulseaudio.log +stdout_logfile_maxbytes=100MB +stdout_logfile_backups=10 +redirect_stderr=true + +[program:neko] +environment=HOME="/home/neko",USER="neko",DISPLAY=":99.0" +command=/usr/bin/neko serve --server.static "/var/www" +stopsignal=INT +stopwaitsecs=3 +autorestart=true +priority=800 +user=neko +stdout_logfile=/var/log/neko/neko.log +stdout_logfile_maxbytes=100MB +stdout_logfile_backups=10 +redirect_stderr=true + +[unix_http_server] +file=/var/run/supervisor.sock +chmod=0770 +chown=root:neko + +[supervisorctl] +serverurl=unix:///var/run/supervisor.sock + +[rpcinterface:supervisor] +supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface +EOF + +cat </etc/neko/supervisord/firefox.conf +[program:firefox] +environment=HOME="/home/neko",USER="neko",DISPLAY=":99.0" +command=/usr/bin/firefox-esr --no-remote 
--display=:99.0 -width 1280 -height 720 +stopsignal=INT +autorestart=true +priority=800 +user=neko +stdout_logfile=/var/log/neko/firefox.log +stdout_logfile_maxbytes=100MB +stdout_logfile_backups=10 +redirect_stderr=true + +[program:openbox] +environment=HOME="/home/neko",USER="neko",DISPLAY=":99.0" +command=/usr/bin/openbox --config-file /etc/neko/openbox.xml +autorestart=true +priority=300 +user=neko +stdout_logfile=/var/log/neko/openbox.log +stdout_logfile_maxbytes=100MB +stdout_logfile_backups=10 +redirect_stderr=true +EOF + +cat <<'EOF' >/etc/neko/openbox.xml + + + + + no + true + yes + normal + + + + yes + no + yes + no + 200 + no + + + Smart +
yes
+
+ + 1 + 1 + 0 + +
+EOF + +cat </etc/neko/neko.yaml +server: + bind: "0.0.0.0:8080" + static: "/var/www" +session: + cookie: + enabled: false +webrtc: + icelite: true + nat1to1: + - "${LOCAL_IP}" + epr: "59000-59100" +desktop: + input: + enabled: false +member: + provider: "multiuser" + multiuser: + admin_password: "admin" + user_password: "neko" +EOF +msg_ok "Set up Runtime" + +msg_info "Creating Service" +cat </etc/systemd/system/neko.service +[Unit] +Description=Neko Virtual Browser +After=network.target + +[Service] +Type=simple +User=root +Environment=USER=neko +Environment=DISPLAY=:99.0 +Environment=PULSE_SERVER=unix:/tmp/pulseaudio.socket +Environment=XDG_RUNTIME_DIR=/tmp/runtime-neko +Environment=NEKO_PLUGINS_ENABLED=true +Environment=NEKO_PLUGINS_DIR=/etc/neko/plugins/ +Environment=NEKO_CONFIG=/etc/neko/neko.yaml +ExecStart=/usr/bin/supervisord -c /etc/neko/supervisord.conf -n +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now neko +msg_ok "Created Service" + +motd_ssh +customize +cleanup_lxc diff --git a/install/netboot-xyz-install.sh b/install/netboot-xyz-install.sh new file mode 100644 index 000000000..e63f3de3f --- /dev/null +++ b/install/netboot-xyz-install.sh @@ -0,0 +1,102 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://netboot.xyz + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y \ + nginx \ + tftpd-hpa +msg_ok "Installed Dependencies" + +fetch_and_deploy_gh_release "netboot-xyz" "netbootxyz/netboot.xyz" "prebuild" "latest" "/var/www/html" "menus.tar.gz" + +# x86_64 UEFI bootloaders +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-efi" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" 
"netboot.xyz.efi" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-efi-dsk" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz.efi.dsk" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-snp" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-snp.efi" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-snp-dsk" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-snp.efi.dsk" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-snponly" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-snponly.efi" +# x86_64 metal (code-signed) UEFI bootloaders +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-metal" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-metal.efi" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-metal-dsk" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-metal.efi.dsk" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-metal-snp" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-metal-snp.efi" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-metal-snp-dsk" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-metal-snp.efi.dsk" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-metal-snponly" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-metal-snponly.efi" +# x86_64 BIOS/Legacy bootloaders +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-kpxe" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz.kpxe" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-undionly" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-undionly.kpxe" +USE_ORIGINAL_FILENAME=true 
fetch_and_deploy_gh_release "netboot-xyz-metal-kpxe" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-metal.kpxe" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-lkrn" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz.lkrn" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-linux-bin" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-linux.bin" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-dsk" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz.dsk" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-pdsk" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz.pdsk" +# ARM64 bootloaders +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-arm64" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-arm64.efi" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-arm64-snp" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-arm64-snp.efi" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-arm64-snponly" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-arm64-snponly.efi" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-metal-arm64" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-metal-arm64.efi" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-metal-arm64-snp" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-metal-arm64-snp.efi" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-metal-arm64-snponly" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-metal-arm64-snponly.efi" +# ISO and IMG images (for virtual/physical media creation) +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-iso" 
"netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz.iso" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-img" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz.img" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-arm64-iso" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-arm64.iso" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-arm64-img" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-arm64.img" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-multiarch-iso" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-multiarch.iso" +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-multiarch-img" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-multiarch.img" +# SHA256 checksums +USE_ORIGINAL_FILENAME=true fetch_and_deploy_gh_release "netboot-xyz-checksums" "netbootxyz/netboot.xyz" "singlefile" "latest" "/var/www/html" "netboot.xyz-sha256-checksums.txt" + +msg_info "Configuring Webserver" +rm -f /etc/nginx/sites-enabled/default +cat <<'EOF' >/etc/nginx/sites-available/netboot-xyz +server { + listen 80 default_server; + listen [::]:80 default_server; + + root /var/www/html; + server_name _; + + location / { + autoindex on; + add_header Access-Control-Allow-Origin "*"; + add_header Access-Control-Allow-Headers "Content-Type"; + } + + # The index.html from menus.tar.gz links bootloaders under /ipxe/ — + # serve them from the same root directory via alias + location /ipxe/ { + alias /var/www/html/; + autoindex on; + add_header Access-Control-Allow-Origin "*"; + } +} +EOF +ln -sf /etc/nginx/sites-available/netboot-xyz /etc/nginx/sites-enabled/netboot-xyz +$STD systemctl reload nginx +msg_ok "Configured Webserver" + +msg_info "Configuring TFTP Server" +cat </etc/default/tftpd-hpa +TFTP_USERNAME="tftp" 
+TFTP_DIRECTORY="/var/www/html" +TFTP_ADDRESS="0.0.0.0:69" +TFTP_OPTIONS="--secure" +EOF +systemctl enable -q --now tftpd-hpa +msg_ok "Configured TFTP Server" + +motd_ssh +customize +cleanup_lxc diff --git a/install/nextexplorer-install.sh b/install/nextexplorer-install.sh new file mode 100644 index 000000000..b6510a71d --- /dev/null +++ b/install/nextexplorer-install.sh @@ -0,0 +1,164 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: vhsdream +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/nxzai/nextExplorer + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y \ + ripgrep \ + imagemagick \ + ffmpeg \ + libva-drm2 \ + libva2 \ + mesa-va-drivers \ + vainfo +msg_ok "Installed Dependencies" + +NODE_VERSION="24" setup_nodejs + +fetch_and_deploy_gh_release "nextExplorer" "nxzai/nextExplorer" "tarball" "latest" "/opt/nextExplorer" + +msg_info "Building nextExplorer" +APP_DIR="/opt/nextExplorer/app" +LOCAL_IP="$(hostname -I | awk '{print $1}')" +mkdir -p "$APP_DIR" +mkdir -p /etc/nextExplorer +cd /opt/nextExplorer +export NODE_ENV=production +$STD npm ci --omit=dev --workspace backend +mv node_modules "$APP_DIR" +mv backend/{src,package.json} "$APP_DIR" +unset NODE_ENV + +export NODE_ENV=development +export NODE_OPTIONS="--max-old-space-size=2048" +$STD npm ci --workspace frontend +$STD npm run -w frontend build -- --sourcemap false +unset NODE_ENV +mv frontend/dist/ "$APP_DIR"/src/public +msg_ok "Built nextExplorer" + +msg_info "Configuring nextExplorer" +SECRET=$(openssl rand -hex 32) +cat </etc/nextExplorer/.env +NODE_ENV=production +PORT=3000 + +VOLUME_ROOT=/mnt +CONFIG_DIR=/etc/nextExplorer +CACHE_DIR=/etc/nextExplorer/cache +# USER_ROOT= + +PUBLIC_URL=${LOCAL_IP}:3000 +# TRUST_PROXY= +# CORS_ORIGINS= + +TERMINAL_ENABLED=false + 
+LOG_LEVEL=info +DEBUG=false +ENABLE_HTTP_LOGGING=false + +AUTH_ENABLED=true +AUTH_MODE=both +SESSION_SECRET="${SECRET}" +# AUTH_MAX_FAILED= +# AUTH_LOCK_MINUTES= +# AUTH_USER_EMAIL= +# AUTH_USER_PASSWORD= + +# OIDC_ENABLED= +# OIDC_ISSUER= +# OIDC_AUTHORIZATION_URL= +# OIDC_TOKEN_URL= +# OIDC_USERINFO_URL= +# OIDC_CLIENT_ID= +# OIDC_CLIENT_SECRET= +# OIDC_CALLBACK_URL= +# OIDC_LOGOUT_URL= +# OIDC_SCOPES= +# OIDC_AUTO_CREATE_USERS=true + +# SEARCH_DEEP= +# SEARCH_RIPGREP= +# SEARCH_MAX_FILESIZE= + +# ONLYOFFICE_URL= +# ONLYOFFICE_SECRET= +# ONLYOFFICE_LANG= +# ONLYOFFICE_FORCE_SAVE= +# ONLYOFFICE_FILE_EXTENSIONS= + +# COLLABORA_URL= +# COLLABORA_DISCOVERY_URL= +# COLLABORA_SECRET= +# COLLABORA_LANG= +# COLLABORA_FILE_EXTENSIONS= + +SHOW_VOLUME_USAGE=true +# USER_DIR_ENABLED= +# SKIP_HOME= + +# EDITOR_EXTENSIONS= + +# FFMPEG_PATH= +# FFPROBE_PATH= + +## Hardware acceleration +# FFMPEG_HWACCEL=vaapi +# FFMPEG_HWACCEL_DEVICE=/dev/dri/renderD128 +# FFMPEG_HWACCEL_OUTPUT_FORMAT=nv12 + +FAVORITES_DEFAULT_ICON=outline.StarIcon + +SHARES_ENABLED=true +# SHARES_TOKEN_LENGTH=10 +# SHARES_MAX_PER_USER=100 +# SHARES_DEFAULT_EXPIRY_DAYS=30 +# SHARES_GUEST_SESSION_HOURS=24 +# SHARES_ALLOW_PASSWORD=true +# SHARES_ALLOW_ANONYMOUS=true +EOF +chmod 600 /etc/nextExplorer/.env +$STD useradd -U -s /usr/sbin/nologin -m -d /home/explorer explorer +chown -R explorer:explorer "$APP_DIR" /etc/nextExplorer +sed -i "\|version|s|$(jq -cr '.version' ${APP_DIR}/package.json)|$(cat ~/.nextexplorer)|" "$APP_DIR"/package.json +msg_ok "Configured nextExplorer" + +msg_info "Creating nextExplorer Service" +cat </etc/systemd/system/nextexplorer.service +[Unit] +Description=nextExplorer Service +After=network.target + +[Service] +Type=simple +User=explorer +Group=explorer +WorkingDirectory=/opt/nextExplorer/app +EnvironmentFile=/etc/nextExplorer/.env +ExecStart=/usr/bin/node ./src/server.js +Restart=always +RestartSec=5 +StandardOutput=journal +StandardError=journal + +[Install] 
+WantedBy=multi-user.target +EOF +$STD systemctl enable -q --now nextexplorer +msg_ok "Created nextExplorer Service" + +motd_ssh +customize +cleanup_lxc diff --git a/install/nginxproxymanager-install.sh b/install/nginxproxymanager-install.sh index 12015a74e..307a14389 100644 --- a/install/nginxproxymanager-install.sh +++ b/install/nginxproxymanager-install.sh @@ -14,23 +14,20 @@ network_check update_os msg_info "Installing Dependencies" -$STD apt update -$STD apt -y install \ - ca-certificates \ +$STD apt install -y \ apache2-utils \ logrotate \ build-essential \ - git -msg_ok "Installed Dependencies" - -msg_info "Installing Python Dependencies" -$STD apt install -y \ + libpcre3-dev \ + libssl-dev \ + zlib1g-dev \ + git \ python3 \ python3-dev \ python3-pip \ python3-venv \ python3-cffi -msg_ok "Installed Python Dependencies" +msg_ok "Installed Dependencies" msg_info "Setting up Certbot" $STD python3 -m venv /opt/certbot @@ -39,33 +36,51 @@ $STD /opt/certbot/bin/pip install certbot certbot-dns-cloudflare ln -sf /opt/certbot/bin/certbot /usr/local/bin/certbot msg_ok "Set up Certbot" -msg_info "Installing Openresty" -curl -fsSL "https://openresty.org/package/pubkey.gpg" | gpg --dearmor -o /etc/apt/trusted.gpg.d/openresty.gpg -cat <<'EOF' >/etc/apt/sources.list.d/openresty.sources -Types: deb -URIs: http://openresty.org/package/debian/ -Suites: bookworm -Components: openresty -Signed-By: /etc/apt/trusted.gpg.d/openresty.gpg +fetch_and_deploy_gh_release "openresty" "openresty/openresty" "prebuild" "latest" "/opt/openresty" "openresty-*.tar.gz" + +msg_info "Building OpenResty" +cd /opt/openresty +$STD ./configure \ + --with-http_v2_module \ + --with-http_realip_module \ + --with-http_stub_status_module \ + --with-http_ssl_module \ + --with-http_sub_module \ + --with-http_auth_request_module \ + --with-pcre-jit \ + --with-stream \ + --with-stream_ssl_module +$STD make -j"$(nproc)" +$STD make install +rm -rf /opt/openresty + +cat <<'EOF' 
>/lib/systemd/system/openresty.service +[Unit] +Description=The OpenResty Application Platform +After=syslog.target network-online.target remote-fs.target nss-lookup.target +Wants=network-online.target + +[Service] +Type=simple +ExecStartPre=-/bin/mkdir -p /tmp/nginx/body /run/nginx +ExecStartPre=/usr/local/openresty/nginx/sbin/nginx -t +ExecStart=/usr/local/openresty/nginx/sbin/nginx -g 'daemon off;' + +[Install] +WantedBy=multi-user.target EOF -$STD apt update -$STD apt -y install openresty -msg_ok "Installed Openresty" +msg_ok "Built OpenResty" NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs - -RELEASE=$(curl -fsSL https://api.github.com/repos/NginxProxyManager/nginx-proxy-manager/releases/latest | - grep "tag_name" | - awk '{print substr($2, 3, length($2)-4) }') - +RELEASE=$(get_latest_github_release "NginxProxyManager/nginx-proxy-manager") fetch_and_deploy_gh_release "nginxproxymanager" "NginxProxyManager/nginx-proxy-manager" "tarball" "v${RELEASE}" msg_info "Setting up Environment" ln -sf /usr/bin/python3 /usr/bin/python ln -sf /usr/local/openresty/nginx/sbin/nginx /usr/sbin/nginx ln -sf /usr/local/openresty/nginx/ /etc/nginx -sed -i "s|\"version\": \"2.0.0\"|\"version\": \"$RELEASE\"|" /opt/nginxproxymanager/backend/package.json -sed -i "s|\"version\": \"2.0.0\"|\"version\": \"$RELEASE\"|" /opt/nginxproxymanager/frontend/package.json +sed -i "0,/\"version\": \"[^\"]*\"/s|\"version\": \"[^\"]*\"|\"version\": \"$RELEASE\"|" /opt/nginxproxymanager/backend/package.json +sed -i "0,/\"version\": \"[^\"]*\"/s|\"version\": \"[^\"]*\"|\"version\": \"$RELEASE\"|" /opt/nginxproxymanager/frontend/package.json sed -i 's+^daemon+#daemon+g' /opt/nginxproxymanager/docker/rootfs/etc/nginx/nginx.conf NGINX_CONFS=$(find /opt/nginxproxymanager -type f -name "*.conf") for NGINX_CONF in $NGINX_CONFS; do @@ -169,7 +184,6 @@ sed -i 's/user npm/user root/g; s/^pid/#pid/g' /usr/local/openresty/nginx/conf/n sed -r -i 's/^([[:space:]]*)su npm npm/\1#su npm npm/g;' 
/etc/logrotate.d/nginx-proxy-manager systemctl enable -q --now openresty systemctl enable -q --now npm -systemctl restart openresty msg_ok "Started Services" motd_ssh diff --git a/install/nocodb-install.sh b/install/nocodb-install.sh index fbd5b764c..81e31e7d8 100644 --- a/install/nocodb-install.sh +++ b/install/nocodb-install.sh @@ -13,11 +13,11 @@ setting_up_container network_check update_os -fetch_and_deploy_gh_release "nocodb" "nocodb/nocodb" "singlefile" "0.301.1" "/opt/nocodb/" "Noco-linux-x64" +fetch_and_deploy_gh_release "nocodb" "nocodb/nocodb" "singlefile" "latest" "/opt/nocodb/" "Noco-linux-x64" msg_info "Creating Service" cat </etc/systemd/system/nocodb.service -echo "[Unit] +[Unit] Description=nocodb [Service] diff --git a/install/nodebb-install.sh b/install/nodebb-install.sh index 3823d59a0..73468576c 100644 --- a/install/nodebb-install.sh +++ b/install/nodebb-install.sh @@ -91,16 +91,16 @@ expect "Format: mongodb://*" { send "$MONGO_CONNECTION_STRING\r" } expect "Administrator username" { - send "helper-scripts\r" + send "community-scripts\r" } expect "Administrator email address" { - send "helper-scripts@local.com\r" + send "admin@community-scripts.org\r" } expect "Password" { - send "helper-scripts\r" + send "community-scripts\r" } expect "Confirm Password" { - send "helper-scripts\r" + send "community-scripts\r" } expect eof EOF diff --git a/install/nodecast-tv-install.sh b/install/nodecast-tv-install.sh index d4b3652bd..ecbb2e4c3 100644 --- a/install/nodecast-tv-install.sh +++ b/install/nodecast-tv-install.sh @@ -13,7 +13,7 @@ setting_up_container network_check update_os -fetch_and_deploy_gh_release "nodecast-tv" "technomancer702/nodecast-tv" +fetch_and_deploy_gh_release "nodecast-tv" "technomancer702/nodecast-tv" "tarball" NODE_VERSION="20" setup_nodejs msg_info "Installing Dependencies" diff --git a/install/npmplus-install.sh b/install/npmplus-install.sh index 33144093e..64f04dae9 100644 --- a/install/npmplus-install.sh +++ 
b/install/npmplus-install.sh @@ -60,7 +60,7 @@ read -r -p "${TAB3}Enter your ACME Email: " ACME_EMAIL_INPUT yq -i " .services.npmplus.environment |= (map(select(. != \"TZ=*\" and . != \"ACME_EMAIL=*\" and . != \"INITIAL_ADMIN_EMAIL=*\" and . != \"INITIAL_ADMIN_PASSWORD=*\")) + - [\"TZ=$TZ_INPUT\", \"ACME_EMAIL=$ACME_EMAIL_INPUT\", \"INITIAL_ADMIN_EMAIL=admin@local.com\", \"INITIAL_ADMIN_PASSWORD=helper-scripts.com\"]) + [\"TZ=$TZ_INPUT\", \"ACME_EMAIL=$ACME_EMAIL_INPUT\", \"INITIAL_ADMIN_EMAIL=admin@local.com\", \"INITIAL_ADMIN_PASSWORD=community-scripts.org\"]) " /opt/compose.yaml msg_info "Building and Starting NPMplus (Patience)" diff --git a/install/ollama-install.sh b/install/ollama-install.sh index 6f4a5db13..161d354fa 100644 --- a/install/ollama-install.sh +++ b/install/ollama-install.sh @@ -22,7 +22,7 @@ msg_ok "Installed Dependencies" msg_info "Setting up Intel® Repositories" mkdir -p /usr/share/keyrings -curl -fsSL https://repositories.intel.com/gpu/intel-graphics.key | gpg --dearmor -o /usr/share/keyrings/intel-graphics.gpg +curl -fsSL https://repositories.intel.com/gpu/intel-graphics.key | gpg --dearmor -o /usr/share/keyrings/intel-graphics.gpg 2>/dev/null || true cat </etc/apt/sources.list.d/intel-gpu.sources Types: deb URIs: https://repositories.intel.com/gpu/ubuntu @@ -31,7 +31,7 @@ Components: client Architectures: amd64 i386 Signed-By: /usr/share/keyrings/intel-graphics.gpg EOF -curl -fsSL https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor -o /usr/share/keyrings/oneapi-archive-keyring.gpg +curl -fsSL https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor -o /usr/share/keyrings/oneapi-archive-keyring.gpg 2>/dev/null || true cat </etc/apt/sources.list.d/oneAPI.sources Types: deb URIs: https://apt.repos.intel.com/oneapi @@ -42,8 +42,6 @@ EOF $STD apt update msg_ok "Set up Intel® Repositories" -setup_hwaccel - msg_info "Installing Intel® Level Zero" # Debian 13+ has newer 
Level Zero packages in system repos that conflict with Intel repo packages if is_debian && [[ "$(get_os_version_major)" -ge 13 ]]; then @@ -89,11 +87,11 @@ msg_info "Creating ollama User and Group" if ! id ollama >/dev/null 2>&1; then useradd -r -s /usr/sbin/nologin -U -m -d /usr/share/ollama ollama fi -$STD usermod -aG render ollama || true -$STD usermod -aG video ollama || true $STD usermod -aG ollama $(id -u -n) msg_ok "Created ollama User and adjusted Groups" +setup_hwaccel "ollama" + msg_info "Creating Service" cat </etc/systemd/system/ollama.service [Unit] diff --git a/install/opencloud-install.sh b/install/opencloud-install.sh index f99a7efd5..8181eb060 100644 --- a/install/opencloud-install.sh +++ b/install/opencloud-install.sh @@ -64,7 +64,7 @@ $STD sudo -u cool coolconfig set-admin-password --user=admin --password="$COOLPA echo "$COOLPASS" >~/.coolpass msg_ok "Installed Collabora Online" -fetch_and_deploy_gh_release "OpenCloud" "opencloud-eu/opencloud" "singlefile" "v5.2.0" "/usr/bin" "opencloud-*-linux-amd64" +fetch_and_deploy_gh_release "OpenCloud" "opencloud-eu/opencloud" "singlefile" "v6.1.0" "/usr/bin" "opencloud-*-linux-amd64" mv /usr/bin/OpenCloud /usr/bin/opencloud msg_info "Configuring OpenCloud" diff --git a/install/openthread-br-install.sh b/install/openthread-br-install.sh new file mode 100644 index 000000000..82c3fea45 --- /dev/null +++ b/install/openthread-br-install.sh @@ -0,0 +1,94 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://openthread.io/guides/border-router + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y \ + build-essential \ + cmake \ + ninja-build \ + pkg-config \ + git \ + iproute2 \ + libreadline-dev \ + libncurses-dev \ + rsyslog \ + dbus \ 
+ libdbus-1-dev \ + libjsoncpp-dev \ + iptables \ + ipset \ + bind9 \ + libnetfilter-queue1 \ + libnetfilter-queue-dev \ + libprotobuf-dev \ + protobuf-compiler \ + socat +msg_ok "Installed Dependencies" + +setup_nodejs + +msg_info "Cloning OpenThread Border Router" +# git clone is needed to fetch submodules, fetch_and_deploy_gh_release doesn't support this. We use --depth 1 to minimize the amount of data cloned, but it still may take a while. +$STD git clone --depth 1 https://github.com/openthread/ot-br-posix /opt/ot-br-posix +cd /opt/ot-br-posix +$STD git submodule update --depth 1 --init --recursive +msg_ok "Cloned OpenThread Border Router" + +msg_info "Building OpenThread Border Router (Patience)" +mkdir -p build && cd build +$STD cmake -GNinja \ + -DBUILD_TESTING=OFF \ + -DCMAKE_INSTALL_PREFIX=/usr \ + -DOTBR_DBUS=ON \ + -DOTBR_MDNS=openthread \ + -DOTBR_REST=ON \ + -DOTBR_WEB=ON \ + -DOTBR_BORDER_ROUTING=ON \ + -DOTBR_BACKBONE_ROUTER=ON \ + -DOT_FIREWALL=ON \ + -DOT_POSIX_NAT64_CIDR="192.168.255.0/24" \ + .. 
+$STD ninja +$STD ninja install +msg_ok "Built OpenThread Border Router" + +msg_info "Configuring Network" +cat <<EOF >/etc/sysctl.d/99-otbr.conf +net.ipv6.conf.all.forwarding=1 +net.ipv4.ip_forward=1 +EOF +$STD sysctl -p /etc/sysctl.d/99-otbr.conf +msg_ok "Configured Network" + +msg_info "Configuring Services" +cat <<'EOF' >/etc/default/otbr-agent +# USB example: +# OTBR_AGENT_OPTS="-I wpan0 -B eth0 --vendor-name OpenThread --model-name BorderRouter --rest-listen-address 0.0.0.0 --rest-listen-port 8081 spinel+hdlc+uart:///dev/ttyACM0" +# TCP via socat (for network-attached RCP like SLZB-06/SLZB-MR3): + +# OTBR_AGENT_OPTS="-I wpan0 -B eth0 --vendor-name OpenThread --model-name BorderRouter --rest-listen-address 0.0.0.0 --rest-listen-port 8081 spinel+hdlc+forkpty:///usr/bin/socat?forkpty-arg=-,rawer&forkpty-arg=tcp:IP:PORT trel://eth0" +OTBR_AGENT_OPTS="-I wpan0 -B eth0 --vendor-name OpenThread --model-name BorderRouter --rest-listen-address 0.0.0.0 --rest-listen-port 8081 spinel+hdlc+uart:///dev/ttyACM0" +EOF +cat <<'EOF' >/etc/default/otbr-web +OTBR_WEB_OPTS="-I wpan0 -a 0.0.0.0 -p 80" +EOF +systemctl enable -q dbus rsyslog otbr-agent otbr-web +systemctl enable -q bind9 2>/dev/null || systemctl enable -q named 2>/dev/null || true +systemctl start -q dbus rsyslog bind9 +msg_ok "Configured Services" + +motd_ssh +customize +cleanup_lxc diff --git a/install/outline-install.sh b/install/outline-install.sh index 0c30cac1d..9bacffe8f 100644 --- a/install/outline-install.sh +++ b/install/outline-install.sh @@ -20,7 +20,7 @@ $STD apt install -y \ redis msg_ok "Installed Dependencies" -NODE_VERSION="22" setup_nodejs +NODE_VERSION="24" setup_nodejs PG_VERSION="16" setup_postgresql PG_DB_NAME="outline" PG_DB_USER="outline" setup_postgresql_db diff --git a/install/ownfoil-install.sh b/install/ownfoil-install.sh new file mode 100644 index 000000000..6717cfbe4 --- /dev/null +++ b/install/ownfoil-install.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 
community-scripts ORG +# Author: pajjski +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/a1ex4/ownfoil + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y git +msg_ok "Installed Dependencies" + +setup_uv +fetch_and_deploy_gh_release "ownfoil" "a1ex4/ownfoil" "tarball" + +msg_info "Setting up Ownfoil" +cd /opt/ownfoil +$STD uv venv .venv +$STD source .venv/bin/activate +$STD uv pip install -r requirements.txt +msg_ok "Setup ownfoil" + +msg_info "Creating Service" +cat <<EOF >/etc/systemd/system/ownfoil.service +[Unit] +Description=ownfoil Service +After=network.target + +[Service] +Type=simple +User=root +WorkingDirectory=/opt/ownfoil +ExecStart=/opt/ownfoil/.venv/bin/python /opt/ownfoil/app/app.py +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now ownfoil +msg_ok "Created Service" + +motd_ssh +customize +cleanup_lxc diff --git a/install/pangolin-install.sh b/install/pangolin-install.sh index b522551ac..64fcdfcc3 100644 --- a/install/pangolin-install.sh +++ b/install/pangolin-install.sh @@ -22,7 +22,8 @@ $STD apt install -y \ msg_ok "Installed Dependencies" NODE_VERSION="24" setup_nodejs -fetch_and_deploy_gh_release "pangolin" "fosrl/pangolin" "tarball" +PANGOLIN_VERSION="${PANGOLIN_VERSION:-1.18.3}" +fetch_and_deploy_gh_release "pangolin" "fosrl/pangolin" "tarball" "$PANGOLIN_VERSION" fetch_and_deploy_gh_release "gerbil" "fosrl/gerbil" "singlefile" "latest" "/usr/bin" "gerbil_linux_amd64" fetch_and_deploy_gh_release "traefik" "traefik/traefik" "prebuild" "latest" "/usr/bin" "traefik_v*_linux_amd64.tar.gz" @@ -204,6 +205,7 @@ User=root Environment=NODE_ENV=production Environment=ENVIRONMENT=prod WorkingDirectory=/opt/pangolin +ExecStartPre=/usr/bin/node dist/migrations.mjs ExecStart=/usr/bin/node --enable-source-maps 
dist/server.mjs Restart=always RestartSec=10 diff --git a/install/paperless-ngx-install.sh b/install/paperless-ngx-install.sh index 2b4206251..b7d1745a4 100644 --- a/install/paperless-ngx-install.sh +++ b/install/paperless-ngx-install.sh @@ -94,18 +94,12 @@ user.save() EOF msg_ok "Set up admin Paperless-ngx User & Password" -msg_info "Installing Natural Language Toolkit (Patience)" -cd /opt/paperless -$STD uv run python -m nltk.downloader -d /usr/share/nltk_data snowball_data -$STD uv run python -m nltk.downloader -d /usr/share/nltk_data stopwords -$STD uv run python -m nltk.downloader -d /usr/share/nltk_data punkt_tab || - $STD uv run python -m nltk.downloader -d /usr/share/nltk_data punkt +setup_nltk "snowball_data stopwords punkt_tab" "/usr/share/nltk_data" for policy_file in /etc/ImageMagick-6/policy.xml /etc/ImageMagick-7/policy.xml; do if [[ -f "$policy_file" ]]; then sed -i -e 's/rights="none" pattern="PDF"/rights="read|write" pattern="PDF"/' "$policy_file" fi done -msg_ok "Installed Natural Language Toolkit" msg_info "Creating Services" cat </etc/systemd/system/paperless-scheduler.service diff --git a/install/part-db-install.sh b/install/part-db-install.sh index 23d8ae678..110ed3acf 100644 --- a/install/part-db-install.sh +++ b/install/part-db-install.sh @@ -13,27 +13,19 @@ setting_up_container network_check update_os -NODE_VERSION="22" NODE_MODULE="yarn@latest" setup_nodejs PG_VERSION="16" setup_postgresql PG_DB_NAME="partdb" PG_DB_USER="partdb" setup_postgresql_db PHP_VERSION="8.4" PHP_APACHE="YES" PHP_MODULE="xsl" PHP_POST_MAX_SIZE="100M" PHP_UPLOAD_MAX_FILESIZE="100M" setup_php setup_composer -msg_info "Installing Part-DB (Patience)" -cd /opt -RELEASE=$(get_latest_github_release "Part-DB/Part-DB-server") -curl -fsSL "https://github.com/Part-DB/Part-DB-server/archive/refs/tags/v${RELEASE}.zip" -o "/opt/v${RELEASE}.zip" -$STD unzip "v${RELEASE}.zip" -mv /opt/Part-DB-server-${RELEASE}/ /opt/partdb +fetch_and_deploy_gh_release "partdb" 
"Part-DB/Part-DB-server" "prebuild" "latest" "/opt/partdb" "partdb_with_assets.zip" +msg_info "Installing Part-DB" cd /opt/partdb/ cp .env .env.local sed -i "s|DATABASE_URL=\"sqlite:///%kernel.project_dir%/var/app.db\"|DATABASE_URL=\"postgresql://${PG_DB_USER}:${PG_DB_PASS}@127.0.0.1:5432/${PG_DB_NAME}?serverVersion=12.19&charset=utf8\"|" .env.local - export COMPOSER_ALLOW_SUPERUSER=1 $STD composer install --no-dev -o --no-interaction -$STD yarn install -$STD yarn build $STD php bin/console cache:clear php bin/console doctrine:migrations:migrate -n >~/database-migration-output chown -R www-data:www-data /opt/partdb @@ -44,8 +36,6 @@ ADMIN_PASS=$(grep -oP 'The initial password for the "admin" user is: \K\w+' ~/da echo "Part-DB Admin Password: $ADMIN_PASS" } >>~/partdb.creds rm -rf ~/database-migration-output -rm -rf "/opt/v${RELEASE}.zip" -echo "${RELEASE}" >~/.partdb msg_ok "Installed Part-DB" msg_info "Creating Service" diff --git a/install/patchmon-install.sh b/install/patchmon-install.sh index 70cc00f28..fddc32394 100644 --- a/install/patchmon-install.sh +++ b/install/patchmon-install.sh @@ -14,74 +14,91 @@ network_check update_os msg_info "Installing Dependencies" -$STD apt install -y \ - build-essential \ - nginx \ - redis-server +$STD apt install -y redis-server msg_ok "Installed Dependencies" -NODE_VERSION="24" setup_nodejs PG_VERSION="17" setup_postgresql PG_DB_NAME="patchmon_db" PG_DB_USER="patchmon_usr" setup_postgresql_db -fetch_and_deploy_gh_release "PatchMon" "PatchMon/PatchMon" "tarball" "latest" "/opt/patchmon" +RELEASE="v2.0.2" +fetch_and_deploy_gh_release "PatchMon" "PatchMon/PatchMon" "singlefile" "latest" "/opt/patchmon" "patchmon-server-linux-amd64" +mv /opt/patchmon/PatchMon /opt/patchmon/patchmon-server msg_info "Configuring PatchMon" -VERSION=$(get_latest_github_release "PatchMon/PatchMon") -export NODE_ENV=production -cd /opt/patchmon -$STD npm install --no-audit --no-fund --no-save --ignore-scripts - -cd /opt/patchmon/frontend -cat <./.env 
-VITE_APP_NAME=PatchMon -VITE_APP_VERSION=${VERSION} -EOF -$STD npm install --no-audit --no-fund --no-save --ignore-scripts --include=dev -$STD npm run build - +cat </opt/patchmon/.env +DATABASE_URL="postgresql://$PG_DB_USER:$PG_DB_PASS@localhost:5432/$PG_DB_NAME" JWT_SECRET="$(openssl rand -hex 64)" -mv /opt/patchmon/backend/env.example /opt/patchmon/backend/.env -sed -i -e "s|DATABASE_URL=.*|DATABASE_URL=\"postgresql://$PG_DB_USER:$PG_DB_PASS@localhost:5432/$PG_DB_NAME\"|" \ - -e "/JWT_SECRET/s/[=$].*/=$JWT_SECRET/" \ - -e "\|CORS_ORIGIN|s|localhost|$LOCAL_IP|" \ - -e "/PORT=3001/aSERVER_PROTOCOL=http \\ - SERVER_HOST=$LOCAL_IP \\ - SERVER_PORT=3000" \ - -e '/_ENV=production/aTRUST_PROXY=1' \ - -e '/REDIS_USER=.*/,+1d' /opt/patchmon/backend/.env +SESSION_SECRET="$(openssl rand -hex 64)" +AI_ENCRYPTION_KEY="$(openssl rand -hex 64)" +CORS_ORIGIN=http://${LOCAL_IP}:3000 +PORT=3000 +APP_ENV=production -cd /opt/patchmon/backend -$STD npm run db:generate -$STD npx prisma migrate deploy +# Redis +REDIS_HOST=localhost +REDIS_PORT=6379 + +## OIDC / SSO (when OIDC_ENABLED=true, issuer/client/secret/redirect required) +# OIDC_ENABLED=false +# OIDC_ISSUER_URL= +# OIDC_CLIENT_ID= +# OIDC_CLIENT_SECRET= +# OIDC_REDIRECT_URI= +# OIDC_SCOPES=openid email profile groups +# OIDC_AUTO_CREATE_USERS=false +# OIDC_DEFAULT_ROLE=user +# OIDC_DISABLE_LOCAL_AUTH=false +# OIDC_BUTTON_TEXT=Login with SSO +# OIDC_SESSION_TTL=600 +# OIDC_POST_LOGOUT_URI= +# OIDC_SYNC_ROLES=false +# OIDC_ADMIN_GROUP= +# OIDC_SUPERADMIN_GROUP= +# OIDC_HOST_MANAGER_GROUP= +# OIDC_READONLY_GROUP= +# OIDC_USER_GROUP= +# OIDC_ENFORCE_HTTPS=true + +AGENT_BINARIES_DIR=/opt/patchmon/agents +EOF msg_ok "Configured PatchMon" -msg_info "Configuring Nginx" -cp /opt/patchmon/docker/nginx.conf.template /etc/nginx/sites-available/patchmon.conf -sed -i -e 's|proxy_pass .*|proxy_pass http://127.0.0.1:3001;|' \ - -e '\|try_files |i\ root /opt/patchmon/frontend/dist;' \ - -e 's|alias.*|alias /opt/patchmon/frontend/dist/assets;|' 
\ - -e '\|expires 1y|i\ root /opt/patchmon/frontend/dist;' /etc/nginx/sites-available/patchmon.conf -ln -sf /etc/nginx/sites-available/patchmon.conf /etc/nginx/sites-enabled/ -rm -f /etc/nginx/sites-enabled/default -$STD nginx -t -systemctl restart nginx -msg_ok "Configured Nginx" +msg_info "Fetching PatchMon agent binaries" +RELEASE=$(get_latest_github_release "PatchMon/PatchMon") +mkdir -p /opt/patchmon/agents +FILE_URL="https://github.com/PatchMon/PatchMon/releases/download/v${RELEASE}/patchmon-agent-" +AGENT_NAME=( + "linux-amd64" + "linux-arm64" + "linux-arm" + "linux-386" + "freebsd-amd64" + "freebsd-arm64" + "freebsd-arm" + "freebsd-386" + "windows-amd64.exe" + "windows-arm64.exe" +) +for arch in "${AGENT_NAME[@]}"; do + curl_with_retry "${FILE_URL}${arch}" "/opt/patchmon/agents/patchmon-agent-${arch}" + [[ "${arch}" != *.exe ]] && chmod 755 "/opt/patchmon/agents/patchmon-agent-${arch}" +done +msg_ok "Fetched PatchMon agent binaries" msg_info "Creating service" cat </etc/systemd/system/patchmon-server.service [Unit] -Description=PatchMon Service +Description=PatchMon Server After=network.target postgresql.service [Service] Type=simple -WorkingDirectory=/opt/patchmon/backend -ExecStart=/usr/bin/npm run start +WorkingDirectory=/opt/patchmon +ExecStart=/opt/patchmon/patchmon-server Restart=always RestartSec=10 -Environment=NODE_ENV=production Environment=PATH=/usr/bin:/usr/local/bin +EnvironmentFile=/opt/patchmon/.env NoNewPrivileges=true PrivateTmp=true ProtectSystem=strict diff --git a/install/peanut-install.sh b/install/peanut-install.sh index 5fa4cbaf3..9c65dd93b 100644 --- a/install/peanut-install.sh +++ b/install/peanut-install.sh @@ -29,13 +29,28 @@ cp -r .next/static .next/standalone/.next/ mkdir -p /opt/peanut/.next/standalone/config mkdir -p /etc/peanut/ ln -sf .next/standalone/server.js server.js -cat </etc/peanut/settings.yml -WEB_HOST: 0.0.0.0 -WEB_PORT: 8080 -NUT_HOST: 0.0.0.0 -NUT_PORT: 3493 +if [[ ! 
-f /etc/peanut/settings.yml ]]; then + cat </etc/peanut/settings.yml +NUT_SERVERS: [] EOF +fi ln -sf /etc/peanut/settings.yml /opt/peanut/.next/standalone/config/settings.yml +cat </etc/peanut/peanut.env +NODE_ENV=production + +#WEB_HOST=0.0.0.0 +#WEB_PORT=8080 +#NUT_HOST=localhost +#NUT_PORT=3493 + +# Disable auth entirely: +#AUTH_DISABLED=true + +# Bootstrap initial account on first start (ignored afterwards): +#WEB_USERNAME=admin +#WEB_PASSWORD=changeme +EOF +chmod 600 /etc/peanut/peanut.env msg_ok "Setup Peanut" msg_info "Creating Service" @@ -48,11 +63,7 @@ SyslogIdentifier=peanut Restart=always RestartSec=5 Type=simple -Environment="NODE_ENV=production" -#Environment="NUT_HOST=localhost" -#Environment="NUT_PORT=3493" -#Environment="WEB_HOST=0.0.0.0" -#Environment="WEB_PORT=8080" +EnvironmentFile=/etc/peanut/peanut.env WorkingDirectory=/opt/peanut ExecStart=node /opt/peanut/entrypoint.mjs TimeoutStopSec=30 diff --git a/install/pf2etools-install.sh b/install/pf2etools-install.sh deleted file mode 100644 index dbf9c7664..000000000 --- a/install/pf2etools-install.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) 2021-2026 community-scripts ORG -# Author: TheRealVira -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://pf2etools.com/ | Github: https://github.com/Pf2eToolsOrg/Pf2eTools - -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -color -verb_ip6 -catch_errors -setting_up_container -network_check -update_os - -msg_info "Installing Dependencies" -$STD apt install -y \ - apache2 \ - ca-certificates \ - git -msg_ok "Installed Dependencies" - -NODE_VERSION="22" setup_nodejs -fetch_and_deploy_gh_release "pf2etools" "Pf2eToolsOrg/Pf2eTools" "tarball" "latest" "/opt/Pf2eTools" - -msg_info "Configuring Pf2eTools" -cd /opt/Pf2eTools -$STD npm install -$STD npm run build -msg_ok "Configured Pf2eTools" - -msg_info "Creating Service" -cat <>/etc/apache2/apache2.conf - - SetHandler server-status - Order 
deny,allow - Allow from all - -EOF -rm -rf /var/www/html -ln -s "/opt/Pf2eTools" /var/www/html -chown -R www-data: "/opt/Pf2eTools" -chmod -R 755 "/opt/Pf2eTools" -msg_ok "Created Service" -cleanup_lxc -motd_ssh -customize diff --git a/install/photoprism-install.sh b/install/photoprism-install.sh index 2db48cce0..1d54199ce 100644 --- a/install/photoprism-install.sh +++ b/install/photoprism-install.sh @@ -99,7 +99,7 @@ PHOTOPRISM_DEBUG='false' PHOTOPRISM_LOG_LEVEL='info' # Site Info -PHOTOPRISM_SITE_CAPTION='https://Helper-Scripts.com' +PHOTOPRISM_SITE_CAPTION='https://community-scripts.org' PHOTOPRISM_SITE_DESCRIPTION='' PHOTOPRISM_SITE_AUTHOR='' EOF diff --git a/install/plex-install.sh b/install/plex-install.sh index e2a142724..8ed8bac6d 100644 --- a/install/plex-install.sh +++ b/install/plex-install.sh @@ -13,8 +13,6 @@ setting_up_container network_check update_os -setup_hwaccel - msg_info "Setting Up Plex Media Server Repository" setup_deb822_repo \ "plexmediaserver" \ @@ -26,13 +24,10 @@ msg_ok "Set Up Plex Media Server Repository" msg_info "Installing Plex Media Server" $STD apt install -y plexmediaserver -if [[ "$CTTYPE" == "0" ]]; then - sed -i -e 's/^ssl-cert:x:104:plex$/render:x:104:root,plex/' -e 's/^render:x:108:root$/ssl-cert:x:108:plex/' /etc/group -else - sed -i -e 's/^ssl-cert:x:104:plex$/render:x:104:plex/' -e 's/^render:x:108:$/ssl-cert:x:108:/' /etc/group -fi msg_ok "Installed Plex Media Server" +setup_hwaccel "plex" + motd_ssh customize cleanup_lxc diff --git a/install/podman-homeassistant-install.sh b/install/podman-homeassistant-install.sh index 2f4ba0d2b..03fb87b4d 100644 --- a/install/podman-homeassistant-install.sh +++ b/install/podman-homeassistant-install.sh @@ -45,32 +45,58 @@ systemctl enable -q --now podman.socket echo -e 'unqualified-search-registries=["docker.io"]' >>/etc/containers/registries.conf msg_ok "Installed Podman" +mkdir -p /etc/containers/systemd + read -r -p "${TAB3}Would you like to add Portainer? 
" prompt if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then msg_info "Installing Portainer $PORTAINER_LATEST_VERSION" podman volume create portainer_data >/dev/null - $STD podman run -d \ - -p 8000:8000 \ - -p 9443:9443 \ - --name=portainer \ - --restart=always \ - -v /run/podman/podman.sock:/var/run/docker.sock \ - -v portainer_data:/data \ - portainer/portainer-ce:latest + cat </etc/containers/systemd/portainer.container +[Unit] +Description=Portainer Container +After=network-online.target + +[Container] +Image=docker.io/portainer/portainer-ce:latest +ContainerName=portainer +PublishPort=8000:8000 +PublishPort=9443:9443 +Volume=/run/podman/podman.sock:/var/run/docker.sock +Volume=portainer_data:/data + +[Service] +Restart=always + +[Install] +WantedBy=default.target multi-user.target +EOF + systemctl daemon-reload + $STD systemctl start portainer msg_ok "Installed Portainer $PORTAINER_LATEST_VERSION" else read -r -p "${TAB3}Would you like to add the Portainer Agent? " prompt if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then msg_info "Installing Portainer agent $PORTAINER_AGENT_LATEST_VERSION" - podman volume create temp >/dev/null - podman volume remove temp >/dev/null - $STD podman run -d \ - -p 9001:9001 \ - --name portainer_agent \ - --restart=always \ - -v /run/podman/podman.sock:/var/run/docker.sock \ - -v /var/lib/containers/storage/volumes:/var/lib/docker/volumes \ - portainer/agent + cat </etc/containers/systemd/portainer-agent.container +[Unit] +Description=Portainer Agent Container +After=network-online.target + +[Container] +Image=docker.io/portainer/agent:latest +ContainerName=portainer_agent +PublishPort=9001:9001 +Volume=/run/podman/podman.sock:/var/run/docker.sock +Volume=/var/lib/containers/storage/volumes:/var/lib/docker/volumes + +[Service] +Restart=always + +[Install] +WantedBy=default.target multi-user.target +EOF + systemctl daemon-reload + $STD systemctl start portainer-agent msg_ok "Installed Portainer Agent $PORTAINER_AGENT_LATEST_VERSION" fi fi @@ -81,19 
+107,29 @@ msg_ok "Pulled Home Assistant Image" msg_info "Installing Home Assistant" $STD podman volume create hass_config -$STD podman run -d \ - --name homeassistant \ - --restart unless-stopped \ - -v /dev:/dev \ - -v hass_config:/config \ - -v /etc/localtime:/etc/localtime:ro \ - -v /etc/timezone:/etc/timezone:ro \ - --net=host \ - homeassistant/home-assistant:stable -podman generate systemd \ - --new --name homeassistant \ - >/etc/systemd/system/homeassistant.service -systemctl enable -q --now homeassistant +cat </etc/containers/systemd/homeassistant.container +[Unit] +Description=Home Assistant Container +After=network-online.target + +[Container] +Image=docker.io/homeassistant/home-assistant:stable +ContainerName=homeassistant +Volume=/dev:/dev +Volume=hass_config:/config +Volume=/etc/localtime:/etc/localtime:ro +Volume=/etc/timezone:/etc/timezone:ro +Network=host + +[Service] +Restart=always +TimeoutStartSec=300 + +[Install] +WantedBy=default.target multi-user.target +EOF +systemctl daemon-reload +$STD systemctl start homeassistant msg_ok "Installed Home Assistant" motd_ssh diff --git a/install/podman-install.sh b/install/podman-install.sh index 9e3dac2bc..9cad8662c 100644 --- a/install/podman-install.sh +++ b/install/podman-install.sh @@ -45,32 +45,58 @@ systemctl enable -q --now podman.socket echo -e 'unqualified-search-registries=["docker.io"]' >>/etc/containers/registries.conf msg_ok "Installed Podman" +mkdir -p /etc/containers/systemd + read -r -p "${TAB3}Would you like to add Portainer? 
" prompt if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then msg_info "Installing Portainer $PORTAINER_LATEST_VERSION" podman volume create portainer_data >/dev/null - $STD podman run -d \ - -p 8000:8000 \ - -p 9443:9443 \ - --name=portainer \ - --restart=always \ - -v /run/podman/podman.sock:/var/run/docker.sock \ - -v portainer_data:/data \ - portainer/portainer-ce:latest + cat </etc/containers/systemd/portainer.container +[Unit] +Description=Portainer Container +After=network-online.target + +[Container] +Image=docker.io/portainer/portainer-ce:latest +ContainerName=portainer +PublishPort=8000:8000 +PublishPort=9443:9443 +Volume=/run/podman/podman.sock:/var/run/docker.sock +Volume=portainer_data:/data + +[Service] +Restart=always + +[Install] +WantedBy=default.target multi-user.target +EOF + systemctl daemon-reload + $STD systemctl start portainer msg_ok "Installed Portainer $PORTAINER_LATEST_VERSION" else read -r -p "${TAB3}Would you like to add the Portainer Agent? " prompt if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then msg_info "Installing Portainer agent $PORTAINER_AGENT_LATEST_VERSION" - podman volume create temp >/dev/null - podman volume remove temp >/dev/null - $STD podman run -d \ - -p 9001:9001 \ - --name portainer_agent \ - --restart=always \ - -v /run/podman/podman.sock:/var/run/docker.sock \ - -v /var/lib/containers/storage/volumes:/var/lib/docker/volumes \ - portainer/agent + cat </etc/containers/systemd/portainer-agent.container +[Unit] +Description=Portainer Agent Container +After=network-online.target + +[Container] +Image=docker.io/portainer/agent:latest +ContainerName=portainer_agent +PublishPort=9001:9001 +Volume=/run/podman/podman.sock:/var/run/docker.sock +Volume=/var/lib/containers/storage/volumes:/var/lib/docker/volumes + +[Service] +Restart=always + +[Install] +WantedBy=default.target multi-user.target +EOF + systemctl daemon-reload + $STD systemctl start portainer-agent msg_ok "Installed Portainer Agent $PORTAINER_AGENT_LATEST_VERSION" fi fi diff --git 
a/install/prometheus-install.sh b/install/prometheus-install.sh index 0cbcbea07..4893e6dc3 100644 --- a/install/prometheus-install.sh +++ b/install/prometheus-install.sh @@ -36,7 +36,7 @@ ExecStart=/usr/local/bin/prometheus \ --config.file=/etc/prometheus/prometheus.yml \ --storage.tsdb.path=/var/lib/prometheus/ \ --web.listen-address=0.0.0.0:9090 -ExecReload=/bin/kill -HUP \$MAINPID +ExecReload=/bin/kill -HUP $MAINPID [Install] WantedBy=multi-user.target diff --git a/install/protonmail-bridge-install.sh b/install/protonmail-bridge-install.sh new file mode 100644 index 000000000..ba895ec1f --- /dev/null +++ b/install/protonmail-bridge-install.sh @@ -0,0 +1,192 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: Stephen Chin (steveonjava) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/ProtonMail/proton-bridge + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y pass +msg_ok "Installed Dependencies" + +msg_info "Creating Service User" +useradd -r -m -d /home/protonbridge -s /usr/sbin/nologin protonbridge +install -d -m 0750 -o protonbridge -g protonbridge /home/protonbridge +msg_ok "Created Service User" + +fetch_and_deploy_gh_release "protonmail-bridge" "ProtonMail/proton-bridge" "binary" + +msg_info "Creating Services" +cat </etc/systemd/system/protonmail-bridge.service +[Unit] +Description=Proton Mail Bridge (noninteractive) +After=network-online.target +Wants=network-online.target +ConditionPathExists=/home/protonbridge/.protonmailbridge-initialized + +[Service] +Type=simple +User=protonbridge +Group=protonbridge +WorkingDirectory=/home/protonbridge +Environment=HOME=/home/protonbridge +ExecStart=/usr/bin/protonmail-bridge --noninteractive +Restart=always +RestartSec=3 +NoNewPrivileges=yes +PrivateTmp=yes +ProtectSystem=full 
+ProtectKernelTunables=yes +ProtectKernelModules=yes +ProtectControlGroups=yes + +[Install] +WantedBy=multi-user.target +EOF +cat <<'EOF' >/etc/systemd/system/protonmail-bridge-imap.socket +[Unit] +Description=Proton Mail Bridge IMAP Socket (143) +ConditionPathExists=/home/protonbridge/.protonmailbridge-initialized + +[Socket] +ListenStream=143 +Accept=no +Service=protonmail-bridge-imap-proxy.service + +[Install] +WantedBy=sockets.target +EOF +cat <<'EOF' >/etc/systemd/system/protonmail-bridge-imap-proxy.service +[Unit] +Description=Proton Mail Bridge IMAP Proxy (143 -> 127.0.0.1:1143) +After=protonmail-bridge.service +Requires=protonmail-bridge.service +ConditionPathExists=/home/protonbridge/.protonmailbridge-initialized + +[Service] +Type=simple +Sockets=protonmail-bridge-imap.socket +ExecStart=/usr/lib/systemd/systemd-socket-proxyd 127.0.0.1:1143 +NoNewPrivileges=yes +PrivateTmp=yes +EOF +cat <<'EOF' >/etc/systemd/system/protonmail-bridge-smtp.socket +[Unit] +Description=Proton Mail Bridge SMTP Socket (587) +ConditionPathExists=/home/protonbridge/.protonmailbridge-initialized + +[Socket] +ListenStream=587 +Accept=no +Service=protonmail-bridge-smtp-proxy.service + +[Install] +WantedBy=sockets.target +EOF +cat <<'EOF' >/etc/systemd/system/protonmail-bridge-smtp-proxy.service +[Unit] +Description=Proton Mail Bridge SMTP Proxy (587 -> 127.0.0.1:1025) +After=protonmail-bridge.service +Requires=protonmail-bridge.service +ConditionPathExists=/home/protonbridge/.protonmailbridge-initialized + +[Service] +Type=simple +Sockets=protonmail-bridge-smtp.socket +ExecStart=/usr/lib/systemd/systemd-socket-proxyd 127.0.0.1:1025 +NoNewPrivileges=yes +PrivateTmp=yes +EOF +msg_ok "Created Services" + +msg_info "Creating Helper Commands" + +cat <<'EOF' >/usr/local/bin/protonmailbridge-configure +#!/usr/bin/env bash +set -euo pipefail + +BRIDGE_USER="protonbridge" +BRIDGE_HOME="/home/${BRIDGE_USER}" +GNUPG_HOME="${BRIDGE_HOME}/.gnupg" 
+MARKER="${BRIDGE_HOME}/.protonmailbridge-initialized" + +FIRST_TIME=0 +if [[ ! -f "${MARKER}" ]]; then + FIRST_TIME=1 +fi + +# Stop sockets/proxies/bridge daemon before configuration +systemctl stop protonmail-bridge-imap.socket protonmail-bridge-smtp.socket +systemctl stop protonmail-bridge-imap-proxy protonmail-bridge-smtp-proxy protonmail-bridge + +if [[ "${FIRST_TIME}" == "1" ]]; then + echo "First-time setup: initializing pass keychain for ${BRIDGE_USER} (required by Proton Mail Bridge on Linux)." + + install -d -m 0700 -o "${BRIDGE_USER}" -g "${BRIDGE_USER}" "${GNUPG_HOME}" + + FPR="$(runuser -u "${BRIDGE_USER}" -- env HOME="${BRIDGE_HOME}" GNUPGHOME="${GNUPG_HOME}" \ + gpg --list-secret-keys --with-colons 2>/dev/null | awk -F: '$1=="fpr"{print $10; exit}')" + + if [[ -z "${FPR}" ]]; then + runuser -u "${BRIDGE_USER}" -- env HOME="${BRIDGE_HOME}" GNUPGHOME="${GNUPG_HOME}" \ + gpg --batch --pinentry-mode loopback --passphrase '' \ + --quick-gen-key 'ProtonMail Bridge' default default never + + FPR="$(runuser -u "${BRIDGE_USER}" -- env HOME="${BRIDGE_HOME}" GNUPGHOME="${GNUPG_HOME}" \ + gpg --list-secret-keys --with-colons 2>/dev/null | awk -F: '$1=="fpr"{print $10; exit}')" + fi + + if [[ -z "${FPR}" ]]; then + echo "Failed to detect a GPG key fingerprint for ${BRIDGE_USER}." >&2 + exit 1 + fi + + runuser -u "${BRIDGE_USER}" -- env HOME="${BRIDGE_HOME}" GNUPGHOME="${GNUPG_HOME}" \ + pass init "${FPR}" + + echo + echo "To do initial configuration of the Proton Mail Bridge:" + echo "Run: login" + echo "Run: info" + echo "Run: exit" + echo +else + echo + echo "Launching Proton Mail Bridge CLI for configuration." + echo "External access is disabled until you exit." 
+ echo "Run: exit" + echo +fi + +runuser -u "${BRIDGE_USER}" -- env HOME="${BRIDGE_HOME}" \ + protonmail-bridge -c + +if [[ "${FIRST_TIME}" == "1" ]]; then + touch "${MARKER}" + chown "${BRIDGE_USER}:${BRIDGE_USER}" "${MARKER}" + chmod 0644 "${MARKER}" +fi + +systemctl enable -q --now protonmail-bridge.service protonmail-bridge-imap.socket protonmail-bridge-smtp.socket + +if [[ "${FIRST_TIME}" == "1" ]]; then + echo "Initialization complete. Services enabled and started." +else + echo "Configuration complete. Services enabled and started." +fi +EOF +chmod +x /usr/local/bin/protonmailbridge-configure +ln -sf /usr/local/bin/protonmailbridge-configure /usr/bin/protonmailbridge-configure +msg_ok "Created Helper Commands" + +motd_ssh +customize +cleanup_lxc diff --git a/install/reactive-resume-install.sh b/install/reactive-resume-install.sh index f35278ab1..e06e56a12 100644 --- a/install/reactive-resume-install.sh +++ b/install/reactive-resume-install.sh @@ -15,16 +15,21 @@ update_os PG_VERSION="16" setup_postgresql PG_DB_NAME="reactive_resume" PG_DB_USER="reactive_resume" setup_postgresql_db -NODE_VERSION="22" NODE_MODULE="pnpm@latest" setup_nodejs +NODE_VERSION="24" setup_nodejs msg_info "Installing Dependencies" -$STD apt install -y chromium +$STD apt install -y \ + chromium \ + git msg_ok "Installed Dependencies" fetch_and_deploy_gh_release "reactive-resume" "amruthpillai/reactive-resume" "tarball" msg_info "Building Reactive Resume (Patience)" cd /opt/reactive-resume +export COREPACK_ENABLE_DOWNLOAD_PROMPT=0 +corepack enable +corepack prepare --activate export NODE_ENV="production" export CI="true" $STD pnpm install --frozen-lockfile diff --git a/install/reitti-install.sh b/install/reitti-install.sh index 660c09b27..2810b1495 100644 --- a/install/reitti-install.sh +++ b/install/reitti-install.sh @@ -16,7 +16,6 @@ update_os msg_info "Installing Dependencies" $STD apt install -y \ redis-server \ - rabbitmq-server \ libpq-dev \ zstd \ nginx @@ -26,26 +25,8 @@ 
JAVA_VERSION="25" setup_java PG_VERSION="17" PG_MODULES="postgis" setup_postgresql PG_DB_NAME="reitti_db" PG_DB_USER="reitti" PG_DB_EXTENSIONS="postgis" setup_postgresql_db -msg_info "Configuring RabbitMQ" -RABBIT_USER="reitti" -RABBIT_PASS="$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | cut -c1-13)" -RABBIT_VHOST="/" -$STD rabbitmqctl add_user "$RABBIT_USER" "$RABBIT_PASS" -$STD rabbitmqctl add_vhost "$RABBIT_VHOST" -$STD rabbitmqctl set_permissions -p "$RABBIT_VHOST" "$RABBIT_USER" ".*" ".*" ".*" -$STD rabbitmqctl set_user_tags "$RABBIT_USER" administrator -{ - echo "" - echo "Reitti Credentials" - echo "RabbitMQ User: $RABBIT_USER" - echo "RabbitMQ Password: $RABBIT_PASS" -} >>~/reitti.creds -msg_ok "Configured RabbitMQ" - USE_ORIGINAL_FILENAME="true" fetch_and_deploy_gh_release "reitti" "dedicatedcode/reitti" "singlefile" "latest" "/opt/reitti" "reitti-app.jar" mv /opt/reitti/reitti-*.jar /opt/reitti/reitti.jar -USE_ORIGINAL_FILENAME="true" fetch_and_deploy_gh_release "photon" "komoot/photon" "singlefile" "latest" "/opt/photon" "photon-0*.jar" -mv /opt/photon/photon-*.jar /opt/photon/photon.jar msg_info "Installing Nginx Tile Cache" mkdir -p /var/cache/nginx/tiles @@ -73,57 +54,105 @@ EOF chown -R www-data:www-data /var/cache/nginx chmod -R 750 /var/cache/nginx systemctl restart nginx -msg_info "Installed Nginx Tile Cache" +msg_ok "Installed Nginx Tile Cache" msg_info "Creating Reitti Configuration-File" mkdir -p /opt/reitti/data cat </opt/reitti/application.properties -# Reitti Server Base URI -reitti.server.advertise-uri=http://127.0.0.1:8080 +# Server configuration +server.port=8080 +server.servlet.context-path=/ +server.forward-headers-strategy=framework +server.compression.enabled=true +server.compression.min-response-size=1024 +server.compression.mime-types=text/plain,application/json -# PostgreSQL Database Connection +# Logging configuration +logging.level.root=INFO +logging.level.org.hibernate.engine.jdbc.spi.SqlExceptionHelper=FATAL 
+logging.level.com.dedicatedcode.reitti=INFO + +# Internationalization +spring.messages.basename=messages +spring.messages.encoding=UTF-8 +spring.messages.cache-duration=3600 +spring.messages.fallback-to-system-locale=false + +# PostgreSQL configuration spring.datasource.url=jdbc:postgresql://127.0.0.1:5432/$PG_DB_NAME spring.datasource.username=$PG_DB_USER spring.datasource.password=$PG_DB_PASS -spring.datasource.driver-class-name=org.postgresql.Driver +spring.datasource.hikari.maximum-pool-size=20 -# Flyway Database Migrations -spring.flyway.enabled=true -spring.flyway.locations=classpath:db/migration -spring.flyway.baseline-on-migrate=true - -# RabbitMQ (Message Queue) -spring.rabbitmq.host=127.0.0.1 -spring.rabbitmq.port=5672 -spring.rabbitmq.username=$RABBIT_USER -spring.rabbitmq.password=$RABBIT_PASS - -# Redis (Cache) +# Redis configuration spring.data.redis.host=127.0.0.1 spring.data.redis.port=6379 +spring.data.redis.username= +spring.data.redis.password= +spring.data.redis.database=0 +spring.cache.redis.key-prefix= -# Server Port -server.port=8080 +spring.cache.cache-names=processed-visits,significant-places,users,magic-links,configurations,transport-mode-configs,avatarThumbnails,avatarData,user-settings +spring.cache.redis.time-to-live=1d -# Optional: Logging & Performance -logging.level.root=INFO -spring.jpa.hibernate.ddl-auto=none -spring.datasource.hikari.maximum-pool-size=10 +# Upload configuration +spring.servlet.multipart.max-file-size=5GB +spring.servlet.multipart.max-request-size=5GB +server.tomcat.max-part-count=100 + +# Rqueue configuration +rqueue.web.enable=false +rqueue.job.enabled=false +rqueue.message.durability.in-terminal-state=0 +rqueue.key.prefix=\${spring.cache.redis.key-prefix} +rqueue.message.converter.provider.class=com.dedicatedcode.reitti.config.RQueueCustomMessageConverter + +# Application-specific settings +reitti.server.advertise-uri= + +reitti.security.local-login.disable=false # OIDC / Security Settings 
+reitti.security.oidc.enabled=false reitti.security.oidc.registration.enabled=false -# Photon (Geocoding) -PHOTON_BASE_URL=http://127.0.0.1:2322 -PROCESSING_WAIT_TIME=15 -PROCESSING_BATCH_SIZE=1000 -PROCESSING_WORKERS_PER_QUEUE=4-16 +reitti.import.batch-size=10000 +reitti.import.processing-idle-start-time=10 -# Disable potentially dangerous features unless needed -DANGEROUS_LIFE=false +reitti.geo-point-filter.max-speed-kmh=1000 +reitti.geo-point-filter.max-accuracy-meters=100 +reitti.geo-point-filter.history-lookback-hours=24 +reitti.geo-point-filter.window-size=50 -# Tiles Cache +reitti.process-data.schedule=0 */10 * * * * +reitti.process-data.refresh-views.schedule=0 0 4 * * * +reitti.imports.schedule=0 5/10 * * * * +reitti.imports.owntracks-recorder.schedule=\${reitti.imports.schedule} + +# Geocoding service configuration +reitti.geocoding.max-errors=10 +reitti.geocoding.photon.base-url= + +# Tiles Configuration reitti.ui.tiles.cache.url=http://127.0.0.1 +reitti.ui.tiles.default.service=https://tile.openstreetmap.org/{z}/{x}/{y}.png +reitti.ui.tiles.default.attribution=© OpenStreetMap contributors + +# Data management configuration +reitti.data-management.enabled=false +reitti.data-management.preview-cleanup.cron=0 0 4 * * * + +reitti.storage.path=data/ +reitti.storage.cleanup.cron=0 0 4 * * * + +# Location data density normalization +reitti.location.density.target-points-per-minute=4 + +# Logging buffer +reitti.logging.buffer-size=1000 +reitti.logging.max-buffer-size=10000 + +spring.config.import=optional:oidc.properties EOF msg_ok "Created Configuration-File for Reitti" @@ -131,8 +160,8 @@ msg_info "Creating Services" cat </etc/systemd/system/reitti.service [Unit] Description=Reitti -After=network.target postgresql.service redis-server.service rabbitmq-server.service photon.service -Wants=postgresql.service redis-server.service rabbitmq-server.service photon.service +After=network.target postgresql.service redis-server.service +Wants=postgresql.service 
redis-server.service [Service] Type=simple @@ -146,26 +175,6 @@ Restart=on-failure WantedBy=multi-user.target EOF -cat </etc/systemd/system/photon.service -[Unit] -Description=Photon Geocoding Service (Germany, OpenSearch) -After=network.target - -[Service] -Type=simple -WorkingDirectory=/opt/photon -ExecStart=/usr/bin/java -Xmx4g -jar photon.jar \ - -data-dir /opt/photon \ - -listen-port 2322 \ - -listen-ip 0.0.0.0 \ - -cors-any -Restart=on-failure -TimeoutStopSec=20 - -[Install] -WantedBy=multi-user.target -EOF -systemctl enable -q --now photon systemctl enable -q --now reitti msg_ok "Created Services" diff --git a/install/revealjs-install.sh b/install/revealjs-install.sh index 7cf265924..a78327999 100644 --- a/install/revealjs-install.sh +++ b/install/revealjs-install.sh @@ -19,7 +19,7 @@ fetch_and_deploy_gh_release "revealjs" "hakimel/reveal.js" "tarball" msg_info "Configuring ${APPLICATION}" cd /opt/revealjs $STD npm install -sed -i '25s/localhost/0.0.0.0/g' /opt/revealjs/gulpfile.js +sed -i 's/"vite"/"vite --host"/g' package.json msg_ok "Setup ${APPLICATION}" msg_info "Creating Service" diff --git a/install/romm-install.sh b/install/romm-install.sh index ddf0a2d6d..515355110 100644 --- a/install/romm-install.sh +++ b/install/romm-install.sh @@ -120,7 +120,7 @@ fetch_and_deploy_gh_release "RAHasher" "RetroAchievements/RALibretro" "prebuild" cp /opt/RALibretro/RAHasher /usr/bin/RAHasher chmod +x /usr/bin/RAHasher -fetch_and_deploy_gh_release "romm" "rommapp/romm" +fetch_and_deploy_gh_release "romm" "rommapp/romm" "tarball" msg_info "Creating environment file" sed -i 's/^supervised no/supervised systemd/' /etc/redis/redis.conf @@ -176,8 +176,10 @@ $STD npm run build cp -rf /opt/romm/frontend/assets/* /opt/romm/frontend/dist/assets/ mkdir -p /opt/romm/frontend/dist/assets/romm -ln -sfn /var/lib/romm/resources /opt/romm/frontend/dist/assets/romm/resources -ln -sfn /var/lib/romm/assets /opt/romm/frontend/dist/assets/romm/assets +ROMM_BASE=$(grep '^ROMM_BASE_PATH=' 
/opt/romm/.env | cut -d'=' -f2) +ROMM_BASE=${ROMM_BASE:-/var/lib/romm} +ln -sfn "$ROMM_BASE"/resources /opt/romm/frontend/dist/assets/romm/resources +ln -sfn "$ROMM_BASE"/assets /opt/romm/frontend/dist/assets/romm/assets msg_ok "Set up RomM Frontend" msg_info "Configuring Nginx" @@ -251,6 +253,7 @@ server { } EOF +sed -i "s|alias /var/lib/romm/library/;|alias ${ROMM_BASE}/library/;|" /etc/nginx/sites-available/romm rm -f /etc/nginx/sites-enabled/default ln -sf /etc/nginx/sites-available/romm /etc/nginx/sites-enabled/romm systemctl restart nginx diff --git a/install/seerr-install.sh b/install/seerr-install.sh index a46d233a6..1b38b04fe 100644 --- a/install/seerr-install.sh +++ b/install/seerr-install.sh @@ -14,7 +14,9 @@ network_check update_os msg_info "Installing Dependencies" -$STD apt-get install -y build-essential +$STD apt install -y \ + build-essential \ + python3-setuptools msg_ok "Installed Dependencies" fetch_and_deploy_gh_release "seerr" "seerr-team/seerr" "tarball" diff --git a/install/semaphore-install.sh b/install/semaphore-install.sh index 1cf74d6f2..1cd334dcd 100644 --- a/install/semaphore-install.sh +++ b/install/semaphore-install.sh @@ -40,7 +40,7 @@ cat </opt/semaphore/config.json "access_key_encryption": "${SEM_KEY}" } EOF -$STD semaphore user add --admin --login admin --email admin@helper-scripts.com --name Administrator --password "${SEM_PW}" --config /opt/semaphore/config.json +$STD semaphore user add --admin --login admin --email admin@community-scripts.org --name Administrator --password "${SEM_PW}" --config /opt/semaphore/config.json echo "${SEM_PW}" >~/semaphore.creds msg_ok "Setup Semaphore" diff --git a/install/shelfmark-install.sh b/install/shelfmark-install.sh index 961a7e1c2..22597eaf9 100644 --- a/install/shelfmark-install.sh +++ b/install/shelfmark-install.sh @@ -115,8 +115,8 @@ else msg_ok "Installed internal bypasser dependencies" fi -NODE_VERSION="22" setup_nodejs -PYTHON_VERSION="3.12" setup_uv +NODE_VERSION="24" setup_nodejs 
+PYTHON_VERSION="3.14" setup_uv fetch_and_deploy_gh_release "shelfmark" "calibrain/shelfmark" "tarball" "latest" "/opt/shelfmark" RELEASE_VERSION=$(cat "$HOME/.shelfmark") @@ -130,11 +130,15 @@ mv /opt/shelfmark/src/frontend/dist /opt/shelfmark/frontend-dist msg_ok "Built Shelfmark frontend" msg_info "Configuring Shelfmark" +export VIRTUAL_ENV=/opt/shelfmark/venv cd /opt/shelfmark $STD uv venv --clear ./venv $STD source ./venv/bin/activate -$STD uv pip install -r ./requirements-base.txt -[[ "$DEPLOYMENT_TYPE" == "1" ]] && $STD uv pip install -r ./requirements-shelfmark.txt +if [[ "$DEPLOYMENT_TYPE" == "1" ]]; then + $STD uv sync --active --locked --no-default-groups --extra browser +else + $STD uv sync --active --locked --no-default-groups +fi mkdir -p {/var/log/shelfmark,/tmp/shelfmark} msg_ok "Configured Shelfmark" diff --git a/install/shinobi-install.sh b/install/shinobi-install.sh index a5ffe1c94..383325016 100644 --- a/install/shinobi-install.sh +++ b/install/shinobi-install.sh @@ -35,7 +35,7 @@ cd Shinobi gitVersionNumber=$(git rev-parse HEAD) theDateRightNow=$(date) touch version.json -chmod 777 version.json +chmod 644 version.json echo '{"Product" : "'"Shinobi"'" , "Branch" : "'"master"'" , "Version" : "'"$gitVersionNumber"'" , "Date" : "'"$theDateRightNow"'" , "Repository" : "'"https://gitlab.com/Shinobi-Systems/Shinobi.git"'"}' >version.json msg_ok "Cloned Shinobi" diff --git a/install/shlink-install.sh b/install/shlink-install.sh new file mode 100644 index 000000000..97256f5e1 --- /dev/null +++ b/install/shlink-install.sh @@ -0,0 +1,126 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://shlink.io/ + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +PHP_VERSION="8.5" setup_php +setup_mariadb +MARIADB_DB_NAME="shlink" 
MARIADB_DB_USER="shlink" setup_mariadb_db + +fetch_and_deploy_gh_release "shlink" "shlinkio/shlink" "prebuild" "latest" "/opt/shlink" "shlink*_php8.5_dist.zip" + +msg_info "Setting up Application" +cd /opt/shlink +$STD php ./vendor/bin/rr get --no-interaction --location bin/ +chmod +x bin/rr +mkdir -p data/cache data/locks data/log data/proxies data/temp-geolite +chmod -R 775 data +cat </opt/shlink/.env +DEFAULT_DOMAIN=${LOCAL_IP}:8080 +IS_HTTPS_ENABLED=false +DB_DRIVER=maria +DB_NAME=${MARIADB_DB_NAME} +DB_USER=${MARIADB_DB_USER} +DB_PASSWORD=${MARIADB_DB_PASS} +DB_HOST=127.0.0.1 +DB_PORT=3306 +EOF +set -a +source /opt/shlink/.env +set +a +$STD php vendor/bin/shlink-installer init --no-interaction --clear-db-cache --skip-download-geolite +API_OUTPUT=$(php bin/cli api-key:generate --name=default 2>&1) +INITIAL_API_KEY=$(echo "$API_OUTPUT" | sed -n 's/.*Generated API key: "\([^"]*\)".*/\1/p') +if [[ -n "$INITIAL_API_KEY" ]]; then + echo "INITIAL_API_KEY=${INITIAL_API_KEY}" >>/opt/shlink/.env +fi +msg_ok "Set up Application" + +if prompt_confirm "Install Shlink Web Client?" 
"y" 60; then + msg_info "Installing Dependencies" + $STD apt install -y nginx + msg_ok "Installed Dependencies" + + fetch_and_deploy_gh_release "shlink-web-client" "shlinkio/shlink-web-client" "prebuild" "latest" "/opt/shlink-web-client" "shlink-web-client_*_dist.zip" + + msg_info "Setting up Web Client" + cat </opt/shlink-web-client/servers.json +[ + { + "name": "Shlink", + "url": "http://${LOCAL_IP}:8080", + "apiKey": "${INITIAL_API_KEY}" + } +] +EOF + cat <<'EOF' >/etc/nginx/sites-available/shlink-web-client +server { + listen 3000 default_server; + charset utf-8; + root /opt/shlink-web-client; + index index.html; + + location ~* \.(?:manifest|appcache|html?|xml|json)$ { + expires -1; + } + + location ~* \.(?:jpg|jpeg|gif|png|ico|cur|gz|svg|svgz|mp4|ogg|ogv|webm|htc)$ { + expires 1M; + add_header Cache-Control "public"; + } + + location ~* \.(?:css|js)$ { + expires 1y; + add_header Cache-Control "public"; + } + + location = /servers.json { + try_files /servers.json /conf.d/servers.json; + } + + location / { + try_files $uri $uri/ /index.html$is_args$args; + } +} +EOF + ln -sf /etc/nginx/sites-available/shlink-web-client /etc/nginx/sites-enabled/shlink-web-client + rm -f /etc/nginx/sites-enabled/default + systemctl enable -q nginx + $STD systemctl restart nginx + msg_ok "Set up Web Client" +fi + +msg_info "Creating Service" +cat </etc/systemd/system/shlink.service +[Unit] +Description=Shlink URL Shortener +After=network.target mariadb.service + +[Service] +Type=simple +User=root +WorkingDirectory=/opt/shlink +EnvironmentFile=/opt/shlink/.env +ExecStart=/opt/shlink/bin/rr serve -c config/roadrunner/.rr.yml +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now shlink +msg_ok "Created Service" + +motd_ssh +customize +cleanup_lxc diff --git a/install/slskd-install.sh b/install/slskd-install.sh index d56f37e28..448839a71 100644 --- a/install/slskd-install.sh +++ b/install/slskd-install.sh @@ -48,12 +48,15 @@ if [[ 
${soularr,,} =~ ^(y|yes)$ ]]; then #!/usr/bin/env bash if ps aux | grep "[s]oularr.py" >/dev/null; then - echo "Soularr is already running. Exiting..." + echo "Soularr is already running. Exiting..." >&2 exit 1 -else - source /opt/soularr/venv/bin/activate - uv run python3 -u /opt/soularr/soularr.py --config-dir /opt/soularr fi + +# Remove stale lock file from previous ungraceful exit +rm -f "/opt/soularr/.soularr.lock" + +source /opt/soularr/venv/bin/activate +uv run python3 -u /opt/soularr/soularr.py --config-dir /opt/soularr 2>&1 EOF chmod +x /opt/soularr/run.sh deactivate diff --git a/install/solidtime-install.sh b/install/solidtime-install.sh new file mode 100644 index 000000000..fecdb644d --- /dev/null +++ b/install/solidtime-install.sh @@ -0,0 +1,86 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://www.solidtime.io/ + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y caddy +msg_ok "Installed Dependencies" + +PHP_VERSION="8.3" PHP_FPM="YES" PHP_MODULES="bcmath,gd,intl,xml,zip,pdo_pgsql,redis,mbstring,curl" setup_php +setup_composer +NODE_VERSION="22" setup_nodejs +PG_VERSION="16" setup_postgresql +PG_DB_NAME="solidtime" PG_DB_USER="solidtime" setup_postgresql_db + +fetch_and_deploy_gh_release "solidtime" "solidtime-io/solidtime" "tarball" + +msg_info "Setting up SolidTime" +cd /opt/solidtime +cp .env.example .env +sed -i "s|^APP_ENV=.*|APP_ENV=production|" .env +sed -i "s|^APP_DEBUG=.*|APP_DEBUG=false|" .env +sed -i "s|^APP_URL=.*|APP_URL=http://${LOCAL_IP}|" .env +sed -i "s|^APP_ENABLE_REGISTRATION=.*|APP_ENABLE_REGISTRATION=true|" .env +sed -i "s|^DB_CONNECTION=.*|DB_CONNECTION=pgsql|" .env +sed -i "s|^DB_HOST=.*|DB_HOST=127.0.0.1|" .env +sed -i "s|^DB_PORT=.*|DB_PORT=5432|" 
.env +sed -i "s|^DB_DATABASE=.*|DB_DATABASE=${PG_DB_NAME}|" .env +sed -i "s|^DB_USERNAME=.*|DB_USERNAME=${PG_DB_USER}|" .env +sed -i "s|^DB_PASSWORD=.*|DB_PASSWORD=${PG_DB_PASS}|" .env +sed -i "s|^FILESYSTEM_DISK=.*|FILESYSTEM_DISK=local|" .env +sed -i "s|^PUBLIC_FILESYSTEM_DISK=.*|PUBLIC_FILESYSTEM_DISK=public|" .env +sed -i "s|^MAIL_MAILER=.*|MAIL_MAILER=log|" .env +sed -i "s|^SESSION_SECURE_COOKIE=.*|SESSION_SECURE_COOKIE=false|" .env +grep -q "^SESSION_SECURE_COOKIE=" .env || echo "SESSION_SECURE_COOKIE=false" >>.env +sed -i "s|^APP_FORCE_HTTPS=.*|APP_FORCE_HTTPS=false|" .env +grep -q "^APP_FORCE_HTTPS=" .env || echo "APP_FORCE_HTTPS=false" >>.env +$STD composer install --no-dev --optimize-autoloader +php artisan self-host:generate-keys >/tmp/solidtime.keys 2>/dev/null +while IFS= read -r line; do + KEY="${line%%=*}" + [[ -z "$KEY" || "${KEY:0:1}" == "#" ]] && continue + sed -i "/^${KEY}=/d" .env + echo "$line" >>.env +done /etc/caddy/Caddyfile +:80 { + root * /opt/solidtime/public + php_fastcgi unix//run/php/php${PHP_VER}-fpm.sock + file_server + encode gzip +} +EOF +usermod -aG www-data caddy +systemctl enable -q --now php${PHP_VER}-fpm +systemctl restart caddy +msg_ok "Configured Caddy" + +motd_ssh +customize +cleanup_lxc diff --git a/install/sonarqube-install.sh b/install/sonarqube-install.sh index 0856eb513..b84d7b03b 100644 --- a/install/sonarqube-install.sh +++ b/install/sonarqube-install.sh @@ -21,6 +21,7 @@ temp_file=$(mktemp) RELEASE=$(get_latest_github_release "SonarSource/sonarqube") curl -fsSL "https://binaries.sonarsource.com/Distribution/sonarqube/sonarqube-${RELEASE}.zip" -o $temp_file unzip -q "$temp_file" -d /opt +rm -f "$temp_file" mv /opt/sonarqube-* /opt/sonarqube $STD useradd -r -m -U -d /opt/sonarqube -s /bin/bash sonarqube chown -R sonarqube:sonarqube /opt/sonarqube diff --git a/install/soulsync-install.sh b/install/soulsync-install.sh new file mode 100644 index 000000000..00bfaf171 --- /dev/null +++ b/install/soulsync-install.sh @@ -0,0 
+1,59 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/Nezreka/SoulSync + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y \ + gcc \ + libffi-dev \ + libssl-dev \ + libchromaprint-tools \ + ffmpeg +msg_ok "Installed Dependencies" + +UV_PYTHON="3.11" setup_uv + +fetch_and_deploy_gh_release "soulsync" "Nezreka/SoulSync" "tarball" + +msg_info "Setting up Application" +cd /opt/soulsync +$STD uv venv /opt/soulsync/.venv --python 3.11 +$STD uv pip install -r requirements.txt --python /opt/soulsync/.venv/bin/python +mkdir -p /opt/soulsync/{config,data,logs} +msg_ok "Set up Application" + +msg_info "Creating Service" +cat </etc/systemd/system/soulsync.service +[Unit] +Description=SoulSync Music Discovery +After=network.target + +[Service] +Type=simple +User=root +WorkingDirectory=/opt/soulsync +ExecStart=/opt/soulsync/.venv/bin/python web_server.py +Environment=PYTHONPATH=/opt/soulsync PYTHONUNBUFFERED=1 DATABASE_PATH=/opt/soulsync/data/music_library.db +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now soulsync +msg_ok "Created Service" + +motd_ssh +customize +cleanup_lxc diff --git a/install/sparkyfitness-install.sh b/install/sparkyfitness-install.sh index f7bad8760..4342c2ec7 100644 --- a/install/sparkyfitness-install.sh +++ b/install/sparkyfitness-install.sh @@ -40,6 +40,7 @@ sed \ -e "s|^SPARKY_FITNESS_SERVER_HOST=.*|SPARKY_FITNESS_SERVER_HOST=localhost|" \ -e "s|^SPARKY_FITNESS_SERVER_PORT=.*|SPARKY_FITNESS_SERVER_PORT=3010|" \ -e "s|^SPARKY_FITNESS_FRONTEND_URL=.*|SPARKY_FITNESS_FRONTEND_URL=http://${LOCAL_IP}:80|" \ + -e "s|^GARMIN_MICROSERVICE_URL=.*|GARMIN_MICROSERVICE_URL=http://${LOCAL_IP}:8000|" \ -e 
"s|^SPARKY_FITNESS_API_ENCRYPTION_KEY=.*|SPARKY_FITNESS_API_ENCRYPTION_KEY=$(openssl rand -hex 32)|" \ -e "s|^BETTER_AUTH_SECRET=.*|BETTER_AUTH_SECRET=$(openssl rand -hex 32)|" \ "/etc/sparkyfitness/.env" @@ -47,12 +48,13 @@ msg_ok "Configured Sparky Fitness" msg_info "Building Backend" cd /opt/sparkyfitness/SparkyFitnessServer -$STD npm install +$STD pnpm install msg_ok "Built Backend" msg_info "Building Frontend (Patience)" -cd /opt/sparkyfitness/SparkyFitnessFrontend +cd /opt/sparkyfitness $STD pnpm install +cd /opt/sparkyfitness/SparkyFitnessFrontend $STD pnpm run build cp -a /opt/sparkyfitness/SparkyFitnessFrontend/dist/. /var/www/sparkyfitness/ msg_ok "Built Frontend" @@ -68,7 +70,7 @@ Requires=postgresql.service Type=simple WorkingDirectory=/opt/sparkyfitness/SparkyFitnessServer EnvironmentFile=/etc/sparkyfitness/.env -ExecStart=/usr/bin/node SparkyFitnessServer.js +ExecStart=/opt/sparkyfitness/SparkyFitnessServer/node_modules/.bin/tsx SparkyFitnessServer.js Restart=always RestartSec=5 diff --git a/install/split-pro-install.sh b/install/split-pro-install.sh new file mode 100644 index 000000000..722793d3b --- /dev/null +++ b/install/split-pro-install.sh @@ -0,0 +1,74 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: johanngrobe +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/oss-apps/split-pro + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +NODE_VERSION="22" NODE_MODULE="pnpm" setup_nodejs +PG_VERSION="17" PG_MODULES="cron" setup_postgresql + +msg_info "Installing Dependencies" +$STD apt install -y openssl +msg_ok "Installed Dependencies" + +PG_DB_NAME="splitpro" PG_DB_USER="splitpro" PG_DB_EXTENSIONS="pg_cron" setup_postgresql_db +fetch_and_deploy_gh_release "split-pro" "oss-apps/split-pro" "tarball" + +msg_info "Installing Dependencies" +cd /opt/split-pro +$STD pnpm install 
--frozen-lockfile +msg_ok "Installed Dependencies" + +msg_info "Building Split Pro" +cd /opt/split-pro +mkdir -p /opt/split-pro_data/uploads +ln -sf /opt/split-pro_data/uploads /opt/split-pro/uploads +NEXTAUTH_SECRET=$(openssl rand -base64 32) +cp .env.example .env +sed -i "s|^DATABASE_URL=.*|DATABASE_URL=\"postgresql://${PG_DB_USER}:${PG_DB_PASS}@localhost:5432/${PG_DB_NAME}\"|" .env +sed -i "s|^NEXTAUTH_SECRET=.*|NEXTAUTH_SECRET=\"${NEXTAUTH_SECRET}\"|" .env +sed -i "s|^NEXTAUTH_URL=.*|NEXTAUTH_URL=\"http://${LOCAL_IP}:3000\"|" .env +sed -i "s|^NEXTAUTH_URL_INTERNAL=.*|NEXTAUTH_URL_INTERNAL=\"http://localhost:3000\"|" .env +sed -i "/^POSTGRES_CONTAINER_NAME=/d" .env +sed -i "/^POSTGRES_USER=/d" .env +sed -i "/^POSTGRES_PASSWORD=/d" .env +sed -i "/^POSTGRES_DB=/d" .env +sed -i "/^POSTGRES_PORT=/d" .env +$STD pnpm build +$STD pnpm exec prisma migrate deploy +msg_ok "Built Split Pro" + +msg_info "Creating Service" +cat </etc/systemd/system/split-pro.service +[Unit] +Description=Split Pro +After=network.target postgresql.service +Requires=postgresql.service + +[Service] +Type=simple +User=root +WorkingDirectory=/opt/split-pro +EnvironmentFile=/opt/split-pro/.env +ExecStart=/usr/bin/pnpm start +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now split-pro +msg_ok "Created Service" + +motd_ssh +customize +cleanup_lxc diff --git a/install/step-ca-install.sh b/install/step-ca-install.sh new file mode 100644 index 000000000..9f0c3073b --- /dev/null +++ b/install/step-ca-install.sh @@ -0,0 +1,341 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: Joerg Heinemann (heinemannj) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/smallstep/certificates + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +setup_deb822_repo \ + "smallstep" \ + 
"https://packages.smallstep.com/keys/apt/repo-signing-key.gpg" \ + "https://packages.smallstep.com/stable/debian" \ + "debs" \ + "main" + +msg_info "Installing step-ca and step-cli" +$STD apt install -y step-ca step-cli + +STEPPATH="/etc/step-ca" +STEPHOME="/etc/step" + +export STEPPATH=$STEPPATH +echo "export STEPPATH=${STEPPATH}" >> /etc/profile +export STEPHOME=$STEPHOME +echo "export STEPHOME=${STEPHOME}" >> /etc/profile + +mkdir -p "$STEPHOME" + +# Patch for making $STD happy (/usr/bin/step is a symlink to /usr/bin/step-cli) +STEPBIN="$(which step)" +rm -f "$STEPBIN" +cp -f "$(which step-cli)" "$STEPBIN" + +# Low port-binding capabilities (ports < 1024) +# - Default step-ca listener port: 443 +setcap CAP_NET_BIND_SERVICE=+eip "$(which step-ca)" + +# Service User used by systemd step-ca.service +$STD useradd --user-group --system --home "$(step path)" --shell /bin/false step +msg_ok "Installed step-ca and step-cli" + +DomainName="$(hostname -d)" + +PKIName="$(prompt_input "Enter PKIName" "MyHomePKI" 30)" +PKICountry="$(prompt_input "Enter PKICountry" "DE" 30)" +PKIOrganizationalUnit="$(prompt_input "Enter PKIOrganizationalUnit" "MyHomeLab" 30)" +PKIProvisioner="$(prompt_input "Enter PKIProvisioner" "pki@$DomainName" 30)" +AcmeProvisioner="$(prompt_input "Enter AcmeProvisioner" "acme@$DomainName" 30)" +X509MinDur="$(prompt_input "Enter X509MinDur" "48h" 30)" +X509MaxDur="$(prompt_input "Enter X509MaxDur" "87600h" 30)" +X509DefaultDur="$(prompt_input "Enter X509DefaultDur" "168h" 30)" + +msg_info "Initializing step-ca" + +# Initialize step-ca +DeploymentType="standalone" +FQDN="$(hostname -f)" +IP="${LOCAL_IP}" +LISTENER=":443" +LISTENER_INSECURE=":80" + +# Set different signing CA and Provisioner Passwords +EncryptionPwdDir="$(step path)/encryption" +PwdFile="$EncryptionPwdDir/ca.pwd" +ProvisionerPwdFile="$EncryptionPwdDir/provisioner.pwd" +mkdir -p "$EncryptionPwdDir" +gpg -q --gen-random --armor 2 32 >"$PwdFile" +gpg -q --gen-random --armor 2 32 
>"$ProvisionerPwdFile" + +# Used by systemd step-ca.service +ln -s "$PwdFile" "$(step path)/password.txt" + +# Usage of: +# - SSH feature of step-ca +# - BadgerDB (badgerv2) => Default DB backend of step-ca +# - badgerFileLoadingMode: FileIO (instead of MemoryMap) for LXC with low RAM +$STD step ca init \ + --deployment-type="$DeploymentType" \ + --ssh \ + --name="$PKIName" \ + --dns="$FQDN" \ + --dns="$IP" \ + --address="$LISTENER" \ + --provisioner="$PKIProvisioner" \ + --password-file="$PwdFile" \ + --provisioner-password-file="$ProvisionerPwdFile" + +# Define enhanced x509 CA and Certificate Templates +mkdir -p "$(step path)/templates/ca" +mkdir -p "$(step path)/templates/x509" + +CARootTemplate="$(step path)/templates/ca/root.tpl" +CAIntermediateTemplate="$(step path)/templates/ca/intermediate.tpl" +X509LeafTemplate="$(step path)/templates/x509/leaf.tpl" +X509LeafTemplateData="$(step path)/templates/x509/leaf_data.tpl" + +cat <<'EOF' >"$CARootTemplate" +{ + "subject": { + "country": {{ toJson .Insecure.User.country }}, + "organization": {{ toJson .Insecure.User.organization }}, + "organizationalUnit": {{ toJson .Insecure.User.organizationalUnit }}, + "commonName": {{ toJson .Subject.CommonName }} + }, + "issuer": {{ toJson .Subject }}, + "keyUsage": ["certSign", "crlSign"], + "basicConstraints": { + "isCA": true, + "maxPathLen": 1 + }, + "issuingCertificateURL": [{{ toJson .Insecure.User.issuingCertificateURL }}], + "crlDistributionPoints": [{{ toJson .Insecure.User.crlDistributionPoints }}] +} +EOF + +cat <<'EOF' >"$CAIntermediateTemplate" +{ + "subject": { + "country": {{ toJson .Insecure.User.country }}, + "organization": {{ toJson .Insecure.User.organization }}, + "organizationalUnit": {{ toJson .Insecure.User.organizationalUnit }}, + "commonName": {{ toJson .Subject.CommonName }} + }, + "keyUsage": ["certSign", "crlSign"], + "basicConstraints": { + "isCA": true, + "maxPathLen": 0 + }, + "issuingCertificateURL": [{{ toJson 
.Insecure.User.issuingCertificateURL }}], + "crlDistributionPoints": [{{ toJson .Insecure.User.crlDistributionPoints }}] +} +EOF + +cat <<'EOF' >"$X509LeafTemplate" +{ + "subject": { +{{- if .Insecure.User.Country }} + "country": {{ toJson .Insecure.User.country }}, +{{- else }} + "country": {{ toJson .country }}, +{{- end }} +{{- if .Insecure.User.organization }} + "organization": {{ toJson .Insecure.User.organization }}, +{{- else }} + "organization": {{ toJson .organization }}, +{{- end }} +{{- if .Insecure.User.organizationalUnit }} + "organizationalUnit": {{ toJson .Insecure.User.organizationalUnit }}, +{{- else }} + "organizationalUnit": {{ toJson .organizationalUnit }}, +{{- end }} + "commonName": {{ toJson .Subject.CommonName }} + }, + "sans": {{ toJson .SANs }}, +{{- if typeIs "*rsa.PublicKey" .Insecure.CR.PublicKey }} + "keyUsage": ["keyEncipherment", "digitalSignature"], +{{- else }} + "keyUsage": ["digitalSignature"], +{{- end }} + "extKeyUsage": ["serverAuth", "clientAuth"], +{{- if .Insecure.User.issuingCertificateURL }} + "issuingCertificateURL": [{{ toJson .Insecure.User.issuingCertificateURL }}], +{{- else }} + "issuingCertificateURL": [{{ toJson .issuingCertificateURL }}], +{{- end }} +{{- if .Insecure.User.crlDistributionPoints }} + "crlDistributionPoints": [{{ toJson .Insecure.User.crlDistributionPoints }}] +{{- else }} + "crlDistributionPoints": [{{ toJson .crlDistributionPoints }}] +{{- end }} +} +EOF + +cat <"$X509LeafTemplateData" +{ + "country": "${PKICountry}", + "organization": "${PKIName}", + "organizationalUnit": "${PKIOrganizationalUnit}", + "issuingCertificateURL": ["https://${FQDN}${LISTENER}/intermediates.pem"], + "crlDistributionPoints": ["https://${FQDN}${LISTENER}/crl"] +} +EOF + +# Configure CA Provisioners, DB and CRL settings +$STD step ca provisioner add "$AcmeProvisioner" \ + --type ACME \ + --admin-name "$AcmeProvisioner" + +$STD step ca provisioner update "$PKIProvisioner" \ + --x509-min-dur="$X509MinDur" \ + 
--x509-max-dur="$X509MaxDur" \ + --x509-default-dur="$X509DefaultDur" \ + --x509-template="$X509LeafTemplate" \ + --x509-template-data="$X509LeafTemplateData" \ + --allow-renewal-after-expiry + +$STD step ca provisioner update "$AcmeProvisioner" \ + --x509-min-dur="$X509MinDur" \ + --x509-max-dur="$X509MaxDur" \ + --x509-default-dur="$X509DefaultDur" \ + --x509-template="$X509LeafTemplate" \ + --x509-template-data="$X509LeafTemplateData" \ + --allow-renewal-after-expiry + +CAConfig="$(step path)/config/ca.json" +jq --arg a "${PKICountry}" '.country = $a' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}" +jq --arg a "${PKIName}" '.organization = $a' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}" +jq --arg a "${PKIOrganizationalUnit}" '.organizationalUnit = $a' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}" +jq --arg a "${PKIName} Online CA" '.commonName = $a' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}" +jq '.db.badgerFileLoadingMode = "FileIO"' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}" +jq '.crl.enabled = true' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}" +jq '.crl.generateOnRevoke = true' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}" +jq '.crl.cacheDuration = "24h0m0s"' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}" +jq '.crl.renewPeriod = "16h0m0s"' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}" +jq --arg a "https://${FQDN}${LISTENER}/crl" '.crl.idpURL = $a' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}" +jq --arg a "$LISTENER_INSECURE" '.insecureAddress = $a' "${CAConfig}" > "${CAConfig}_tmp" && mv "${CAConfig}_tmp" "${CAConfig}" + +# Generate Root CA Certificate and Key +# - Validity: 219168h (~25 Years) +# - maxPathLen: 1 (Root -> Intermediate -> Leaf) => Only one Intermediate CA allowed below Root CA +# - 
Active revocation on Intermediate CA and Leaf Certificates by the usage of build-in Certificate Revocation List (CRL) +FLAGS=(--force + --template="${CARootTemplate}" + --not-after="219168h" + --password-file="${PwdFile}" + --set country="${PKICountry}" + --set organization="${PKIName}" + --set organizationalUnit="${PKIOrganizationalUnit}" + --set issuingCertificateURL="https://${FQDN}${LISTENER}/roots.pem" + --set crlDistributionPoints="https://${FQDN}${LISTENER}/crl") + +$STD step certificate create "${PKIName} Root CA" \ + "$(step path)/certs/root_ca.crt" \ + "$(step path)/secrets/root_ca_key" \ + "${FLAGS[@]}" + +# Generate Intermediate CA Certificate Bundle and Key +# - Validity: 175368h (~20 Years) +# - maxPathLen: 0 (Root -> Intermediate -> Leaf) => Intermediate CA is only allowed to issue Leaf Certificates +# - Active revocation on Leaf Certificates by the usage of build-in Certificate Revocation List (CRL) +# - Bundle: Certificate Chain (including Root CA Certificate) +FLAGS=(--force + --template="${CAIntermediateTemplate}" + --ca="$(step path)/certs/root_ca.crt" + --ca-key="$(step path)/secrets/root_ca_key" + --not-after="175368h" + --ca-password-file="${PwdFile}" + --password-file="${PwdFile}" + --bundle + --set country="${PKICountry}" + --set organization="${PKIName}" + --set organizationalUnit="${PKIOrganizationalUnit}" + --set issuingCertificateURL="https://${FQDN}${LISTENER}/roots.pem" + --set crlDistributionPoints="https://${FQDN}${LISTENER}/crl") + +$STD step certificate create "${PKIName} Intermediate CA" \ + "$(step path)/certs/intermediate_ca.crt" \ + "$(step path)/secrets/intermediate_ca_key" \ + "${FLAGS[@]}" + +# Install Root CA Certificate to System Trust Store +$STD step certificate install --all "$(step path)/certs/root_ca.crt" +$STD update-ca-certificates + +chown -R step:step "$(step path)" +chmod -R 700 "$(step path)" +msg_ok "Initialized step-ca" + +msg_info "Start step-ca as a Daemon" + +# 
https://smallstep.com/docs/step-ca/certificate-authority-server-production/#running-step-ca-as-a-daemon +cat <<'EOF' >/etc/systemd/system/step-ca.service +[Unit] +Description=step-ca service +Documentation=https://smallstep.com/docs/step-ca +Documentation=https://smallstep.com/docs/step-ca/certificate-authority-server-production +After=network-online.target +Wants=network-online.target +StartLimitIntervalSec=30 +StartLimitBurst=3 +ConditionFileNotEmpty=/etc/step-ca/config/ca.json +ConditionFileNotEmpty=/etc/step-ca/password.txt + +[Service] +Type=simple +User=step +Group=step +Environment=STEPPATH=/etc/step-ca +WorkingDirectory=/etc/step-ca +ExecStart=/usr/bin/step-ca config/ca.json --password-file password.txt +ExecReload=/bin/kill -USR1 $MAINPID +Restart=on-failure +RestartSec=5 +TimeoutStopSec=30 +StartLimitAction=reboot + +; Process capabilities & privileges +AmbientCapabilities=CAP_NET_BIND_SERVICE +CapabilityBoundingSet=CAP_NET_BIND_SERVICE +SecureBits=keep-caps +NoNewPrivileges=yes + +; Sandboxing +SystemCallArchitectures=native +SystemCallFilter=@system-service +SystemCallFilter=~@resources @privileged +RestrictNamespaces=yes +LockPersonality=yes +MemoryDenyWriteExecute=yes +RestrictRealtime=yes +RestrictSUIDSGID=yes +PrivateMounts=yes +ProtectControlGroups=yes +ProtectKernelModules=yes +ProtectKernelTunables=yes +ProtectSystem=strict +ProtectHome=yes +ReadWritePaths=/etc/step-ca/db + +; Read only paths +ReadOnlyPaths=/etc/step-ca + +[Install] +WantedBy=multi-user.target +EOF +$STD systemctl enable -q --now step-ca +msg_ok "Started step-ca as a Daemon" + +fetch_and_deploy_gh_release "step-badger" "lukasz-lobocki/step-badger" "prebuild" "latest" "/opt/step-badger" "step-badger_Linux_x86_64.tar.gz" +ln -s /opt/step-badger/step-badger /usr/local/bin/step-badger + +motd_ssh +customize +cleanup_lxc diff --git a/install/storybook-install.sh b/install/storybook-install.sh new file mode 100644 index 000000000..1d1e4a66b --- /dev/null +++ 
b/install/storybook-install.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/storybookjs/storybook + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +NODE_VERSION="24" NODE_MODULE="pnpm" setup_nodejs + +msg_info "Preparing Storybook" +mkdir -p /opt/storybook +cd /opt/storybook +msg_ok "Important: Interactive configuration will start now." + +npx -y storybook@latest init --yes --no-dev +PROJECT_PATH=$(find /opt/storybook -maxdepth 2 -name ".storybook" -type d 2>/dev/null | head -n1 | xargs dirname) + +if [[ -z "$PROJECT_PATH" ]]; then + PROJECT_PATH="/opt/storybook" +fi + +cd "$PROJECT_PATH" +echo "$PROJECT_PATH" >/opt/storybook/.projectpath + +msg_info "Creating Service" +cat </etc/systemd/system/storybook.service +[Unit] +Description=Storybook Dev Server +After=network.target + +[Service] +Type=simple +User=root +WorkingDirectory=${PROJECT_PATH} +ExecStart=/usr/bin/npx storybook dev --host 0.0.0.0 --port 6006 --no-open +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now storybook +msg_ok "Created Service" + +motd_ssh +customize +cleanup_lxc diff --git a/install/storyteller-install.sh b/install/storyteller-install.sh new file mode 100644 index 000000000..e02ee1a6b --- /dev/null +++ b/install/storyteller-install.sh @@ -0,0 +1,98 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://gitlab.com/storyteller-platform/storyteller + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y 
\ + build-essential \ + git \ + pkg-config \ + libsqlite3-dev \ + sqlite3 \ + python3-setuptools \ + ffmpeg +msg_ok "Installed Dependencies" + +NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs + +fetch_and_deploy_gh_release "readium" "readium/cli" "prebuild" "latest" "/opt/readium" "readium_linux_x86_64.tar.gz" +ln -sf /opt/readium/readium /usr/local/bin/readium +fetch_and_deploy_gl_release "storyteller" "storyteller-platform/storyteller" "tarball" "latest" "/opt/storyteller" + +msg_info "Setting up Storyteller" +cd /opt/storyteller +$STD yarn install --network-timeout 600000 +$STD gcc -g -fPIC -rdynamic -shared web/sqlite/uuid.c -o web/sqlite/uuid.c.so +STORYTELLER_SECRET_KEY=$(openssl rand -base64 32) +cat </opt/storyteller/.env +STORYTELLER_SECRET_KEY=${STORYTELLER_SECRET_KEY} +STORYTELLER_DATA_DIR=/opt/storyteller/data +PORT=8001 +HOSTNAME=0.0.0.0 +READIUM_PORT=9000 +NODE_ENV=production +NEXT_TELEMETRY_DISABLED=1 +EOF +mkdir -p /opt/storyteller/data +{ + echo "Storyteller Credentials" + echo "=======================" + echo "Secret Key: ${STORYTELLER_SECRET_KEY}" +} >~/storyteller.creds +msg_ok "Set up Storyteller" + +msg_info "Building Storyteller" +cd /opt/storyteller +export CI=1 +export NODE_ENV=production +export NEXT_TELEMETRY_DISABLED=1 +export SQLITE_NATIVE_BINDING=/opt/storyteller/node_modules/better-sqlite3/build/Release/better_sqlite3.node +$STD yarn workspaces foreach -Rpt --from @storyteller-platform/web --exclude @storyteller-platform/eslint run build +mkdir -p /opt/storyteller/web/.next/standalone/web/.next/static +cp -rT /opt/storyteller/web/.next/static /opt/storyteller/web/.next/standalone/web/.next/static +if [[ -d /opt/storyteller/web/public ]]; then + mkdir -p /opt/storyteller/web/.next/standalone/web/public + cp -rT /opt/storyteller/web/public /opt/storyteller/web/.next/standalone/web/public +fi +mkdir -p /opt/storyteller/web/.next/standalone/web/migrations +cp -rT /opt/storyteller/web/migrations 
/opt/storyteller/web/.next/standalone/web/migrations +mkdir -p /opt/storyteller/web/.next/standalone/web/sqlite +cp -rT /opt/storyteller/web/sqlite /opt/storyteller/web/.next/standalone/web/sqlite +ln -sf /opt/storyteller/.env /opt/storyteller/web/.next/standalone/web/.env +msg_ok "Built Storyteller" + +msg_info "Creating Service" +cat </etc/systemd/system/storyteller.service +[Unit] +Description=Storyteller +After=network.target + +[Service] +Type=simple +User=root +WorkingDirectory=/opt/storyteller/web/.next/standalone/web +EnvironmentFile=/opt/storyteller/.env +ExecStart=/usr/bin/node --enable-source-maps server.js +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now storyteller +msg_ok "Created Service" + +motd_ssh +customize +cleanup_lxc diff --git a/install/tandoor-install.sh b/install/tandoor-install.sh index c8f572297..b010a735e 100644 --- a/install/tandoor-install.sh +++ b/install/tandoor-install.sh @@ -47,6 +47,7 @@ $STD yarn install $STD yarn build cat </opt/tandoor/.env SECRET_KEY=$SECRET_KEY +ALLOWED_HOSTS=$LOCAL_IP TZ=Europe/Berlin DB_ENGINE=django.db.backends.postgresql diff --git a/install/tasmoadmin-install.sh b/install/tasmoadmin-install.sh index 2186e11b5..9f84f719b 100644 --- a/install/tasmoadmin-install.sh +++ b/install/tasmoadmin-install.sh @@ -23,7 +23,7 @@ fetch_and_deploy_gh_release "tasmoadmin" "TasmoAdmin/TasmoAdmin" "prebuild" "lat msg_info "Configuring TasmoAdmin" rm -rf /etc/php/8.4/apache2/conf.d/10-opcache.ini chown -R www-data:www-data /var/www/tasmoadmin -chmod 777 /var/www/tasmoadmin/tmp /var/www/tasmoadmin/data +chmod 775 /var/www/tasmoadmin/tmp /var/www/tasmoadmin/data cat </etc/apache2/sites-available/tasmoadmin.conf ServerName tasmoadmin diff --git a/install/tdarr-install.sh b/install/tdarr-install.sh index f40ecf088..5cc28bfd8 100644 --- a/install/tdarr-install.sh +++ b/install/tdarr-install.sh @@ -20,12 +20,16 @@ msg_ok "Installed Dependencies" msg_info "Installing Tdarr" 
mkdir -p /opt/tdarr cd /opt/tdarr -RELEASE=$(curl -fsSL https://f000.backblazeb2.com/file/tdarrs/versions.json | grep -oP '(?<="Tdarr_Updater": ")[^"]+' | grep linux_x64 | head -n 1) -curl -fsSL "$RELEASE" -o Tdarr_Updater.zip +RELEASE=$(curl_with_retry "https://f000.backblazeb2.com/file/tdarrs/versions.json" "-" | grep -oP '(?<="Tdarr_Updater": ")[^"]+' | grep linux_x64 | head -n 1) +curl_with_retry "$RELEASE" "Tdarr_Updater.zip" $STD unzip Tdarr_Updater.zip chmod +x Tdarr_Updater $STD ./Tdarr_Updater rm -rf /opt/tdarr/Tdarr_Updater.zip +[[ -f /opt/tdarr/Tdarr_Server/Tdarr_Server ]] || { + msg_error "Tdarr_Updater failed — tdarr.io may be blocked by local DNS" + exit 250 +} msg_ok "Installed Tdarr" setup_hwaccel diff --git a/install/teable-install.sh b/install/teable-install.sh new file mode 100644 index 000000000..203cd5d85 --- /dev/null +++ b/install/teable-install.sh @@ -0,0 +1,94 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/teableio/teable + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y \ + build-essential \ + python3 \ + git +msg_ok "Installed Dependencies" + +NODE_VERSION="24" NODE_MODULE="pnpm" setup_nodejs +PG_VERSION="16" setup_postgresql +PG_DB_NAME="teable" PG_DB_USER="teable" setup_postgresql_db + +fetch_and_deploy_gh_release "teable" "teableio/teable" "tarball" + +msg_info "Setting up Teable" +cd /opt/teable +TEABLE_VERSION=$(cat ~/.teable) +echo "NEXT_PUBLIC_BUILD_VERSION=\"${TEABLE_VERSION}\"" >>apps/nextjs-app/.env +export HUSKY=0 +export NODE_OPTIONS="--max-old-space-size=8192" +$STD pnpm install --frozen-lockfile +$STD pnpm -F @teable/db-main-prisma prisma-generate --schema ./prisma/postgres/schema.prisma +msg_ok "Set up Teable" + +msg_info 
"Building Teable" +NODE_ENV=production NEXT_BUILD_ENV_TYPECHECK=false \ + $STD pnpm -r --filter '!playground' run build +msg_ok "Built Teable" + +msg_info "Running Database Migrations" +PRISMA_DATABASE_URL="postgresql://teable:${PG_DB_PASS}@localhost:5432/teable?schema=public" \ + $STD pnpm -F @teable/db-main-prisma prisma-migrate deploy --schema ./prisma/postgres/schema.prisma +msg_ok "Ran Database Migrations" + +msg_info "Configuring Teable" +mkdir -p /opt/teable/.assets /opt/teable/.temporary +SECRET_KEY=$(openssl rand -base64 32) +cat </opt/teable/.env +PRISMA_DATABASE_URL=postgresql://teable:${PG_DB_PASS}@localhost:5432/teable?schema=public&statement_cache_size=1 +PUBLIC_ORIGIN=http://${LOCAL_IP}:3000 +SECRET_KEY=${SECRET_KEY} +PORT=3000 +NODE_ENV=production +NEXT_TELEMETRY_DISABLED=1 +BACKEND_CACHE_PROVIDER=sqlite +BACKEND_CACHE_SQLITE_URI=sqlite:///opt/teable/.assets/.cache.db +NEXTJS_DIR=apps/nextjs-app +EOF +ln -sf /opt/teable /app +rm -rf /opt/teable/static +if [ -d "/opt/teable/apps/nestjs-backend/static/static" ]; then + ln -sf /opt/teable/apps/nestjs-backend/static/static /opt/teable/static +else + ln -sf /opt/teable/apps/nestjs-backend/static /opt/teable/static +fi +msg_ok "Configured Teable" + +msg_info "Creating Service" +cat </etc/systemd/system/teable.service +[Unit] +Description=Teable +After=network.target postgresql.service + +[Service] +Type=simple +WorkingDirectory=/opt/teable +EnvironmentFile=/opt/teable/.env +ExecStart=/usr/bin/node apps/nestjs-backend/dist/index.js +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now teable +msg_ok "Created Service" + +motd_ssh +customize +cleanup_lxc diff --git a/install/technitiumdns-install.sh b/install/technitiumdns-install.sh index b26e06071..f38de6cae 100644 --- a/install/technitiumdns-install.sh +++ b/install/technitiumdns-install.sh @@ -20,21 +20,18 @@ setup_deb822_repo \ "https://packages.microsoft.com/debian/13/prod/" \ "trixie" \ "main" -$STD 
apt install -y aspnetcore-runtime-9.0 +$STD apt install -y aspnetcore-runtime-10.0 msg_ok "Installed Dependencies" RELEASE=$(curl -fsSL https://technitium.com/dns/ | grep -oP 'Version \K[\d.]+') -msg_info "Installing Technitium DNS" -mkdir -p /opt/technitium/dns -curl -fsSL "https://download.technitium.com/dns/DnsServerPortable.tar.gz" -o /opt/DnsServerPortable.tar.gz -$STD tar zxvf /opt/DnsServerPortable.tar.gz -C /opt/technitium/dns/ -rm -f /opt/DnsServerPortable.tar.gz +fetch_and_deploy_from_url "https://download.technitium.com/dns/DnsServerPortable.tar.gz" /opt/technitium/dns echo "${RELEASE}" >~/.technitium -msg_ok "Installed Technitium DNS" msg_info "Creating service" +mkdir -p /etc/dns /var/log/technitium/dns +sed -i '/^User=/d;/^Group=/d' /opt/technitium/dns/systemd.service cp /opt/technitium/dns/systemd.service /etc/systemd/system/technitium.service -systemctl enable -q --now technitium +systemctl enable -q --now technitium msg_ok "Service created" motd_ssh diff --git a/install/teleport-install.sh b/install/teleport-install.sh new file mode 100644 index 000000000..c98bcb466 --- /dev/null +++ b/install/teleport-install.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: Slaviša Arežina (tremor021) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://goteleport.com/ + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +setup_deb822_repo \ + "teleport" \ + "https://deb.releases.teleport.dev/teleport-pubkey.asc" \ + "https://apt.releases.teleport.dev/debian" \ + "trixie" \ + "stable/v18" + +msg_info "Configuring Teleport" +$STD apt install -y teleport +$STD teleport configure -o /etc/teleport.yaml +systemctl enable -q --now teleport +sleep 10 +tctl users add teleport-admin --roles=editor,access --logins=root >~/teleportadmin.txt +sed -i "s|https://[^:]*:3080|https://${LOCAL_IP}:3080|g" 
~/teleportadmin.txt +msg_ok "Configured Teleport" + +motd_ssh +customize +cleanup_lxc diff --git a/install/termix-install.sh b/install/termix-install.sh index 5737d0d6b..7614fb53b 100644 --- a/install/termix-install.sh +++ b/install/termix-install.sh @@ -19,11 +19,43 @@ $STD apt install -y \ python3 \ nginx \ openssl \ - gettext-base + gettext-base \ + libcairo2-dev \ + libjpeg62-turbo-dev \ + libpng-dev \ + libtool-bin \ + uuid-dev \ + libvncserver-dev \ + freerdp3-dev \ + libssh2-1-dev \ + libtelnet-dev \ + libwebsockets-dev \ + libpulse-dev \ + libvorbis-dev \ + libwebp-dev \ + libssl-dev \ + libpango1.0-dev \ + libswscale-dev \ + libavcodec-dev \ + libavutil-dev \ + libavformat-dev msg_ok "Installed Dependencies" +msg_info "Building Guacamole Server (guacd)" +fetch_and_deploy_gh_tag "guacd" "apache/guacamole-server" "latest" "/opt/guacamole-server" +cd /opt/guacamole-server +export CPPFLAGS="-Wno-error=deprecated-declarations" +$STD autoreconf -fi +$STD ./configure --with-init-dir=/etc/init.d --enable-allow-freerdp-snapshots +$STD make +$STD make install +$STD ldconfig +cd /opt +rm -rf /opt/guacamole-server +msg_ok "Built Guacamole Server (guacd)" + NODE_VERSION="22" setup_nodejs -fetch_and_deploy_gh_release "termix" "Termix-SSH/Termix" +fetch_and_deploy_gh_release "termix" "Termix-SSH/Termix" "tarball" msg_info "Building Frontend" cd /opt/termix @@ -68,23 +100,60 @@ sed -i 's|/app/html|/opt/termix/html|g' /etc/nginx/nginx.conf sed -i 's|/app/nginx|/opt/termix/nginx|g' /etc/nginx/nginx.conf sed -i 's|listen ${PORT};|listen 80;|g' /etc/nginx/nginx.conf +mkdir -p /tmp/nginx +echo "d /tmp/nginx 0755 nobody nobody -" > /etc/tmpfiles.d/nginx-termix.conf +mkdir -p /etc/systemd/system/nginx.service.d/ +cat > /etc/systemd/system/nginx.service.d/pidfile.conf << EOF +[Service] +PIDFile=/tmp/nginx/nginx.pid +EOF +systemctl daemon-reload rm -f /etc/nginx/sites-enabled/default nginx -t systemctl reload nginx msg_ok "Configured Nginx" msg_info "Creating Service" +mkdir -p 
/etc/guacamole +cat </etc/guacamole/guacd.conf +[server] +bind_host = 127.0.0.1 +bind_port = 4822 +EOF + +cat </opt/termix/.env +NODE_ENV=production +DATA_DIR=/opt/termix/data +GUACD_HOST=127.0.0.1 +GUACD_PORT=4822 +EOF + +cat </etc/systemd/system/guacd.service +[Unit] +Description=Guacamole Proxy Daemon (guacd) +After=network.target + +[Service] +Type=simple +ExecStart=/usr/local/sbin/guacd -f -b 127.0.0.1 -l 4822 +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF + cat </etc/systemd/system/termix.service [Unit] Description=Termix Backend -After=network.target +After=network.target guacd.service +Wants=guacd.service [Service] Type=simple User=root WorkingDirectory=/opt/termix -Environment=NODE_ENV=production -Environment=DATA_DIR=/opt/termix/data +EnvironmentFile=/opt/termix/.env ExecStart=/usr/bin/node /opt/termix/dist/backend/backend/starter.js Restart=on-failure RestartSec=5 @@ -92,7 +161,7 @@ RestartSec=5 [Install] WantedBy=multi-user.target EOF -systemctl enable -q --now termix +systemctl enable -q --now guacd termix msg_ok "Created Service" motd_ssh diff --git a/install/the-lounge-install.sh b/install/the-lounge-install.sh index 25ae88f52..d23aa3a9c 100644 --- a/install/the-lounge-install.sh +++ b/install/the-lounge-install.sh @@ -14,6 +14,7 @@ network_check update_os fetch_and_deploy_gh_release "thelounge" "thelounge/thelounge-deb" "binary" +systemctl enable -q --now thelounge motd_ssh customize diff --git a/install/threadfin-install.sh b/install/threadfin-install.sh index b834f6bed..46fd7d5c4 100644 --- a/install/threadfin-install.sh +++ b/install/threadfin-install.sh @@ -20,9 +20,8 @@ $STD apt install -y \ vlc msg_ok "Installed Dependencies" -fetch_and_deploy_gh_release "threadfin" "threadfin/threadfin" "singlefile" "latest" "/opt/threadfin" "Threadfin_linux_amd64" -mv /root/.threadfin /root/.threadfin_version -mkdir -p /root/.threadfin +fetch_and_deploy_gh_release "threadfin-app" "threadfin/threadfin" "singlefile" "latest" 
"/opt/threadfin" "Threadfin_linux_amd64" +mv /opt/threadfin/threadfin-app /opt/threadfin/threadfin msg_info "Creating Service" cat </etc/systemd/system/threadfin.service diff --git a/install/tracearr-install.sh b/install/tracearr-install.sh index 3ebab3338..f714fb652 100644 --- a/install/tracearr-install.sh +++ b/install/tracearr-install.sh @@ -62,6 +62,7 @@ fetch_and_deploy_gh_release "tracearr" "connorgallopo/Tracearr" "tarball" "lates msg_info "Building Tracearr" export TZ=$(cat /etc/timezone) +export NODE_OPTIONS="--max-old-space-size=4096" cd /opt/tracearr.build $STD pnpm install --frozen-lockfile --force $STD pnpm turbo telemetry disable diff --git a/install/transmute-install.sh b/install/transmute-install.sh new file mode 100644 index 000000000..a3b83d72d --- /dev/null +++ b/install/transmute-install.sh @@ -0,0 +1,100 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/transmute-app/transmute + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +UV_PYTHON="3.13" setup_uv +NODE_VERSION="25" setup_nodejs +setup_ffmpeg +setup_gs + +msg_info "Installing Dependencies" +$STD apt install -y \ + inkscape \ + tesseract-ocr \ + libreoffice-impress \ + libreoffice-common \ + libmagic1 \ + xvfb \ + libsm6 \ + libxext6 \ + libpango-1.0-0 \ + libopengl0 \ + libpangocairo-1.0-0 \ + libgdk-pixbuf-2.0-0 \ + libffi-dev \ + libcairo2 \ + librsvg2-bin \ + unrar-free \ + python3-numpy \ + python3-lxml \ + python3-tinycss2 \ + python3-cssselect +msg_ok "Installed Dependencies" + +fetch_and_deploy_gh_release "pandoc" "jgm/pandoc" "binary" "latest" "" "pandoc-*-amd64.deb" +fetch_and_deploy_gh_release "calibre" "kovidgoyal/calibre" "prebuild" "latest" "/opt/calibre" "calibre-*-x86_64.txz" +ln -sf /opt/calibre/ebook-convert /usr/bin/ebook-convert 
+ln -sf /usr/local/bin/ffmpeg /usr/bin/ffmpeg +fetch_and_deploy_gh_release "drawio" "jgraph/drawio-desktop" "binary" "latest" "" "drawio-amd64-*.deb" +fetch_and_deploy_gh_release "transmute" "transmute-app/transmute" "tarball" + +msg_info "Setting up Python Backend" +cd /opt/transmute +$STD uv venv --clear /opt/transmute/.venv +$STD uv pip install --python /opt/transmute/.venv/bin/python -r requirements.txt +ln -sf /opt/transmute/.venv/bin/weasyprint /usr/bin/weasyprint +msg_ok "Set up Python Backend" + +msg_info "Configuring Transmute" +SECRET_KEY=$(openssl rand -hex 64) +cat </opt/transmute/backend/.env +AUTH_SECRET_KEY=${SECRET_KEY} +HOST=0.0.0.0 +PORT=3313 +DATA_DIR=/opt/transmute/data +WEB_DIR=/opt/transmute/frontend/dist +QT_QPA_PLATFORM=offscreen +EOF +mkdir -p /opt/transmute/data +msg_ok "Configured Transmute" + +msg_info "Building Frontend" +cd /opt/transmute/frontend +$STD npm ci +$STD npm run build +msg_ok "Built Frontend" + +msg_info "Creating Service" +cat </etc/systemd/system/transmute.service +[Unit] +Description=Transmute File Converter +After=network.target + +[Service] +Type=simple +WorkingDirectory=/opt/transmute +EnvironmentFile=/opt/transmute/backend/.env +ExecStart=/usr/bin/xvfb-run -a -s "-screen 0 1024x768x24 -nolisten tcp" /opt/transmute/.venv/bin/python backend/main.py +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now transmute +msg_ok "Created Service" + +motd_ssh +customize +cleanup_lxc diff --git a/install/trek-install.sh b/install/trek-install.sh new file mode 100644 index 000000000..535597f8a --- /dev/null +++ b/install/trek-install.sh @@ -0,0 +1,79 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/mauriceboe/TREK + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container 
+network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y build-essential +msg_ok "Installed Dependencies" + +NODE_VERSION="22" setup_nodejs +fetch_and_deploy_gh_release "trek" "mauriceboe/TREK" "tarball" + +msg_info "Building Client" +cd /opt/trek/client +$STD npm ci +$STD npm run build +msg_ok "Built Client" + +msg_info "Setting up Server" +cd /opt/trek/server +$STD npm ci +mkdir -p /opt/trek/server/public +cp -r /opt/trek/client/dist/* /opt/trek/server/public/ +cp -r /opt/trek/client/public/fonts /opt/trek/server/public/fonts 2>/dev/null || true +mkdir -p /opt/trek/{data/logs,uploads/{files,covers,avatars,photos}} +rm -rf /opt/trek/server/data /opt/trek/server/uploads +ln -s /opt/trek/data /opt/trek/server/data +ln -s /opt/trek/uploads /opt/trek/server/uploads +ENCRYPTION_KEY=$(openssl rand -hex 32) +ADMIN_EMAIL="admin@trek.local" +ADMIN_PASSWORD=$(openssl rand -base64 18 | tr -dc 'A-Za-z0-9' | head -c 16) +cat </opt/trek/server/.env +NODE_ENV=production +PORT=3000 +ENCRYPTION_KEY=${ENCRYPTION_KEY} +ADMIN_EMAIL=${ADMIN_EMAIL} +ADMIN_PASSWORD=${ADMIN_PASSWORD} +COOKIE_SECURE=false +FORCE_HTTPS=false +LOG_LEVEL=info +TZ=UTC +EOF +chmod 600 /opt/trek/server/.env +msg_ok "Set up Server" + +msg_info "Creating Service" +cat </etc/systemd/system/trek.service +[Unit] +Description=TREK Travel Planner +After=network.target + +[Service] +Type=simple +User=root +WorkingDirectory=/opt/trek/server +EnvironmentFile=/opt/trek/server/.env +ExecStart=/usr/bin/node --import tsx src/index.ts +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now trek +msg_ok "Created Service" + +motd_ssh +customize +cleanup_lxc diff --git a/install/tubearchivist-install.sh b/install/tubearchivist-install.sh new file mode 100644 index 000000000..8afe94377 --- /dev/null +++ b/install/tubearchivist-install.sh @@ -0,0 +1,294 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk 
(CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/tubearchivist/tubearchivist + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y \ + build-essential \ + git \ + nginx \ + redis-server \ + atomicparsley \ + python3-dev \ + libldap2-dev \ + libsasl2-dev \ + libssl-dev \ + sqlite3 \ + ffmpeg +msg_ok "Installed Dependencies" + +UV_PYTHON="3.13" setup_uv +NODE_VERSION="24" setup_nodejs + +fetch_and_deploy_gh_release "deno" "denoland/deno" "prebuild" "latest" "/usr/local/bin" "deno-x86_64-unknown-linux-gnu.zip" + +msg_info "Installing ElasticSearch" +setup_deb822_repo \ + "elastic-8.x" \ + "https://artifacts.elastic.co/GPG-KEY-elasticsearch" \ + "https://artifacts.elastic.co/packages/8.x/apt" \ + "stable" \ + "main" +ES_JAVA_OPTS="-Xms1g -Xmx1g" $STD apt install -y elasticsearch +msg_ok "Installed ElasticSearch" + +msg_info "Configuring ElasticSearch" +cat </etc/elasticsearch/elasticsearch.yml +cluster.name: tubearchivist +path.data: /var/lib/elasticsearch +path.logs: /var/log/elasticsearch +path.repo: ["/var/lib/elasticsearch/snapshot"] +network.host: 127.0.0.1 +xpack.security.enabled: false +xpack.security.transport.ssl.enabled: false +xpack.security.http.ssl.enabled: false +EOF +mkdir -p /var/lib/elasticsearch/snapshot +chown -R elasticsearch:elasticsearch /var/lib/elasticsearch/snapshot +cat </etc/elasticsearch/jvm.options.d/heap.options +-Xms1g +-Xmx1g +EOF +sysctl -w vm.max_map_count=262144 2>/dev/null || true +cat </etc/sysctl.d/99-elasticsearch.conf +vm.max_map_count=262144 +EOF +systemctl enable -q --now elasticsearch +msg_ok "Configured ElasticSearch" + +fetch_and_deploy_gh_release "tubearchivist" "tubearchivist/tubearchivist" "tarball" + +msg_info "Building Frontend" +cd /opt/tubearchivist/frontend +$STD npm install +$STD npm run build:deploy +mkdir -p 
/opt/tubearchivist/backend/static +cp -r /opt/tubearchivist/frontend/dist/* /opt/tubearchivist/backend/static/ +msg_ok "Built Frontend" + +msg_info "Setting up Tube Archivist" +cp /opt/tubearchivist/docker_assets/backend_start.py /opt/tubearchivist/backend/ +$STD uv venv /opt/tubearchivist/.venv +$STD uv pip install --python /opt/tubearchivist/.venv/bin/python -r /opt/tubearchivist/backend/requirements.txt +if [[ -f /opt/tubearchivist/backend/requirements.plugins.txt ]]; then + mkdir -p /opt/yt_plugins/bgutil + $STD uv pip install --python /opt/tubearchivist/.venv/bin/python --target /opt/yt_plugins/bgutil -r /opt/tubearchivist/backend/requirements.plugins.txt +fi +TA_PASSWORD=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13) +ES_PASSWORD=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13) +mkdir -p /opt/tubearchivist/{cache,media} +ln -sf /opt/tubearchivist/cache /cache +ln -sf /opt/tubearchivist/media /youtube +cat </opt/tubearchivist/.env +TA_HOST=http://${LOCAL_IP}:8000 +TA_USERNAME=admin +TA_PASSWORD=${TA_PASSWORD} +TA_BACKEND_PORT=8080 +TA_APP_DIR=/opt/tubearchivist/backend +TA_CACHE_DIR=/cache +TA_MEDIA_DIR=/youtube +ES_SNAPSHOT_DIR=/var/lib/elasticsearch/snapshot +ELASTIC_PASSWORD=${ES_PASSWORD} +REDIS_CON=redis://localhost:6379 +ES_URL=http://localhost:9200 +TZ=UTC +PYTHONUNBUFFERED=1 +YTDLP_PLUGIN_DIRS=/opt/yt_plugins +EOF +{ + echo "Tube Archivist Credentials" + echo "==========================" + echo "Username: admin" + echo "Password: ${TA_PASSWORD}" + echo "Elasticsearch Password: ${ES_PASSWORD}" +} >~/tubearchivist.creds +systemctl enable -q --now redis-server +msg_ok "Set up Tube Archivist" + +msg_info "Configuring Nginx" +sed -i 's/^user www-data;$/user root;/' /etc/nginx/nginx.conf +cat <<'EOF' >/etc/nginx/sites-available/default +server { + listen 8000; + + location = /_auth { + internal; + proxy_pass http://localhost:8080/api/ping/; + proxy_pass_request_body off; + proxy_set_header Content-Length ""; + proxy_set_header Host 
$http_host; + proxy_set_header Cookie $http_cookie; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + + location /cache/videos/ { + auth_request /_auth; + alias /cache/videos/; + } + + location /cache/channels/ { + auth_request /_auth; + alias /cache/channels/; + } + + location /cache/playlists/ { + auth_request /_auth; + alias /cache/playlists/; + } + + location /media/ { + auth_request /_auth; + alias /youtube/; + types { + text/vtt vtt; + } + } + + location /youtube/ { + auth_request /_auth; + alias /youtube/; + types { + video/mp4 mp4; + } + } + + location /api { + include proxy_params; + proxy_pass http://localhost:8080; + } + + location /admin { + include proxy_params; + proxy_pass http://localhost:8080; + } + + location /static/ { + alias /opt/tubearchivist/backend/staticfiles/; + } + + root /opt/tubearchivist/backend/static; + index index.html; + + location ~* ^/(?!static/|cache/).*\.(?:css|js|png|jpg|jpeg|gif|ico|svg|woff2?)$ { + try_files $uri $uri/ /index.html =404; + } + + location = /index.html { + add_header Cache-Control "no-store, no-cache, must-revalidate"; + add_header Pragma "no-cache"; + expires 0; + } + + location / { + add_header Cache-Control "no-store, no-cache, must-revalidate"; + add_header Pragma "no-cache"; + expires 0; + try_files $uri $uri/ /index.html =404; + } +} +EOF +systemctl enable -q nginx +systemctl restart nginx +msg_ok "Configured Nginx" + +msg_info "Creating Services" +cat <<'RUNEOF' >/opt/tubearchivist/backend/run.sh +#!/bin/bash +set -e +cd /opt/tubearchivist/backend +set -a +source /opt/tubearchivist/.env +set +a +PYTHON=/opt/tubearchivist/.venv/bin/python + +echo "Waiting for ElasticSearch..." 
+for i in $(seq 1 30); do + if curl -sf http://localhost:9200/_cluster/health >/dev/null 2>&1; then + break + fi + sleep 2 +done + +$PYTHON manage.py migrate +$PYTHON manage.py collectstatic --noinput -c +$PYTHON manage.py ta_envcheck +$PYTHON manage.py ta_connection +$PYTHON manage.py ta_startup + +exec $PYTHON backend_start.py +RUNEOF +chmod +x /opt/tubearchivist/backend/run.sh +ln -sf /opt/tubearchivist/.env /opt/tubearchivist/backend/.env +cat </etc/systemd/system/tubearchivist.service +[Unit] +Description=Tube Archivist Backend +After=network.target elasticsearch.service redis-server.service + +[Service] +Type=simple +User=root +WorkingDirectory=/opt/tubearchivist/backend +EnvironmentFile=/opt/tubearchivist/.env +Environment=PATH=/opt/tubearchivist/.venv/bin:/usr/local/bin:/usr/bin:/bin +ExecStart=/opt/tubearchivist/backend/run.sh +Restart=on-failure +RestartSec=10 + +[Install] +WantedBy=multi-user.target +EOF +cat </etc/systemd/system/tubearchivist-celery.service +[Unit] +Description=Tube Archivist Celery Worker +After=tubearchivist.service redis-server.service elasticsearch.service + +[Service] +Type=simple +User=root +WorkingDirectory=/opt/tubearchivist/backend +EnvironmentFile=/opt/tubearchivist/.env +Environment=PATH=/opt/tubearchivist/.venv/bin:/usr/local/bin:/usr/bin:/bin +ExecStart=/opt/tubearchivist/.venv/bin/celery -A task worker --loglevel=error --concurrency=4 --max-tasks-per-child=5 --max-memory-per-child=150000 +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +cat </etc/systemd/system/tubearchivist-beat.service +[Unit] +Description=Tube Archivist Celery Beat +After=tubearchivist.service redis-server.service + +[Service] +Type=simple +User=root +WorkingDirectory=/opt/tubearchivist/backend +EnvironmentFile=/opt/tubearchivist/.env +Environment=PATH=/opt/tubearchivist/.venv/bin:/usr/local/bin:/usr/bin:/bin +ExecStartPre=/bin/bash -c 'for i in \$(seq 1 60); do sqlite3 /cache/db.sqlite3 "SELECT 1 FROM 
django_celery_beat_crontabschedule LIMIT 1" 2>/dev/null && exit 0; sleep 2; done; exit 1' +ExecStart=/opt/tubearchivist/.venv/bin/celery -A task beat --loglevel=error --scheduler django_celery_beat.schedulers:DatabaseScheduler +Restart=always +RestartSec=5 +RuntimeMaxSec=3600 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now tubearchivist tubearchivist-celery tubearchivist-beat +msg_ok "Created Services" + +motd_ssh +customize +cleanup_lxc diff --git a/install/typesense-install.sh b/install/typesense-install.sh deleted file mode 100644 index 99da42eb1..000000000 --- a/install/typesense-install.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) 2021-2026 community-scripts ORG -# Author: tlissak -# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://typesense.org/ - -source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" -color -verb_ip6 -catch_errors -setting_up_container -network_check -update_os - -msg_info "Installing TypeSense" -RELEASE=$(curl -fsSL https://api.github.com/repos/typesense/typesense/releases/latest | grep "tag_name" | awk '{print substr($2, 3, length($2)-4) }') -cd /opt -curl -fsSL "https://dl.typesense.org/releases/${RELEASE}/typesense-server-${RELEASE}-amd64.deb" -o "/opt/typesense-server-${RELEASE}-amd64.deb" -$STD apt install -y /opt/typesense-server-${RELEASE}-amd64.deb -echo 'enable-cors = true' >>/etc/typesense/typesense-server.ini -rm -rf /opt/typesense-server-${RELEASE}-amd64.deb -echo "${RELEASE}" >"/opt/${APPLICATION}_version.txt" -msg_ok "Installed TypeSense" - -motd_ssh -customize -cleanup_lxc diff --git a/install/uhf-install.sh b/install/uhf-install.sh index c1fa1cf0b..3f66b1fec 100644 --- a/install/uhf-install.sh +++ b/install/uhf-install.sh @@ -15,7 +15,7 @@ update_os setup_hwaccel msg_info "Installing Dependencies" -$STD apt install -y ffmpeg +setup_ffmpeg msg_ok "Installed Dependencies" msg_info "Setting Up UHF Server Environment" diff --git 
a/install/versitygw-install.sh b/install/versitygw-install.sh new file mode 100644 index 000000000..25ef43977 --- /dev/null +++ b/install/versitygw-install.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/versity/versitygw + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +fetch_and_deploy_gh_release "versitygw" "versity/versitygw" "binary" + +WEBUI_CONF="" +read -rp "Would you like to enable the VersityGW WebGUI (Beta)? (y/N): " webui_prompt +if [[ "${webui_prompt,,}" =~ ^(y|yes)$ ]]; then + WEBUI_CONF="\nVGW_WEBUI_PORT=:7071\nVGW_WEBUI_NO_TLS=true" + msg_ok "WebGUI will be enabled on port 7071" +fi + +msg_info "Configuring VersityGW" +mkdir -p /opt/versitygw-data +ACCESS_KEY=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | cut -c1-20) +SECRET_KEY=$(openssl rand -base64 36 | tr -dc 'a-zA-Z0-9' | cut -c1-40) + +cat </etc/versitygw.d/gateway.conf +VGW_BACKEND=posix +VGW_BACKEND_ARG=/opt/versitygw-data +VGW_PORT=:7070 +ROOT_ACCESS_KEY_ID=${ACCESS_KEY} +ROOT_SECRET_ACCESS_KEY=${SECRET_KEY} +EOF + +if [[ -n "$WEBUI_CONF" ]]; then + echo -e "$WEBUI_CONF" >>/etc/versitygw.d/gateway.conf +fi +msg_ok "Configured VersityGW" + +msg_info "Enabling Service" +systemctl enable -q --now versitygw@gateway +msg_ok "Enabled Service" + +motd_ssh +customize +cleanup_lxc diff --git a/install/victoriametrics-install.sh b/install/victoriametrics-install.sh index 95cc94472..7d5b174d6 100644 --- a/install/victoriametrics-install.sh +++ b/install/victoriametrics-install.sh @@ -14,16 +14,16 @@ network_check update_os msg_info "Getting latest version of VictoriaMetrics" -victoriametrics_filename=$(curl -fsSL "https://api.github.com/repos/VictoriaMetrics/VictoriaMetrics/releases/latest" | - jq -r '.assets[].name' | - grep -E 
'^victoria-metrics-linux-amd64-v[0-9.]+\.tar\.gz$') -vmutils_filename=$(curl -fsSL "https://api.github.com/repos/VictoriaMetrics/VictoriaMetrics/releases/latest" | - jq -r '.assets[].name' | - grep -E '^vmutils-linux-amd64-v[0-9.]+\.tar\.gz$') -msg_ok "Got latest version of VictoriaMetrics" -fetch_and_deploy_gh_release "victoriametrics" "VictoriaMetrics/VictoriaMetrics" "prebuild" "latest" "/opt/victoriametrics" "$victoriametrics_filename" -fetch_and_deploy_gh_release "vmutils" "VictoriaMetrics/VictoriaMetrics" "prebuild" "latest" "/opt/victoriametrics" "$vmutils_filename" +victoriametrics_release=$(curl -fsSL "https://api.github.com/repos/VictoriaMetrics/VictoriaMetrics/releases" | + jq -r '.[] | select(.assets[].name | match("^victoria-metrics-linux-amd64-v[0-9.]+.tar.gz$")) | .tag_name' | + head -n 1) +victoriametrics_filename="victoria-metrics-linux-amd64-${victoriametrics_release}.tar.gz" +vmutils_filename="vmutils-linux-amd64-${victoriametrics_release}.tar.gz" +msg_ok "Got version $victoriametrics_release of VictoriaMetrics" + +fetch_and_deploy_gh_release "victoriametrics" "VictoriaMetrics/VictoriaMetrics" "prebuild" "$victoriametrics_release" "/opt/victoriametrics" "$victoriametrics_filename" +fetch_and_deploy_gh_release "vmutils" "VictoriaMetrics/VictoriaMetrics" "prebuild" "$victoriametrics_release" "/opt/victoriametrics" "$vmutils_filename" read -r -p "${TAB3}Would you like to add VictoriaLogs? 
" prompt diff --git a/install/wanderer-install.sh b/install/wanderer-install.sh index 4c6394673..1f8c3314d 100644 --- a/install/wanderer-install.sh +++ b/install/wanderer-install.sh @@ -60,6 +60,16 @@ wait -n EOF chmod +x /opt/wanderer/start.sh +cat <<'EOF' >/usr/local/bin/wanderer-pb +#!/usr/bin/env bash +set -a +source /opt/wanderer/.env +set +a +cd /opt/wanderer/source/db +exec ./pocketbase "$@" --dir="$PB_DB_LOCATION" +EOF +chmod +x /usr/local/bin/wanderer-pb + cat </etc/systemd/system/wanderer-web.service [Unit] Description=wanderer diff --git a/install/wealthfolio-install.sh b/install/wealthfolio-install.sh index 84a633466..aa8e89184 100644 --- a/install/wealthfolio-install.sh +++ b/install/wealthfolio-install.sh @@ -23,8 +23,8 @@ $STD apt install -y \ msg_ok "Installed Dependencies" setup_rust -NODE_VERSION="20" NODE_MODULE="pnpm" setup_nodejs -fetch_and_deploy_gh_release "wealthfolio" "afadil/wealthfolio" "tarball" "v3.0.3" +NODE_VERSION="24" NODE_MODULE="pnpm" setup_nodejs +fetch_and_deploy_gh_release "wealthfolio" "afadil/wealthfolio" "tarball" msg_info "Building Frontend (patience)" cd /opt/wealthfolio diff --git a/install/web-check-install.sh b/install/web-check-install.sh index 751992e51..20f08c3dd 100644 --- a/install/web-check-install.sh +++ b/install/web-check-install.sh @@ -18,13 +18,10 @@ export DEBIAN_FRONTEND=noninteractive $STD apt -y install --no-install-recommends \ git \ traceroute \ - make \ - g++ \ - traceroute \ + build-essential \ xvfb \ dbus \ xorg \ - xvfb \ gtk2-engines-pixbuf \ dbus-x11 \ xfonts-base \ @@ -43,16 +40,13 @@ rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED msg_ok "Setup Python3" msg_info "Installing Chromium" -curl -fsSL https://dl-ssl.google.com/linux/linux_signing_key.pub | gpg --dearmor -o /usr/share/keyrings/google-chrome-keyring.gpg -cat </dev/null -Types: deb -URIs: http://dl.google.com/linux/chrome/deb/ -Suites: stable -Components: main -Architectures: amd64 -Signed-By: /usr/share/keyrings/google-chrome-keyring.gpg 
-EOF -$STD apt update +setup_deb822_repo \ + "google-chrome" \ + "https://dl-ssl.google.com/linux/linux_signing_key.pub" \ + "http://dl.google.com/linux/chrome/deb/" \ + "stable" \ + "main" \ + "amd64" $STD apt -y install \ chromium \ libxss1 \ @@ -64,13 +58,14 @@ msg_info "Setting up Chromium" chmod 755 /usr/bin/chromium msg_ok "Setup Chromium" -fetch_and_deploy_gh_release "web-check" "CrazyWolf13/web-check" "tarball" +fetch_and_deploy_gh_release "web-check" "Lissy93/web-check" "tarball" msg_info "Installing Web-Check (Patience)" cd /opt/web-check cat <<'EOF' >/opt/web-check/.env CHROME_PATH=/usr/bin/chromium PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium +PUPPETEER_SKIP_DOWNLOAD='true' HEADLESS=true GOOGLE_CLOUD_API_KEY='' REACT_APP_SHODAN_API_KEY='' diff --git a/install/whodb-install.sh b/install/whodb-install.sh new file mode 100644 index 000000000..78e7ef802 --- /dev/null +++ b/install/whodb-install.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://whodb.com/ + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +fetch_and_deploy_gh_release "whodb" "clidey/whodb" "singlefile" "latest" "/opt/whodb" "whodb-*-linux-amd64" + +msg_info "Creating Service" +cat </etc/systemd/system/whodb.service +[Unit] +Description=WhoDB Database Management +After=network.target + +[Service] +Type=simple +User=root +WorkingDirectory=/opt/whodb +ExecStart=/opt/whodb/whodb +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +systemctl enable -q --now whodb +msg_ok "Created Service" + +motd_ssh +customize +cleanup_lxc diff --git a/install/wishlist-install.sh b/install/wishlist-install.sh index f1821e0c7..841f6d2b0 100644 --- a/install/wishlist-install.sh +++ b/install/wishlist-install.sh @@ -20,7 +20,7 @@ $STD apt 
install -y \ caddy msg_ok "Installed dependencies" -NODE_VERSION="24" NODE_MODULE="pnpm" setup_nodejs +NODE_VERSION="24" NODE_MODULE="pnpm@10" setup_nodejs fetch_and_deploy_gh_release "wishlist" "cmintey/wishlist" "tarball" LATEST_APP_VERSION=$(get_latest_github_release "cmintey/wishlist") @@ -30,7 +30,7 @@ cp .env.example .env sed -i "s|^ORIGIN=.*|ORIGIN=http://${LOCAL_IP}:3280|" /opt/wishlist/.env echo "" >>/opt/wishlist/.env echo "NODE_ENV=production" >>/opt/wishlist/.env -$STD pnpm install +$STD pnpm install --frozen-lockfile $STD pnpm svelte-kit sync $STD pnpm prisma generate sed -i 's|/usr/src/app/|/opt/wishlist/|g' $(grep -rl '/usr/src/app/' /opt/wishlist) diff --git a/install/yamtrack-install.sh b/install/yamtrack-install.sh new file mode 100644 index 000000000..3673b0474 --- /dev/null +++ b/install/yamtrack-install.sh @@ -0,0 +1,105 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/FuzzyGrim/Yamtrack + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y \ + nginx \ + redis-server +msg_ok "Installed Dependencies" + +PG_VERSION="16" setup_postgresql +PG_DB_NAME="yamtrack" PG_DB_USER="yamtrack" setup_postgresql_db +PYTHON_VERSION="3.12" setup_uv + +fetch_and_deploy_gh_release "yamtrack" "FuzzyGrim/Yamtrack" "tarball" + +msg_info "Installing Python Dependencies" +cd /opt/yamtrack +$STD uv venv .venv +$STD uv pip install --no-cache-dir -r requirements.txt +msg_ok "Installed Python Dependencies" + +msg_info "Configuring Yamtrack" +SECRET=$(openssl rand -hex 32) +cat </opt/yamtrack/src/.env +SECRET=${SECRET} +DB_HOST=localhost +DB_NAME=${PG_DB_NAME} +DB_USER=${PG_DB_USER} +DB_PASSWORD=${PG_DB_PASS} +DB_PORT=5432 +REDIS_URL=redis://localhost:6379 
+URLS=http://${LOCAL_IP}:8000 +EOF + +cd /opt/yamtrack/src +$STD /opt/yamtrack/.venv/bin/python manage.py migrate +$STD /opt/yamtrack/.venv/bin/python manage.py collectstatic --noinput +msg_ok "Configured Yamtrack" + +msg_info "Configuring Nginx" +rm -f /etc/nginx/sites-enabled/default /etc/nginx/sites-available/default +cp /opt/yamtrack/nginx.conf /etc/nginx/nginx.conf +sed -i 's|user abc;|user www-data;|' /etc/nginx/nginx.conf +sed -i 's|pid /tmp/nginx.pid;|pid /run/nginx.pid;|' /etc/nginx/nginx.conf +sed -i 's|/yamtrack/staticfiles/|/opt/yamtrack/src/staticfiles/|' /etc/nginx/nginx.conf +sed -i 's|error_log /dev/stderr|error_log /var/log/nginx/error.log|' /etc/nginx/nginx.conf +sed -i 's|access_log /dev/stdout|access_log /var/log/nginx/access.log|' /etc/nginx/nginx.conf +$STD nginx -t +systemctl enable -q nginx +$STD systemctl restart nginx +msg_ok "Configured Nginx" + +msg_info "Creating Services" +cat </etc/systemd/system/yamtrack.service +[Unit] +Description=Yamtrack Gunicorn +After=network.target postgresql.service redis-server.service +Requires=postgresql.service redis-server.service + +[Service] +Type=simple +WorkingDirectory=/opt/yamtrack/src +ExecStart=/opt/yamtrack/.venv/bin/gunicorn config.wsgi:application -b 127.0.0.1:8001 -w 2 --timeout 120 +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF + +cat </etc/systemd/system/yamtrack-celery.service +[Unit] +Description=Yamtrack Celery Worker +After=network.target postgresql.service redis-server.service yamtrack.service +Requires=postgresql.service redis-server.service + +[Service] +Type=simple +WorkingDirectory=/opt/yamtrack/src +ExecStart=/opt/yamtrack/.venv/bin/celery -A config worker --beat --scheduler django --loglevel INFO +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF + +systemctl enable -q --now redis-server yamtrack yamtrack-celery +msg_ok "Created Services" + +motd_ssh +customize +cleanup_lxc diff --git a/install/yourls-install.sh 
b/install/yourls-install.sh new file mode 100644 index 000000000..df502a6b7 --- /dev/null +++ b/install/yourls-install.sh @@ -0,0 +1,91 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://yourls.org/ + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +msg_info "Installing Dependencies" +$STD apt install -y nginx +msg_ok "Installed Dependencies" + +setup_mariadb +MARIADB_DB_NAME="yourls" MARIADB_DB_USER="yourls" setup_mariadb_db +PHP_VERSION="8.3" PHP_FPM="YES" PHP_MODULE="mysql,mbstring,gd,xml,curl" setup_php + +fetch_and_deploy_gh_release "yourls" "YOURLS/YOURLS" "tarball" + +msg_info "Configuring YOURLS" +COOKIEKEY=$(openssl rand -hex 24) +YOURLS_PASS=$(openssl rand -base64 12 | tr -dc 'a-zA-Z0-9' | cut -c1-16) +cat </opt/yourls/user/config.php + '${YOURLS_PASS}', +]; +define( 'YOURLS_URL_CONVERT', 36 ); +define( 'YOURLS_DEBUG', false ); +EOF +chown -R www-data:www-data /opt/yourls +msg_ok "Configured YOURLS" + +msg_info "Configuring Nginx" +cat </etc/nginx/sites-available/yourls +server { + listen 80 default_server; + server_name _; + root /opt/yourls; + index index.php; + + location / { + try_files \$uri \$uri/ /yourls-loader.php\$is_args\$args; + } + + location ~ \.php\$ { + try_files \$uri =404; + fastcgi_split_path_info ^(.+\.php)(/.+)\$; + fastcgi_pass unix:/run/php/php8.3-fpm.sock; + fastcgi_index index.php; + include fastcgi_params; + fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name; + fastcgi_param PATH_INFO \$fastcgi_path_info; + } + + location ~* \.(jpg|jpeg|gif|css|png|js|ico|woff|woff2)\$ { + access_log off; + expires max; + } + + location ~ /\.ht { + deny all; + } +} +EOF +ln -sf /etc/nginx/sites-available/yourls /etc/nginx/sites-enabled/yourls +rm -f /etc/nginx/sites-enabled/default +$STD nginx -t 
+systemctl enable -q --now nginx +systemctl reload nginx +msg_ok "Configured Nginx" + +motd_ssh +customize +cleanup_lxc diff --git a/install/zerobyte-install.sh b/install/zerobyte-install.sh index 80ec20ef3..4d43d5e50 100644 --- a/install/zerobyte-install.sh +++ b/install/zerobyte-install.sh @@ -18,6 +18,7 @@ echo "davfs2 davfs2/suid_file boolean false" | debconf-set-selections $STD apt-get install -y \ bzip2 \ fuse3 \ + git \ sshfs \ davfs2 \ openssh-client diff --git a/misc/alpine-install.func b/misc/alpine-install.func index 23af25128..c54a7d598 100644 --- a/misc/alpine-install.func +++ b/misc/alpine-install.func @@ -90,11 +90,18 @@ setting_up_container() { network_check() { set +e trap - ERR + ipv4_connected=false + + # Check IPv4 connectivity to Cloudflare, Google & Quad9 DNS servers if ping -c 1 -W 1 1.1.1.1 &>/dev/null || ping -c 1 -W 1 8.8.8.8 &>/dev/null || ping -c 1 -W 1 9.9.9.9 &>/dev/null; then - ipv4_status="${GN}✔${CL} IPv4" + msg_ok "IPv4 Internet Connected" + ipv4_connected=true else - ipv4_status="${RD}✖${CL} IPv4" - read -r -p "Internet NOT connected. Continue anyway? " prompt + msg_error "IPv4 Internet Not Connected" + fi + + if [[ $ipv4_connected == false ]]; then + read -r -p "No Internet detected, would you like to continue anyway? 
" prompt if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then echo -e "${INFO}${RD}Expect Issues Without Internet${CL}" else @@ -102,20 +109,60 @@ network_check() { exit 122 fi fi - RESOLVEDIP=$(getent hosts github.com | awk '{ print $1 }') - if [[ -z "$RESOLVEDIP" ]]; then - msg_error "Internet: ${ipv4_status} DNS Failed" + + # DNS resolution checks for GitHub-related domains + GIT_HOSTS=("github.com" "raw.githubusercontent.com" "api.github.com" "git.community-scripts.org") + GIT_STATUS="Git DNS:" + DNS_FAILED=false + + for HOST in "${GIT_HOSTS[@]}"; do + RESOLVEDIP=$(getent hosts "$HOST" | awk '{ print $1 }' | grep -E '(^([0-9]{1,3}\.){3}[0-9]{1,3}$)|(^[a-fA-F0-9:]+$)' | head -n1) + if [[ -z "$RESOLVEDIP" ]]; then + GIT_STATUS+="$HOST:($DNSFAIL)" + DNS_FAILED=true + else + GIT_STATUS+=" $HOST:($DNSOK)" + fi + done + + if [[ "$DNS_FAILED" == true ]]; then + fatal "$GIT_STATUS" else - msg_ok "Internet: ${ipv4_status} DNS: ${BL}${RESOLVEDIP}${CL}" + msg_ok "$GIT_STATUS" fi + set -e trap 'error_handler $LINENO "$BASH_COMMAND"' ERR } -# This function updates the Container OS by running apt-get update and upgrade +# This function updates the Container OS by running apk upgrade with mirror fallback update_os() { msg_info "Updating Container OS" - $STD apk -U upgrade + if ! $STD apk -U upgrade; then + msg_warn "apk update failed (dl-cdn.alpinelinux.org), trying alternate mirrors..." 
+ local alpine_mirrors="mirror.init7.net ftp.halifax.rwth-aachen.de mirrors.edge.kernel.org alpine.mirror.wearetriple.com mirror.leaseweb.com uk.alpinelinux.org dl-2.alpinelinux.org dl-4.alpinelinux.org" + local apk_ok=false + for m in $(printf '%s\n' $alpine_mirrors | shuf); do + if timeout 2 bash -c "echo >/dev/tcp/$m/80" 2>/dev/null; then + msg_custom "${INFO}" "${YW}" "Attempting mirror: ${m}" + cat </etc/apk/repositories +http://$m/alpine/latest-stable/main +http://$m/alpine/latest-stable/community +EOF + if $STD apk -U upgrade; then + msg_ok "CDN set to ${m}: tests passed" + apk_ok=true + break + else + msg_warn "Mirror ${m} failed" + fi + fi + done + if [[ "$apk_ok" != true ]]; then + msg_error "All Alpine mirrors failed. Check network or try again later." + exit 1 + fi + fi local tools_content tools_content=$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) || { msg_error "Failed to download tools.func" diff --git a/misc/alpine-tools.func b/misc/alpine-tools.func index 958c30680..f5ba60368 100644 --- a/misc/alpine-tools.func +++ b/misc/alpine-tools.func @@ -20,7 +20,7 @@ need_tool() { msg_info "Installing tools: $*" apk add --no-cache "$@" >/dev/null 2>&1 || { msg_error "apk add failed for: $*" - return 1 + return 100 } msg_ok "Tools ready: $*" fi @@ -52,17 +52,17 @@ ensure_usr_local_bin_persist() { download_with_progress() { # $1 url, $2 dest local url="$1" out="$2" cl - need_tool curl pv || return 1 + need_tool curl pv || return 127 cl=$(curl -fsSLI "$url" 2>/dev/null | awk 'tolower($0) ~ /^content-length:/ {print $2}' | tr -d '\r') if [ -n "$cl" ]; then curl -fsSL "$url" | pv -s "$cl" >"$out" || { msg_error "Download failed: $url" - return 1 + return 250 } else curl -fL# -o "$out" "$url" || { msg_error "Download failed: $url" - return 1 + return 250 } fi } @@ -82,14 +82,14 @@ check_for_gh_release() { net_resolves api.github.com || { msg_error "DNS/network error: api.github.com" - return 1 + return 6 } - 
need_tool curl jq || return 1 + need_tool curl jq || return 127 tag=$(curl -fsSL "https://api.github.com/repos/${source}/releases/latest" | jq -r '.tag_name // empty') [ -z "$tag" ] && { msg_error "Unable to fetch latest tag for $app" - return 1 + return 22 } release="${tag#v}" @@ -133,12 +133,12 @@ fetch_and_deploy_gh() { net_resolves api.github.com || { msg_error "DNS/network error" - return 1 + return 6 } - need_tool curl jq tar || return 1 + need_tool curl jq tar || return 127 [ "$mode" = "prebuild" ] || [ "$mode" = "singlefile" ] && need_tool unzip >/dev/null 2>&1 || true - tmpd="$(mktemp -d)" || return 1 + tmpd="$(mktemp -d)" || return 252 mkdir -p "$target" # Release JSON @@ -146,13 +146,13 @@ fetch_and_deploy_gh() { json="$(curl -fsSL "https://api.github.com/repos/$repo/releases/latest")" || { msg_error "GitHub API failed" rm -rf "$tmpd" - return 1 + return 22 } else json="$(curl -fsSL "https://api.github.com/repos/$repo/releases/tags/$version")" || { msg_error "GitHub API failed" rm -rf "$tmpd" - return 1 + return 22 } fi @@ -163,7 +163,7 @@ fetch_and_deploy_gh() { [ -z "$version" ] && { msg_error "No tag in release json" rm -rf "$tmpd" - return 1 + return 65 } case "$mode" in @@ -173,26 +173,26 @@ fetch_and_deploy_gh() { filename="${app_lc}-${version}.tar.gz" download_with_progress "$url" "$tmpd/$filename" || { rm -rf "$tmpd" - return 1 + return 250 } tar -xzf "$tmpd/$filename" -C "$tmpd" || { msg_error "tar extract failed" rm -rf "$tmpd" - return 1 + return 251 } unpack="$(find "$tmpd" -mindepth 1 -maxdepth 1 -type d | head -n1)" # copy content of unpack to target (cd "$unpack" && tar -cf - .) 
| (cd "$target" && tar -xf -) || { msg_error "copy failed" rm -rf "$tmpd" - return 1 + return 252 } ;; prebuild) [ -n "$pattern" ] || { msg_error "prebuild requires asset pattern" rm -rf "$tmpd" - return 1 + return 65 } url="$(printf '%s' "$json" | jq -r '.assets[].browser_download_url' | awk -v p="$pattern" ' BEGIN{IGNORECASE=1} @@ -201,19 +201,19 @@ fetch_and_deploy_gh() { [ -z "$url" ] && { msg_error "asset not found for pattern: $pattern" rm -rf "$tmpd" - return 1 + return 250 } filename="${url##*/}" download_with_progress "$url" "$tmpd/$filename" || { rm -rf "$tmpd" - return 1 + return 250 } # unpack archive (Zip or tarball) case "$filename" in *.zip) need_tool unzip || { rm -rf "$tmpd" - return 1 + return 127 } mkdir -p "$tmpd/unp" unzip -q "$tmpd/$filename" -d "$tmpd/unp" @@ -225,7 +225,7 @@ fetch_and_deploy_gh() { *) msg_error "unsupported archive: $filename" rm -rf "$tmpd" - return 1 + return 251 ;; esac # top-level folder strippen @@ -234,13 +234,13 @@ fetch_and_deploy_gh() { (cd "$unpack" && tar -cf - .) | (cd "$target" && tar -xf -) || { msg_error "copy failed" rm -rf "$tmpd" - return 1 + return 252 } else (cd "$tmpd/unp" && tar -cf - .) 
| (cd "$target" && tar -xf -) || { msg_error "copy failed" rm -rf "$tmpd" - return 1 + return 252 } fi ;; @@ -248,7 +248,7 @@ fetch_and_deploy_gh() { [ -n "$pattern" ] || { msg_error "singlefile requires asset pattern" rm -rf "$tmpd" - return 1 + return 65 } url="$(printf '%s' "$json" | jq -r '.assets[].browser_download_url' | awk -v p="$pattern" ' BEGIN{IGNORECASE=1} @@ -257,19 +257,19 @@ fetch_and_deploy_gh() { [ -z "$url" ] && { msg_error "asset not found for pattern: $pattern" rm -rf "$tmpd" - return 1 + return 250 } filename="${url##*/}" download_with_progress "$url" "$target/$app" || { rm -rf "$tmpd" - return 1 + return 250 } chmod +x "$target/$app" ;; *) msg_error "Unknown mode: $mode" rm -rf "$tmpd" - return 1 + return 65 ;; esac @@ -291,20 +291,20 @@ setup_yq() { return 0 fi - need_tool curl || return 1 + need_tool curl || return 127 local arch bin url tmp case "$(uname -m)" in x86_64) arch="amd64" ;; aarch64) arch="arm64" ;; *) msg_error "Unsupported arch for yq: $(uname -m)" - return 1 + return 238 ;; esac url="https://github.com/mikefarah/yq/releases/latest/download/yq_linux_${arch}" tmp="$(mktemp)" - download_with_progress "$url" "$tmp" || return 1 - install -m 0755 "$tmp" /usr/local/bin/yq + download_with_progress "$url" "$tmp" || return 250 + /usr/bin/install -m 0755 "$tmp" /usr/local/bin/yq rm -f "$tmp" msg_ok "Setup yq ($(yq --version 2>/dev/null))" } @@ -313,13 +313,13 @@ setup_yq() { # Adminer – Alpine # ------------------------------ setup_adminer() { - need_tool curl || return 1 + need_tool curl || return 127 msg_info "Setup Adminer (Alpine)" mkdir -p /var/www/localhost/htdocs/adminer curl -fsSL https://github.com/vrana/adminer/releases/latest/download/adminer.php \ -o /var/www/localhost/htdocs/adminer/index.php || { msg_error "Adminer download failed" - return 1 + return 250 } msg_ok "Adminer at /adminer (served by your webserver)" } @@ -329,7 +329,7 @@ setup_adminer() { # optional: PYTHON_VERSION="3.12" # ------------------------------ 
setup_uv() { - need_tool curl tar || return 1 + need_tool curl tar || return 127 local UV_BIN="/usr/local/bin/uv" local arch tarball url tmpd ver installed @@ -338,7 +338,7 @@ setup_uv() { aarch64) arch="aarch64-unknown-linux-musl" ;; *) msg_error "Unsupported arch for uv: $(uname -m)" - return 1 + return 238 ;; esac @@ -346,7 +346,7 @@ setup_uv() { ver="${ver#v}" [ -z "$ver" ] && { msg_error "uv: cannot determine latest version" - return 1 + return 250 } if has "$UV_BIN"; then @@ -360,29 +360,29 @@ setup_uv() { msg_info "Setup uv $ver" fi - tmpd="$(mktemp -d)" || return 1 + tmpd="$(mktemp -d)" || return 252 tarball="uv-${arch}.tar.gz" url="https://github.com/astral-sh/uv/releases/download/v${ver}/${tarball}" download_with_progress "$url" "$tmpd/uv.tar.gz" || { rm -rf "$tmpd" - return 1 + return 250 } tar -xzf "$tmpd/uv.tar.gz" -C "$tmpd" || { msg_error "uv: extract failed" rm -rf "$tmpd" - return 1 + return 251 } # tar contains ./uv if [ -x "$tmpd/uv" ]; then - install -m 0755 "$tmpd/uv" "$UV_BIN" + /usr/bin/install -m 0755 "$tmpd/uv" "$UV_BIN" else # fallback: in subfolder - install -m 0755 "$tmpd"/*/uv "$UV_BIN" 2>/dev/null || { + /usr/bin/install -m 0755 "$tmpd"/*/uv "$UV_BIN" 2>/dev/null || { msg_error "uv binary not found in tar" rm -rf "$tmpd" - return 1 + return 252 } fi rm -rf "$tmpd" @@ -395,13 +395,13 @@ setup_uv() { $0 ~ "^cpython-"maj"\\." { print $0 }' | awk -F- '{print $2}' | sort -V | tail -n1)" [ -z "$match" ] && { msg_error "No matching Python for $PYTHON_VERSION" - return 1 + return 250 } if ! 
uv python list | grep -q "cpython-${match}-linux"; then msg_info "Installing Python $match via uv" uv python install "$match" || { msg_error "uv python install failed" - return 1 + return 150 } msg_ok "Python $match installed (uv)" fi @@ -421,7 +421,7 @@ setup_java() { msg_info "Setup Java (OpenJDK $JAVA_VERSION)" apk add --no-cache "$pkg" >/dev/null 2>&1 || { msg_error "apk add $pkg failed" - return 1 + return 100 } # set JAVA_HOME local prof="/etc/profile.d/20-java.sh" @@ -441,32 +441,32 @@ setup_go() { msg_info "Setup Go (apk)" apk add --no-cache go >/dev/null 2>&1 || { msg_error "apk add go failed" - return 1 + return 100 } msg_ok "Go ready: $(go version 2>/dev/null)" return 0 fi - need_tool curl tar || return 1 + need_tool curl tar || return 127 local ARCH TARBALL URL TMP case "$(uname -m)" in x86_64) ARCH="amd64" ;; aarch64) ARCH="arm64" ;; *) msg_error "Unsupported arch for Go: $(uname -m)" - return 1 + return 238 ;; esac TARBALL="go${GO_VERSION}.linux-${ARCH}.tar.gz" URL="https://go.dev/dl/${TARBALL}" msg_info "Setup Go $GO_VERSION (tarball)" TMP="$(mktemp)" - download_with_progress "$URL" "$TMP" || return 1 + download_with_progress "$URL" "$TMP" || return 250 rm -rf /usr/local/go tar -C /usr/local -xzf "$TMP" || { msg_error "extract go failed" rm -f "$TMP" - return 1 + return 251 } rm -f "$TMP" ln -sf /usr/local/go/bin/go /usr/local/bin/go @@ -488,7 +488,7 @@ setup_composer() { # Fallback to generic php if 83 not available apk add --no-cache php-cli php-openssl php-phar php-iconv >/dev/null 2>&1 || { msg_error "Failed to install php-cli for composer" - return 1 + return 100 } } msg_ok "PHP CLI ready: $(php -v | head -n1)" @@ -500,14 +500,14 @@ setup_composer() { msg_info "Setup Composer" fi - need_tool curl || return 1 + need_tool curl || return 127 curl -fsSL https://getcomposer.org/installer -o /tmp/composer-setup.php || { msg_error "composer installer download failed" - return 1 + return 250 } php /tmp/composer-setup.php --install-dir=/usr/local/bin 
--filename=composer >/dev/null 2>&1 || { msg_error "composer install failed" - return 1 + return 150 } rm -f /tmp/composer-setup.php ensure_usr_local_bin_persist diff --git a/misc/api.func b/misc/api.func index 7815d4a33..899c6dfda 100644 --- a/misc/api.func +++ b/misc/api.func @@ -344,21 +344,36 @@ explain_exit_code() { # - Escapes a string for safe JSON embedding # - Strips ANSI escape sequences and non-printable control characters # - Handles backslashes, quotes, newlines, tabs, and carriage returns +# - Uses jq when available (guaranteed correct), falls back to awk # ------------------------------------------------------------------------------ json_escape() { - # Escape a string for safe JSON embedding using awk (handles any input size). - # Pipeline: strip ANSI → remove control chars → escape \ " TAB → join lines with \n - printf '%s' "$1" | + local input + # Pipeline: strip ANSI → remove control chars → escape for JSON + input=$(printf '%s' "$1" | sed 's/\x1b\[[0-9;]*[a-zA-Z]//g' | - tr -d '\000-\010\013\014\016-\037\177\r' | + tr -d '\000-\010\013\014\016-\037\177\r') + + # Prefer jq: guaranteed correct JSON string encoding (handles all edge cases) + if command -v jq &>/dev/null; then + # jq -Rs reads raw stdin as string, outputs JSON-encoded string with quotes. + # We strip the surrounding quotes since the heredoc adds them. + printf '%s' "$input" | jq -Rs '.' 
| sed 's/^"//;s/"$//' + return + fi + + # Fallback: character-by-character processing with awk (avoids gsub replacement pitfalls) + printf '%s' "$input" | awk ' - BEGIN { ORS = "" } + BEGIN { ORS="" } { - gsub(/\\/, "\\\\") # backslash → \\ - gsub(/"/, "\\\"") # double quote → \" - gsub(/\t/, "\\t") # tab → \t - if (NR > 1) printf "\\n" - printf "%s", $0 + if (NR > 1) printf "%s", "\\n" + for (i = 1; i <= length($0); i++) { + c = substr($0, i, 1) + if (c == "\\") printf "%s", "\\\\" + else if (c == "\"") printf "%s", "\\\"" + else if (c == "\t") printf "%s", "\\t" + else printf "%s", c + } }' } @@ -504,7 +519,7 @@ detect_gpu() { GPU_PASSTHROUGH="unknown" local gpu_line - gpu_line=$(lspci 2>/dev/null | grep -iE "VGA|3D|Display" | head -1) + gpu_line=$(lspci 2>/dev/null | grep -iE "VGA|3D|Display" | head -1 || true) if [[ -n "$gpu_line" ]]; then # Extract model: everything after the colon, clean up @@ -543,7 +558,7 @@ detect_cpu() { if [[ -f /proc/cpuinfo ]]; then local vendor_id - vendor_id=$(grep -m1 "vendor_id" /proc/cpuinfo 2>/dev/null | cut -d: -f2 | tr -d ' ') + vendor_id=$(grep -m1 "vendor_id" /proc/cpuinfo 2>/dev/null | cut -d: -f2 | tr -d ' ' || true) case "$vendor_id" in GenuineIntel) CPU_VENDOR="intel" ;; @@ -557,7 +572,7 @@ detect_cpu() { esac # Extract model name and clean it up - CPU_MODEL=$(grep -m1 "model name" /proc/cpuinfo 2>/dev/null | cut -d: -f2 | sed 's/^ *//' | sed 's/(R)//g' | sed 's/(TM)//g' | sed 's/ */ /g' | cut -c1-64) + CPU_MODEL=$(grep -m1 "model name" /proc/cpuinfo 2>/dev/null | cut -d: -f2 | sed 's/^ *//' | sed 's/(R)//g' | sed 's/(TM)//g' | sed 's/ */ /g' | cut -c1-64 || true) fi export CPU_VENDOR CPU_MODEL @@ -627,8 +642,8 @@ post_to_api() { [[ "${DEV_MODE:-}" == "true" ]] && echo "[DEBUG] post_to_api() DIAGNOSTICS=$DIAGNOSTICS RANDOM_UUID=$RANDOM_UUID NSAPP=$NSAPP" >&2 - # Set type for later status updates - TELEMETRY_TYPE="lxc" + # Set type for later status updates (preserve if already set, e.g. 
turnkey) + TELEMETRY_TYPE="${TELEMETRY_TYPE:-lxc}" local pve_version="" if command -v pveversion &>/dev/null; then @@ -664,7 +679,7 @@ post_to_api() { { "random_id": "${RANDOM_UUID}", "execution_id": "${EXECUTION_ID:-${RANDOM_UUID}}", - "type": "lxc", + "type": "${TELEMETRY_TYPE}", "nsapp": "${NSAPP:-unknown}", "status": "installing", "ct_type": ${CT_TYPE:-1}, @@ -692,6 +707,7 @@ EOF # Send initial "installing" record with retry. # This record MUST exist for all subsequent updates to succeed. local http_code="" attempt + local _post_success=false for attempt in 1 2 3; do if [[ "${DEV_MODE:-}" == "true" ]]; then http_code=$(curl -sS -w "%{http_code}" -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \ @@ -703,11 +719,19 @@ EOF -H "Content-Type: application/json" \ -d "$JSON_PAYLOAD" -o /dev/null 2>/dev/null) || http_code="000" fi - [[ "$http_code" =~ ^2[0-9]{2}$ ]] && break + if [[ "$http_code" =~ ^2[0-9]{2}$ ]]; then + _post_success=true + break + fi [[ "$attempt" -lt 3 ]] && sleep 1 done - POST_TO_API_DONE=true + # Only mark done if at least one attempt succeeded. + # If all 3 failed, POST_TO_API_DONE stays false so post_update_to_api + # and on_exit() know the initial record was never created. + # The server has fallback logic to create a new record on status updates, + # so subsequent calls can still succeed even without the initial record. 
+ POST_TO_API_DONE=${_post_success} } # ------------------------------------------------------------------------------ @@ -798,15 +822,19 @@ EOF # Send initial "installing" record with retry (must succeed for updates to work) local http_code="" attempt + local _post_success=false for attempt in 1 2 3; do http_code=$(curl -sS -w "%{http_code}" -m "${TELEMETRY_TIMEOUT}" -X POST "${TELEMETRY_URL}" \ -H "Content-Type: application/json" \ -d "$JSON_PAYLOAD" -o /dev/null 2>/dev/null) || http_code="000" - [[ "$http_code" =~ ^2[0-9]{2}$ ]] && break + if [[ "$http_code" =~ ^2[0-9]{2}$ ]]; then + _post_success=true + break + fi [[ "$attempt" -lt 3 ]] && sleep 1 done - POST_TO_API_DONE=true + POST_TO_API_DONE=${_post_success} } # ------------------------------------------------------------------------------ @@ -1083,6 +1111,12 @@ EOF # - Used to group errors in dashboard # ------------------------------------------------------------------------------ categorize_error() { + # Allow build.func to override category based on log analysis (exit code 1 subclassification) + if [[ -n "${ERROR_CATEGORY_OVERRIDE:-}" ]]; then + echo "$ERROR_CATEGORY_OVERRIDE" + return + fi + local code="$1" case "$code" in # Network errors (curl/wget) @@ -1328,8 +1362,8 @@ post_addon_to_api() { # Detect OS info local os_type="" os_version="" if [[ -f /etc/os-release ]]; then - os_type=$(grep "^ID=" /etc/os-release | cut -d= -f2 | tr -d '"') - os_version=$(grep "^VERSION_ID=" /etc/os-release | cut -d= -f2 | tr -d '"') + os_type=$(grep "^ID=" /etc/os-release | cut -d= -f2 | tr -d '"' || true) + os_version=$(grep "^VERSION_ID=" /etc/os-release | cut -d= -f2 | tr -d '"' || true) fi local JSON_PAYLOAD diff --git a/misc/build.func b/misc/build.func index 41e53bcac..c6b3e6006 100644 --- a/misc/build.func +++ b/misc/build.func @@ -178,10 +178,10 @@ get_current_ip() { # Check for Debian/Ubuntu (uses hostname -I) if grep -qE 'ID=debian|ID=ubuntu' /etc/os-release; then # Try IPv4 first - CURRENT_IP=$(hostname -I 
2>/dev/null | tr ' ' '\n' | grep -E '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$' | head -n1) + CURRENT_IP=$(hostname -I 2>/dev/null | tr ' ' '\n' | grep -E '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$' | head -n1 || true) # Fallback to IPv6 if no IPv4 if [[ -z "$CURRENT_IP" ]]; then - CURRENT_IP=$(hostname -I 2>/dev/null | tr ' ' '\n' | grep -E ':' | head -n1) + CURRENT_IP=$(hostname -I 2>/dev/null | tr ' ' '\n' | grep -E ':' | head -n1 || true) fi # Check for Alpine (uses ip command) elif grep -q 'ID=alpine' /etc/os-release; then @@ -226,6 +226,14 @@ update_motd_ip() { local current_hostname="$(hostname)" local current_ip="$(hostname -I | awk '{print $1}')" + # Escape sed special chars in replacement strings (& \ |) + current_os="${current_os//\\/\\\\}" + current_os="${current_os//&/\\&}" + current_hostname="${current_hostname//\\/\\\\}" + current_hostname="${current_hostname//&/\\&}" + current_ip="${current_ip//\\/\\\\}" + current_ip="${current_ip//&/\\&}" + # Update only if values actually changed if ! grep -q "OS:.*$current_os" "$PROFILE_FILE" 2>/dev/null; then sed -i "s|OS:.*|OS: \${GN}$current_os\${CL}\\\"|" "$PROFILE_FILE" @@ -264,12 +272,12 @@ install_ssh_keys_into_ct() { msg_info "Installing selected SSH keys into CT ${CTID}" pct exec "$CTID" -- sh -c 'mkdir -p /root/.ssh && chmod 700 /root/.ssh' || { msg_error "prepare /root/.ssh failed" - return 1 + return 252 } pct push "$CTID" "$SSH_KEYS_FILE" /root/.ssh/authorized_keys >/dev/null 2>&1 || pct exec "$CTID" -- sh -c "cat > /root/.ssh/authorized_keys" <"$SSH_KEYS_FILE" || { msg_error "write authorized_keys failed" - return 1 + return 252 } pct exec "$CTID" -- sh -c 'chmod 600 /root/.ssh/authorized_keys' || true msg_ok "Installed SSH keys into CT ${CTID}" @@ -510,7 +518,7 @@ validate_bridge() { [[ -z "$bridge" ]] && return 1 # Check if bridge interface exists - if ! ip link show "$bridge" &>/dev/null; then + if ! 
ip link show dev "$bridge" &>/dev/null; then return 1 fi @@ -534,6 +542,10 @@ validate_gateway_in_subnet() { local ip="${static_ip%%/*}" local cidr="${static_ip##*/}" + # /31 and /32 are valid point-to-point / zero-trust DMZ configurations + # where the gateway is technically outside the subnet — skip validation + ((cidr >= 31)) && return 0 + # Convert CIDR to netmask bits local mask=$((0xFFFFFFFF << (32 - cidr) & 0xFFFFFFFF)) @@ -832,7 +844,7 @@ choose_and_set_storage_for_file() { template) key="var_template_storage" ;; *) msg_error "Unknown storage class: $class" - return 1 + return 65 ;; esac @@ -855,7 +867,7 @@ choose_and_set_storage_for_file() { fi else # If the current value is preselectable, we could show it, but per your requirement we always offer selection - select_storage "$class" || return 1 + select_storage "$class" || return 150 fi _write_storage_to_vars "$vf" "$key" "$STORAGE_RESULT" @@ -972,32 +984,47 @@ base_settings() { fi IPV6_METHOD=${var_ipv6_method:-"none"} - IPV6_STATIC=${var_ipv6_static:-""} GATE=${var_gateway:-""} APT_CACHER=${var_apt_cacher:-""} APT_CACHER_IP=${var_apt_cacher_ip:-""} # Runtime check: Verify APT cacher is reachable if configured if [[ -n "$APT_CACHER_IP" && "$APT_CACHER" == "yes" ]]; then - if ! curl -s --connect-timeout 2 "http://${APT_CACHER_IP}:3142" >/dev/null 2>&1; then - msg_warn "APT Cacher configured but not reachable at ${APT_CACHER_IP}:3142" + local _check_host _check_port _check_url + _check_host=$(echo "$APT_CACHER_IP" | sed -e 's|https\?://||' -e 's|/.*||' | cut -d: -f1) + _check_port=$(echo "$APT_CACHER_IP" | sed -e 's|https\?://||' -e 's|/.*||' | cut -s -d: -f2) + if [[ "$APT_CACHER_IP" =~ ^https?:// ]]; then + _check_url="$APT_CACHER_IP" + _check_port="${_check_port:-80}" + else + _check_port="${_check_port:-3142}" + _check_url="http://${APT_CACHER_IP}:${_check_port}" + fi + if ! 
curl -s --connect-timeout 2 "${_check_url}" >/dev/null 2>&1; then + msg_warn "APT Cacher configured but not reachable at ${_check_url}" msg_custom "⚠️" "${YW}" "Disabling APT Cacher for this installation" APT_CACHER="" APT_CACHER_IP="" else - msg_ok "APT Cacher verified at ${APT_CACHER_IP}:3142" + msg_ok "APT Cacher verified at ${_check_url}" fi fi MTU=${var_mtu:-""} - SD=${var_searchdomain:-""} - NS=${var_ns:-""} + _sd_val="${var_searchdomain:-""}" + [[ -n "$_sd_val" ]] && SD="-searchdomain=$_sd_val" || SD="" + _ns_val="${var_ns:-""}" + [[ -n "$_ns_val" ]] && NS="-nameserver=$_ns_val" || NS="" MAC=${var_mac:-""} VLAN=${var_vlan:-""} SSH=${var_ssh:-"no"} SSH_AUTHORIZED_KEY=${var_ssh_authorized_key:-""} - UDHCPC_FIX=${var_udhcpc_fix:-""} - TAGS="community-script,${var_tags:-}" + # Build TAGS: ensure community-script prefix, use semicolons (pct format), no duplicates + if [[ "${var_tags:-}" == *community-script* ]]; then + TAGS="${var_tags:-community-script}" + else + TAGS="community-script${var_tags:+;${var_tags}}" + fi ENABLE_FUSE=${var_fuse:-"${1:-no}"} ENABLE_TUN=${var_tun:-"${1:-no}"} @@ -1006,6 +1033,7 @@ base_settings() { ENABLE_NESTING=${var_nesting:-"1"} ENABLE_KEYCTL=${var_keyctl:-"0"} ENABLE_MKNOD=${var_mknod:-"0"} + ALLOW_MOUNT_FS=${var_mount_fs:-""} PROTECT_CT=${var_protection:-"no"} CT_TIMEZONE=${var_timezone:-"$timezone"} [[ "${CT_TIMEZONE:-}" == Etc/* ]] && CT_TIMEZONE="host" # pct doesn't accept Etc/* zones @@ -1035,10 +1063,11 @@ load_vars_file() { # Allowed var_* keys local VAR_WHITELIST=( - var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_disk var_fuse var_gpu var_keyctl + var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_disk var_fuse var_github_token var_gpu var_keyctl var_gateway var_hostname var_ipv6_method var_mac var_mknod var_mount_fs var_mtu var_net var_nesting var_ns var_os var_protection var_pw var_ram var_tags var_timezone var_tun var_unprivileged var_verbose var_version var_vlan var_ssh var_ssh_authorized_key 
var_container_storage var_template_storage var_searchdomain + var_post_install ) # Whitelist check helper @@ -1184,12 +1213,47 @@ load_vars_file() { continue fi ;; + var_mknod) + if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then + msg_warn "Invalid mknod value '$var_val' in $file (must be 0 or 1), ignoring" + continue + fi + ;; + var_mount_fs) + # Normalize: strip spaces, trailing commas + var_val="${var_val// /}" + var_val="${var_val%%,}" + var_val="${var_val##,}" + if [[ -n "$var_val" ]] && [[ ! "$var_val" =~ ^[a-zA-Z0-9]+(,[a-zA-Z0-9]+)*$ ]]; then + msg_warn "Invalid mount_fs value '$var_val' in $file (comma-separated fs names only, e.g. nfs,cifs), ignoring" + continue + fi + ;; var_ipv6_method) if [[ "$var_val" != "auto" && "$var_val" != "dhcp" && "$var_val" != "static" && "$var_val" != "none" ]]; then msg_warn "Invalid IPv6 method '$var_val' in $file (must be auto/dhcp/static/none), ignoring" continue fi ;; + var_apt_cacher_ip) + # Allow: plain IP/hostname, http://host, https://host:port + if [[ -n "$var_val" ]] && ! 
[[ "$var_val" =~ ^(https?://)?[a-zA-Z0-9._-]+(:[0-9]+)?(/.*)?$ ]]; then + msg_warn "Invalid APT Cacher address '$var_val' in $file, ignoring" + continue + fi + ;; + var_container_storage | var_template_storage) + # Validate that the storage exists and is active on the current node + local _storage_status + _storage_status=$(pvesm status 2>/dev/null | awk -v s="$var_val" '$1 == s { print $3 }') + if [[ -z "$_storage_status" ]]; then + msg_warn "Storage '$var_val' from $file not found on this node, ignoring" + continue + elif [[ "$_storage_status" == "disabled" ]]; then + msg_warn "Storage '$var_val' from $file is disabled on this node, ignoring" + continue + fi + ;; esac fi @@ -1217,10 +1281,11 @@ default_var_settings() { # Allowed var_* keys (alphabetically sorted) # Note: Removed var_ctid (can only exist once), var_ipv6_static (static IPs are unique) local VAR_WHITELIST=( - var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_disk var_fuse var_gpu var_keyctl + var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_disk var_fuse var_github_token var_gpu var_keyctl var_gateway var_hostname var_ipv6_method var_mac var_mknod var_mount_fs var_mtu var_net var_nesting var_ns var_os var_protection var_pw var_ram var_tags var_timezone var_tun var_unprivileged var_verbose var_version var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage + var_post_install ) # Snapshot: environment variables (highest precedence) @@ -1243,7 +1308,7 @@ default_var_settings() { return 0 } done - return 1 + return 252 } # Allow override of storages via env (for non-interactive use cases) [ -n "${var_template_storage:-}" ] && TEMPLATE_STORAGE="$var_template_storage" @@ -1290,9 +1355,11 @@ var_ipv6_method=none var_ssh=no # var_ssh_authorized_key= -# APT cacher (optional - with example) +# APT cacher (optional - IP or URL) # var_apt_cacher=yes # var_apt_cacher_ip=192.168.1.10 +# var_apt_cacher_ip=http://proxy.local +# var_apt_cacher_ip=https://proxy.local:443 # 
Features/Tags/verbosity var_fuse=no @@ -1310,6 +1377,15 @@ var_verbose=no # Security (root PW) – empty => autologin # var_pw= + +# GitHub Personal Access Token (optional – avoids API rate limits during installs) +# Create at https://github.com/settings/tokens – read-only public access is sufficient +# var_github_token=ghp_your_token_here + +# Optional post-install script (host-side path to a *.sh on the Proxmox host) +# Runs ON THE HOST after the container is fully provisioned. +# Available env vars: APP, NSAPP, CTID, IP, HN, STORAGE, BRG +# var_post_install=/opt/post-install/myhook.sh EOF # Now choose storages (always prompt unless just one exists) @@ -1336,7 +1412,7 @@ EOF local dv dv="$(_find_default_vars)" || { msg_error "default.vars not found after ensure step" - return 1 + return 252 } load_vars_file "$dv" @@ -1347,6 +1423,11 @@ EOF VERBOSE="no" fi + # 4) Map var_github_token → GITHUB_TOKEN (only if not already set in environment) + if [[ -z "${GITHUB_TOKEN:-}" && -n "${var_github_token:-}" ]]; then + export GITHUB_TOKEN="${var_github_token}" + fi + # 4) Apply base settings and show summary METHOD="mydefaults-global" base_settings "$VERBOSE" @@ -1379,10 +1460,11 @@ get_app_defaults_path() { if ! 
declare -p VAR_WHITELIST >/dev/null 2>&1; then # Note: Removed var_ctid (can only exist once), var_ipv6_static (static IPs are unique) declare -ag VAR_WHITELIST=( - var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_disk var_fuse var_gpu - var_gateway var_hostname var_ipv6_method var_mac var_mtu - var_net var_ns var_os var_pw var_ram var_tags var_tun var_unprivileged - var_verbose var_version var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage + var_apt_cacher var_apt_cacher_ip var_brg var_cpu var_disk var_fuse var_github_token var_gpu var_keyctl + var_gateway var_hostname var_ipv6_method var_mac var_mknod var_mount_fs var_mtu + var_net var_nesting var_ns var_os var_protection var_pw var_ram var_tags var_timezone var_tun var_unprivileged + var_verbose var_version var_vlan var_ssh var_ssh_authorized_key var_container_storage var_template_storage var_searchdomain + var_post_install ) fi @@ -1595,6 +1677,7 @@ _build_current_app_vars_tmp() { [ -n "$_tpl_storage" ] && echo "var_template_storage=$(_sanitize_value "$_tpl_storage")" [ -n "$_ct_storage" ] && echo "var_container_storage=$(_sanitize_value "$_ct_storage")" + [ -n "${var_post_install:-}" ] && echo "var_post_install=$(_sanitize_value "${var_post_install}")" } >"$tmpf" echo "$tmpf" @@ -1621,7 +1704,7 @@ maybe_offer_save_app_defaults() { if whiptail --backtitle "Proxmox VE Helper Scripts" \ --yesno "Save these advanced settings as defaults for ${APP}?\n\nThis will create:\n${app_vars_path}" 12 72; then mkdir -p "$(dirname "$app_vars_path")" - install -m 0644 "$new_tmp" "$app_vars_path" + /usr/bin/install -m 0644 "$new_tmp" "$app_vars_path" msg_ok "Saved app defaults: ${app_vars_path}" fi rm -f "$new_tmp" "$diff_tmp" @@ -1655,7 +1738,7 @@ maybe_offer_save_app_defaults() { case "$sel" in "Update Defaults") - install -m 0644 "$new_tmp" "$app_vars_path" + /usr/bin/install -m 0644 "$new_tmp" "$app_vars_path" msg_ok "Updated app defaults: ${app_vars_path}" break ;; @@ -1683,8 +1766,8 @@ 
ensure_storage_selection_for_vars_file() { # Read stored values (if any) local tpl ct - tpl=$(grep -E '^var_template_storage=' "$vf" | cut -d= -f2-) - ct=$(grep -E '^var_container_storage=' "$vf" | cut -d= -f2-) + tpl=$(grep -E '^var_template_storage=' "$vf" | cut -d= -f2- || true) + ct=$(grep -E '^var_container_storage=' "$vf" | cut -d= -f2- || true) if [[ -n "$tpl" && -n "$ct" ]]; then TEMPLATE_STORAGE="$tpl" @@ -1732,9 +1815,14 @@ advanced_settings() { trap 'tput rmcup 2>/dev/null || true' RETURN # Initialize defaults - TAGS="community-script;${var_tags:-}" + # Build TAGS: ensure community-script prefix, use semicolons (pct format), no duplicates + if [[ "${var_tags:-}" == *community-script* ]]; then + TAGS="${var_tags:-community-script}" + else + TAGS="community-script${var_tags:+;${var_tags}}" + fi local STEP=1 - local MAX_STEP=28 + local MAX_STEP=29 # Store values for back navigation - inherit from var_* app defaults local _ct_type="${var_unprivileged:-1}" @@ -1768,6 +1856,7 @@ advanced_settings() { local _enable_mknod="${var_mknod:-0}" local _mount_fs="${var_mount_fs:-}" local _protect_ct="${var_protection:-no}" + local _post_install="${var_post_install:-}" # Detect host timezone for default (if not set via var_timezone) local _host_timezone="" @@ -1819,7 +1908,7 @@ advanced_settings() { if [[ -n "$BRIDGES" ]]; then while IFS= read -r bridge; do if [[ -n "$bridge" ]]; then - local description=$(grep -A 10 "iface $bridge" /etc/network/interfaces 2>/dev/null | grep '^#' | head -n1 | sed 's/^#\s*//;s/^[- ]*//') + local description=$(grep -A 10 "iface $bridge" /etc/network/interfaces 2>/dev/null | grep '^#' | head -n1 | sed 's/^#\s*//;s/^[- ]*//' || true) BRIDGE_MENU_OPTIONS+=("$bridge" "${description:- }") fi done <<<"$BRIDGES" @@ -1831,9 +1920,9 @@ advanced_settings() { while [ $STEP -le $MAX_STEP ]; do case $STEP in - # ------------------------------------------------------------------------------ + # 
═══════════════════════════════════════════════════════════════════════════ # STEP 1: Container Type - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 1) local default_on="ON" local default_off="OFF" @@ -1856,9 +1945,9 @@ advanced_settings() { fi ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 2: Root Password - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 2) if PW1=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "ROOT PASSWORD" \ @@ -1910,9 +1999,9 @@ advanced_settings() { fi ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 3: Container ID - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 3) if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "CONTAINER ID" \ @@ -1944,9 +2033,9 @@ advanced_settings() { fi ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 4: Hostname - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 4) if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "HOSTNAME" \ @@ -1967,9 +2056,9 @@ advanced_settings() { fi ;; - # ------------------------------------------------------------------------------ + # 
═══════════════════════════════════════════════════════════════════════════ # STEP 5: Disk Size - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 5) if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "DISK SIZE" \ @@ -1988,9 +2077,9 @@ advanced_settings() { fi ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 6: CPU Cores - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 6) if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "CPU CORES" \ @@ -2009,9 +2098,9 @@ advanced_settings() { fi ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 7: RAM Size - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 7) if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "RAM SIZE" \ @@ -2030,9 +2119,9 @@ advanced_settings() { fi ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 8: Network Bridge - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 8) if [[ ${#BRIDGE_MENU_OPTIONS[@]} -eq 0 ]]; then # Validate default bridge exists @@ -2068,9 +2157,9 @@ advanced_settings() { fi ;; - # ------------------------------------------------------------------------------ 
+ # ═══════════════════════════════════════════════════════════════════════════ # STEP 9: IPv4 Configuration - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 9) if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "IPv4 CONFIGURATION" \ @@ -2165,9 +2254,9 @@ advanced_settings() { fi ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 10: IPv6 Configuration - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 10) if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "IPv6 CONFIGURATION" \ @@ -2240,9 +2329,9 @@ advanced_settings() { fi ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 11: MTU Size - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 11) if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "MTU SIZE" \ @@ -2260,9 +2349,9 @@ advanced_settings() { fi ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 12: DNS Search Domain - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 12) if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "DNS SEARCH DOMAIN" \ @@ -2276,9 +2365,9 @@ advanced_settings() { fi 
;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 13: DNS Server - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 13) if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "DNS SERVER" \ @@ -2292,9 +2381,9 @@ advanced_settings() { fi ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 14: MAC Address - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 14) if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "MAC ADDRESS" \ @@ -2312,9 +2401,9 @@ advanced_settings() { fi ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 15: VLAN Tag - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 15) if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "VLAN TAG" \ @@ -2332,9 +2421,9 @@ advanced_settings() { fi ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 16: Tags - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 16) if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "CONTAINER TAGS" \ @@ 
-2354,18 +2443,18 @@ advanced_settings() { fi ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 17: SSH Settings - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 17) configure_ssh_settings "Step $STEP/$MAX_STEP" # configure_ssh_settings handles its own flow, always advance ((STEP++)) ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 18: FUSE Support - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 18) local fuse_default_flag="--defaultno" [[ "$_enable_fuse" == "yes" || "$_enable_fuse" == "1" ]] && fuse_default_flag="" @@ -2387,9 +2476,9 @@ advanced_settings() { ((STEP++)) ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 19: TUN/TAP Support - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 19) local tun_default_flag="--defaultno" [[ "$_enable_tun" == "yes" || "$_enable_tun" == "1" ]] && tun_default_flag="" @@ -2411,9 +2500,9 @@ advanced_settings() { ((STEP++)) ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 20: Nesting Support - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 20) local nesting_default_flag="" [[ "$_enable_nesting" == 
"0" || "$_enable_nesting" == "no" ]] && nesting_default_flag="--defaultno" @@ -2441,9 +2530,9 @@ advanced_settings() { ((STEP++)) ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 21: GPU Passthrough - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 21) local gpu_default_flag="--defaultno" [[ "$_enable_gpu" == "yes" ]] && gpu_default_flag="" @@ -2465,10 +2554,17 @@ advanced_settings() { ((STEP++)) ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 22: Keyctl Support (Docker/systemd) - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 22) + # Keyctl is always required for unprivileged containers — skip dialog + if [[ "$_ct_type" == "1" ]]; then + _enable_keyctl="1" + ((STEP++)) + continue + fi + local keyctl_default_flag="--defaultno" [[ "$_enable_keyctl" == "1" ]] && keyctl_default_flag="" @@ -2476,7 +2572,7 @@ advanced_settings() { --title "KEYCTL SUPPORT" \ --ok-button "Next" --cancel-button "Back" \ $keyctl_default_flag \ - --yesno "\nEnable Keyctl support?\n\nRequired for: Docker containers, systemd-networkd,\nand kernel keyring operations.\n\nNote: Automatically enabled for unprivileged containers.\n\n(App default: ${var_keyctl:-0})" 16 62; then + --yesno "\nEnable Keyctl support?\n\nRequired for: Docker containers, systemd-networkd,\nand kernel keyring operations.\n\n(App default: ${var_keyctl:-0})" 14 62; then _enable_keyctl="1" else if [ $? 
-eq 1 ]; then @@ -2489,9 +2585,9 @@ advanced_settings() { ((STEP++)) ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 23: APT Cacher Proxy - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 23) local apt_cacher_default_flag="--defaultno" [[ "$_apt_cacher" == "yes" ]] && apt_cacher_default_flag="" @@ -2505,7 +2601,7 @@ advanced_settings() { # Ask for IP if enabled if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "APT CACHER IP" \ - --inputbox "\nEnter APT Cacher-NG server IP address:" 10 58 "$_apt_cacher_ip" \ + --inputbox "\nEnter APT Cacher-NG IP or URL:\n(e.g. 192.168.1.10, http://host, https://host:443)" 12 62 "$_apt_cacher_ip" \ 3>&1 1>&2 2>&3); then _apt_cacher_ip="$result" fi @@ -2521,9 +2617,9 @@ advanced_settings() { ((STEP++)) ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 24: Container Timezone - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 24) local tz_hint="$_ct_timezone" [[ -z "$tz_hint" ]] && tz_hint="(empty - will use host timezone)" @@ -2546,9 +2642,9 @@ advanced_settings() { fi ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 25: Container Protection - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 25) local protect_default_flag="--defaultno" [[ "$_protect_ct" == "yes" || "$_protect_ct" == "1" ]] && 
protect_default_flag="" @@ -2570,9 +2666,9 @@ advanced_settings() { ((STEP++)) ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 26: Device Node Creation (mknod) - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 26) local mknod_default_flag="--defaultno" [[ "$_enable_mknod" == "1" ]] && mknod_default_flag="" @@ -2594,9 +2690,9 @@ advanced_settings() { ((STEP++)) ;; - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # STEP 27: Mount Filesystems - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ 27) local mount_hint="" [[ -n "$_mount_fs" ]] && mount_hint="$_mount_fs" || mount_hint="(none)" @@ -2606,6 +2702,10 @@ advanced_settings() { --ok-button "Next" --cancel-button "Back" \ --inputbox "\nAllow specific filesystem mounts.\n\nComma-separated list: nfs, cifs, fuse, ext4, etc.\nLeave empty for defaults (none).\n\nCurrent: $mount_hint" 14 62 "$_mount_fs" \ 3>&1 1>&2 2>&3); then + # Normalize: strip spaces and trailing/leading commas + result="${result// /}" + result="${result%%,}" + result="${result##,}" _mount_fs="$result" ((STEP++)) else @@ -2613,10 +2713,62 @@ advanced_settings() { fi ;; - # ------------------------------------------------------------------------------ - # STEP 28: Verbose Mode & Confirmation - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 28: Optional host-side post-install hook (path on the Proxmox HOST) + # 
═══════════════════════════════════════════════════════════════════════════ 28) + local _hook_prompt="Optional: absolute path to a *.sh file ON THE PROXMOX HOST. + +It runs as root on the HOST (NOT in the LXC) after the container +is fully provisioned and started. + +Available env vars: APP, NSAPP, CTID, IP, HN, STORAGE, BRG. + +Leave empty to skip." + while true; do + if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ + --title "POST-INSTALL HOOK (HOST)" \ + --ok-button "Next" --cancel-button "Back" \ + --inputbox "$_hook_prompt" 16 70 "${_post_install}" \ + 3>&1 1>&2 2>&3); then + # Normalize: strip surrounding whitespace + result="$(printf '%s' "$result" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')" + if [[ -z "$result" ]]; then + _post_install="" + ((STEP++)) + break + fi + # Reject obvious shell-meta sneaking through + if [[ "$result" == *';'* || "$result" == *'$('* || "$result" == *'`'* || "$result" == *'&&'* || "$result" == *'||'* ]]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID PATH" \ + --msgbox "Path contains shell metacharacters. Please provide a plain absolute file path." 10 70 + continue + fi + if [[ "$result" != /* ]]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID PATH" \ + --msgbox "Path must be absolute (start with /).\n\nGot: $result" 10 70 + continue + fi + if [[ ! -f "$result" ]]; then + if ! whiptail --backtitle "Proxmox VE Helper Scripts" --title "FILE NOT FOUND" \ + --yesno "File does not exist on host:\n\n$result\n\nKeep this path anyway?" 
12 70; then + continue + fi + fi + _post_install="$result" + ((STEP++)) + break + else + ((STEP--)) + break + fi + done + ;; + + # ═══════════════════════════════════════════════════════════════════════════ + # STEP 29: Verbose Mode & Confirmation + # ═══════════════════════════════════════════════════════════════════════════ + 29) local verbose_default_flag="--defaultno" [[ "$_verbose" == "yes" ]] && verbose_default_flag="" @@ -2645,6 +2797,11 @@ advanced_settings() { local apt_display="${_apt_cacher:-no}" [[ "$_apt_cacher" == "yes" && -n "$_apt_cacher_ip" ]] && apt_display="$_apt_cacher_ip" + local post_install_display="${_post_install:-(none)}" + local post_install_warn="" + [[ -n "$_post_install" ]] && post_install_warn=" + ⚠ Hook runs as root on Proxmox HOST (not in LXC)" + local summary="Container Type: $ct_type_desc Container ID: $_ct_id Hostname: $_hostname @@ -2662,12 +2819,14 @@ Network: Features: FUSE: $_enable_fuse | TUN: $_enable_tun Nesting: $nesting_desc | Keyctl: $keyctl_desc + Mknod: $([ "$_enable_mknod" == "1" ] && echo Enabled || echo Disabled) | Mount FS: ${_mount_fs:-(none)} GPU: $_enable_gpu | Protection: $protect_desc Advanced: Timezone: $tz_display APT Cacher: $apt_display - Verbose: $_verbose" + Verbose: $_verbose + Post-Install Script: ${post_install_display}${post_install_warn}" if whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \ --title "CONFIRM SETTINGS" \ @@ -2681,9 +2840,9 @@ Advanced: esac done - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ # Apply all collected values to global variables - # ------------------------------------------------------------------------------ + # ═══════════════════════════════════════════════════════════════════════════ CT_TYPE="$_ct_type" PW="$_pw" CT_ID="$_ct_id" @@ -2710,6 +2869,7 @@ Advanced: APT_CACHER="$_apt_cacher" APT_CACHER_IP="$_apt_cacher_ip" 
VERBOSE="$_verbose" + var_post_install="$_post_install" # Update var_* based on user choice (for functions that check these) var_gpu="$_enable_gpu" @@ -2731,13 +2891,6 @@ Advanced: [[ -n "$_mac" ]] && MAC=",hwaddr=$_mac" || MAC="" [[ -n "$_vlan" ]] && VLAN=",tag=$_vlan" || VLAN="" - # Alpine UDHCPC fix - if [ "$var_os" == "alpine" ] && [ "$NET" == "dhcp" ] && [ -n "$_ns" ]; then - UDHCPC_FIX="yes" - else - UDHCPC_FIX="no" - fi - export UDHCPC_FIX export SSH_KEYS_FILE # Exit alternate screen buffer before showing summary (so output remains visible) @@ -2762,6 +2915,8 @@ Advanced: echo -e "${CONTAINERTYPE}${BOLD}${DGN}Nesting: ${BGN}$([ "${ENABLE_NESTING:-1}" == "1" ] && echo "Enabled" || echo "Disabled")${CL}" [[ "${ENABLE_KEYCTL:-0}" == "1" ]] && echo -e "${CONTAINERTYPE}${BOLD}${DGN}Keyctl: ${BGN}Enabled${CL}" echo -e "${GPU}${BOLD}${DGN}GPU Passthrough: ${BGN}${ENABLE_GPU:-no}${CL}" + [[ "${ENABLE_MKNOD:-0}" == "1" ]] && echo -e "${CONTAINERTYPE}${BOLD}${DGN}Mknod: ${BGN}Enabled${CL}" + [[ -n "${ALLOW_MOUNT_FS:-}" ]] && echo -e "${CONTAINERTYPE}${BOLD}${DGN}Mount FS: ${BGN}${ALLOW_MOUNT_FS}${CL}" [[ "${PROTECT_CT:-no}" == "yes" || "${PROTECT_CT:-no}" == "1" ]] && echo -e "${CONTAINERTYPE}${BOLD}${DGN}Protection: ${BGN}Enabled${CL}" [[ -n "${CT_TIMEZONE:-}" ]] && echo -e "${INFO}${BOLD}${DGN}Timezone: ${BGN}$CT_TIMEZONE${CL}" [[ "$APT_CACHER" == "yes" ]] && echo -e "${INFO}${BOLD}${DGN}APT Cacher: ${BGN}$APT_CACHER_IP${CL}" @@ -2784,6 +2939,8 @@ Advanced: log_msg "IPv6: $IPV6_METHOD" log_msg "FUSE Support: ${ENABLE_FUSE:-no}" log_msg "Nesting: $([ "${ENABLE_NESTING:-1}" == "1" ] && echo "Enabled" || echo "Disabled")" + log_msg "Mknod: $([ "${ENABLE_MKNOD:-0}" == "1" ] && echo "Enabled" || echo "Disabled")" + [[ -n "${ALLOW_MOUNT_FS:-}" ]] && log_msg "Mount FS: ${ALLOW_MOUNT_FS}" log_msg "GPU Passthrough: ${ENABLE_GPU:-no}" log_msg "Verbose Mode: $VERBOSE" log_msg "Session ID: ${SESSION_ID}" @@ -3061,6 +3218,15 @@ install_script() { header_info CHOICE="" ;; + 
generated | GENERATED) + header_info + echo -e "${DEFAULT}${BOLD}${BL}Using Generated Settings on node $PVEHOST_NAME${CL}" + VERBOSE="no" + METHOD="generated" + base_settings "$VERBOSE" + echo_default + break + ;; *) msg_error "Invalid option: $CHOICE" exit 112 @@ -3143,6 +3309,10 @@ check_container_resources() { if [[ "$current_ram" -lt "$var_ram" ]] || [[ "$current_cpu" -lt "$var_cpu" ]]; then msg_warn "Under-provisioned: Required ${var_cpu} CPU/${var_ram}MB RAM, Current ${current_cpu} CPU/${current_ram}MB RAM" echo -e "${YWB}Please ensure that the ${APP} LXC is configured with at least ${var_cpu} vCPU and ${var_ram} MB RAM for the build process.${CL}\n" + if is_unattended; then + msg_error "Aborted: under-provisioned LXC in unattended mode (${current_cpu} CPU/${current_ram}MB RAM < ${var_cpu} CPU/${var_ram}MB RAM)" + exit 113 + fi echo -ne "${INFO}${HOLD} May cause data loss! ${INFO} Continue update with under-provisioned LXC? " read -r prompt 80)); then msg_warn "Storage is dangerously low (${usage}% used on /boot)" + if is_unattended; then + msg_error "Aborted: storage too low in unattended mode (${usage}% used on /boot)" + exit 114 + fi echo -ne "Continue anyway? " read -r prompt >"$SSH_KEYS_FILE" done ;; @@ -3324,7 +3498,7 @@ configure_ssh_settings() { tag="${tag%\"}" tag="${tag#\"}" local line - line=$(grep -E "^${tag}\|" "$MAPFILE" | head -n1 | cut -d'|' -f2-) + line=$(grep -E "^${tag}\|" "$MAPFILE" | head -n1 | cut -d'|' -f2- || true) [[ -n "$line" ]] && printf '%s\n' "$line" >>"$SSH_KEYS_FILE" done else @@ -3417,6 +3591,52 @@ msg_menu() { return 0 } +# ------------------------------------------------------------------------------ +# run_addon_updates() +# +# - Scans /usr/local/bin/update_* for addon update scripts installed alongside +# the main application (e.g. 
by tools/addon/*.sh) +# - For each found addon, prompts the user (60s timeout, default no) whether +# it should be updated as well +# - Skipped entirely when PHS_SILENT=1 to keep unattended updates predictable +# ------------------------------------------------------------------------------ +run_addon_updates() { + shopt -s nullglob + local addons=(/usr/local/bin/update_*) + shopt -u nullglob + + ((${#addons[@]} == 0)) && return 0 + + if [[ "${PHS_SILENT:-0}" == "1" ]]; then + msg_info "Detected ${#addons[@]} addon update script(s) - skipping (PHS_SILENT)" + return 0 + fi + + echo + echo -e "${INFO}${YW} Detected installed addon update script(s):${CL}" + local a name + for a in "${addons[@]}"; do + echo -e "${TAB}- ${a##*/update_}" + done + echo + + local ans + for a in "${addons[@]}"; do + name="${a##*/update_}" + printf 'Do you also want to update addon "%s"? (y/N) [60s]: ' "$name" + ans="" + if read -r -t 60 ans; then :; else echo; fi + case "${ans,,}" in + y | yes) + bash "$a" || msg_warn "Addon update for $name failed (rc=$?)" + ;; + *) + msg_info "Skipped addon: $name" + ;; + esac + done +} + # ------------------------------------------------------------------------------ # start() # @@ -3436,6 +3656,7 @@ start() { ensure_profile_loaded get_lxc_ip update_script + run_addon_updates update_motd_ip cleanup_lxc else @@ -3464,6 +3685,7 @@ start() { ensure_profile_loaded get_lxc_ip update_script + run_addon_updates update_motd_ip cleanup_lxc fi @@ -3505,6 +3727,7 @@ build_container() { # Gateway if [[ -n "$GATE" ]]; then case "$GATE" in + ,gw=) ;; ,gw=*) NET_STRING+="$GATE" ;; *) NET_STRING+=",gw=$GATE" ;; esac @@ -3531,8 +3754,10 @@ build_container() { auto) NET_STRING="$NET_STRING,ip6=auto" ;; dhcp) NET_STRING="$NET_STRING,ip6=dhcp" ;; static) - NET_STRING="$NET_STRING,ip6=$IPV6_ADDR" - [ -n "$IPV6_GATE" ] && NET_STRING="$NET_STRING,gw6=$IPV6_GATE" + if [[ -n "$IPV6_ADDR" ]]; then + NET_STRING="$NET_STRING,ip6=$IPV6_ADDR" + [ -n "$IPV6_GATE" ] && 
NET_STRING="$NET_STRING,gw6=$IPV6_GATE" + fi ;; none) ;; esac @@ -3556,9 +3781,27 @@ build_container() { FEATURES="${FEATURES}fuse=1" fi + # Mknod support (user configurable via advanced settings) + if [ "${ENABLE_MKNOD:-0}" == "1" ]; then + [ -n "$FEATURES" ] && FEATURES="$FEATURES," + FEATURES="${FEATURES}mknod=1" + fi + + # Mount filesystem types (user configurable via advanced settings) + if [ -n "${ALLOW_MOUNT_FS:-}" ]; then + # Sanitize: strip spaces, trailing/leading commas, then convert commas to semicolons + local _mount_clean="${ALLOW_MOUNT_FS// /}" + _mount_clean="${_mount_clean%%,}" + _mount_clean="${_mount_clean##,}" + _mount_clean="${_mount_clean%%;}" + _mount_clean="${_mount_clean//,/;}" + if [ -n "$_mount_clean" ]; then + [ -n "$FEATURES" ] && FEATURES="$FEATURES," + FEATURES="${FEATURES}mount=${_mount_clean}" + fi + fi + # Build PCT_OPTIONS as string for export - TEMP_DIR=$(mktemp -d) - pushd "$TEMP_DIR" >/dev/null local _func_url if [ "$var_os" == "alpine" ]; then _func_url="https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/alpine-install.func" @@ -3971,7 +4214,7 @@ EOF # Wait for IP assignment (IPv4 or IPv6) local ip_in_lxc="" - for i in {1..20}; do + for i in {1..60}; do # Try IPv4 first ip_in_lxc=$(pct exec "$CTID" -- ip -4 addr show dev eth0 2>/dev/null | awk '/inet / {print $2}' | cut -d/ -f1) # Fallback to IPv6 if IPv4 not available @@ -3979,11 +4222,18 @@ EOF ip_in_lxc=$(pct exec "$CTID" -- ip -6 addr show dev eth0 scope global 2>/dev/null | awk '/inet6 / {print $2}' | cut -d/ -f1 | head -n1) fi [ -n "$ip_in_lxc" ] && break - sleep 1 + # Progressive backoff: 1s for first 20, 2s for next 20, 3s for last 20 + if [ "$i" -le 20 ]; then + sleep 1 + elif [ "$i" -le 40 ]; then + sleep 2 + else + sleep 3 + fi done if [ -z "$ip_in_lxc" ]; then - msg_error "No IP assigned to CT $CTID after 20s" + msg_error "No IP assigned to CT $CTID after 60 attempts" msg_custom "🔧" "${YW}" "Troubleshooting:" echo " • Verify bridge ${BRG} 
exists and has connectivity" echo " • Check if DHCP server is reachable (if using DHCP)" @@ -4023,7 +4273,7 @@ EOF # Fix Debian 13 LXC template bug where / is owned by nobody:nogroup # This must be done from the host as unprivileged containers cannot chown / local rootfs - rootfs=$(pct config "$CTID" | grep -E '^rootfs:' | sed 's/rootfs: //' | cut -d',' -f1) + rootfs=$(pct config "$CTID" | grep -E '^rootfs:' | sed 's/rootfs: //' | cut -d',' -f1 || true) if [[ -n "$rootfs" ]]; then local mount_point="/var/lib/lxc/${CTID}/rootfs" if [[ -d "$mount_point" ]] && [[ "$(stat -c '%U' "$mount_point")" != "root" ]]; then @@ -4057,17 +4307,42 @@ EOF if [ "$var_os" == "alpine" ]; then sleep 3 pct exec "$CTID" -- /bin/sh -c 'cat </etc/apk/repositories -http://dl-cdn.alpinelinux.org/alpine/latest-stable/main -http://dl-cdn.alpinelinux.org/alpine/latest-stable/community +https://dl-cdn.alpinelinux.org/alpine/latest-stable/main +https://dl-cdn.alpinelinux.org/alpine/latest-stable/community EOF' pct exec "$CTID" -- ash -c "apk add bash newt curl openssh nano mc ncurses jq" >>"$BUILD_LOG" 2>&1 || { - msg_error "Failed to install base packages in Alpine container" - install_exit_code=1 + msg_warn "apk install failed (dl-cdn.alpinelinux.org), trying alternate mirrors..." 
+ local alpine_exit=0 + pct exec "$CTID" -- ash -c ' + ALPINE_MIRRORS="mirror.init7.net ftp.halifax.rwth-aachen.de mirrors.edge.kernel.org alpine.mirror.wearetriple.com mirror.leaseweb.com uk.alpinelinux.org dl-2.alpinelinux.org dl-4.alpinelinux.org" + for m in $(printf "%s\n" $ALPINE_MIRRORS | shuf); do + if wget -q --spider --timeout=2 "http://$m/alpine/latest-stable/main/" 2>/dev/null; then + echo " Attempting mirror: $m" + cat </etc/apk/repositories +http://$m/alpine/latest-stable/main +http://$m/alpine/latest-stable/community +EOF + if apk update >/dev/null 2>&1 && apk add bash newt curl openssh nano mc ncurses jq >/dev/null 2>&1; then + echo " CDN set to $m: tests passed" + exit 0 + else + echo " Mirror $m failed" + fi + fi + done + exit 2 + ' && alpine_exit=0 || alpine_exit=$? + if [[ $alpine_exit -ne 0 ]]; then + msg_error "Failed to install base packages in Alpine container" + install_exit_code=1 + fi } else sleep 3 LANG=${LANG:-en_US.UTF-8} - pct exec "$CTID" -- bash -c "sed -i \"/$LANG/ s/^# //\" /etc/locale.gen" + local LANG_ESC="${LANG//./\\.}" + LANG_ESC="${LANG_ESC//|/\\|}" + pct exec "$CTID" -- bash -c "sed -i \"/$LANG_ESC/ s/^# //\" /etc/locale.gen" pct exec "$CTID" -- bash -c "locale_line=\$(grep -v '^#' /etc/locale.gen | grep -E '^[a-zA-Z]' | awk '{print \$1}' | head -n 1) && \ echo LANG=\$locale_line >/etc/default/locale && \ locale-gen >/dev/null && \ @@ -4090,9 +4365,140 @@ EOF' if [[ "${ARCH:-amd64}" == "arm64" ]]; then _base_pkgs+=" openssh-server wget gcc" fi + + # Detect broken DNS resolver (e.g. Tailscale MagicDNS) and inject public DNS + if ! 
pct exec "$CTID" -- bash -c "getent hosts deb.debian.org >/dev/null 2>&1 && getent hosts archive.ubuntu.com >/dev/null 2>&1"; then + msg_warn "APT repository DNS resolution failed in container, injecting public DNS servers" + pct exec "$CTID" -- bash -c "echo -e 'nameserver 8.8.8.8\nnameserver 1.1.1.1' >/etc/resolv.conf" + fi + pct exec "$CTID" -- bash -c "apt-get update 2>&1 && apt-get install -y ${_base_pkgs} 2>&1" >>"$BUILD_LOG" 2>&1 || { - msg_error "apt-get base packages installation failed" - install_exit_code=1 + local failed_mirror + failed_mirror=$(pct exec "$CTID" -- bash -c "grep -m1 -oP '(?<=URIs: https?://)[^/]+' /etc/apt/sources.list.d/debian.sources 2>/dev/null || grep -m1 -oP '(?<=deb https?://)[^/]+' /etc/apt/sources.list 2>/dev/null" 2>/dev/null || echo "unknown") + msg_warn "apt-get update failed (${failed_mirror}), trying alternate mirrors..." + local mirror_exit=0 + pct exec "$CTID" -- env APT_BASE="$_base_pkgs" bash -c ' + DISTRO=$(. /etc/os-release 2>/dev/null && echo "$ID" || echo "debian") + + if [ "$DISTRO" = "ubuntu" ]; then + EU_MIRRORS="de.archive.ubuntu.com fr.archive.ubuntu.com se.archive.ubuntu.com nl.archive.ubuntu.com it.archive.ubuntu.com ch.archive.ubuntu.com mirrors.xtom.de" + US_MIRRORS="us.archive.ubuntu.com archive.ubuntu.com mirrors.edge.kernel.org mirror.csclub.uwaterloo.ca mirrors.ocf.berkeley.edu mirror.math.princeton.edu" + AP_MIRRORS="au.archive.ubuntu.com jp.archive.ubuntu.com kr.archive.ubuntu.com tw.archive.ubuntu.com mirror.aarnet.edu.au" + else + EU_MIRRORS="ftp.de.debian.org ftp.fr.debian.org ftp.nl.debian.org ftp.uk.debian.org ftp.ch.debian.org ftp.se.debian.org ftp.it.debian.org ftp.fau.de ftp.halifax.rwth-aachen.de debian.mirror.lrz.de mirror.init7.net debian.ethz.ch mirrors.dotsrc.org debian.mirrors.ovh.net" + US_MIRRORS="ftp.us.debian.org ftp.ca.debian.org debian.csail.mit.edu mirrors.ocf.berkeley.edu mirrors.wikimedia.org debian.osuosl.org mirror.cogentco.com" + AP_MIRRORS="ftp.au.debian.org 
ftp.jp.debian.org ftp.tw.debian.org ftp.kr.debian.org ftp.hk.debian.org ftp.sg.debian.org mirror.aarnet.edu.au mirror.nitc.ac.in" + fi + + TZ=$(cat /etc/timezone 2>/dev/null || echo "UTC") + case "$TZ" in + Europe/*|Arctic/*) REGIONAL="$EU_MIRRORS"; OTHERS="$US_MIRRORS $AP_MIRRORS" ;; + America/*) REGIONAL="$US_MIRRORS"; OTHERS="$EU_MIRRORS $AP_MIRRORS" ;; + Asia/*|Australia/*|Pacific/*) REGIONAL="$AP_MIRRORS"; OTHERS="$EU_MIRRORS $US_MIRRORS" ;; + *) REGIONAL=""; OTHERS="$EU_MIRRORS $US_MIRRORS $AP_MIRRORS" ;; + esac + + echo "Acquire::By-Hash \"no\";" >/etc/apt/apt.conf.d/99no-by-hash + + try_mirrors() { + for src in /etc/apt/sources.list.d/debian.sources /etc/apt/sources.list; do + [ -f "$src" ] && sed -i "s|URIs: http[s]*://[^/]*/|URIs: http://${1}/|g; s|deb http[s]*://[^/]*/|deb http://${1}/|g" "$src" + done + rm -rf /var/lib/apt/lists/* + APT_OUT=$(apt-get update 2>&1) + APT_RC=$? + if echo "$APT_OUT" | grep -qi "hashsum\|hash sum"; then + echo " Mirror $1 failed (hash mismatch)" + return 1 + elif echo "$APT_OUT" | grep -qi "SSL\|certificate"; then + echo " Mirror $1 failed (SSL/certificate error)" + return 1 + elif [ $APT_RC -ne 0 ]; then + echo " Mirror $1 failed (apt-get update error)" + return 1 + elif apt-get install -y $APT_BASE >/dev/null 2>&1; then + echo " CDN set to $1: tests passed" + return 0 + else + echo " Mirror $1 failed (package install error)" + return 1 + fi + } + + scan_reachable() { + local result="" + for m in $1; do + if timeout 2 bash -c "echo >/dev/tcp/$m/80" 2>/dev/null; then + result="$result $m" + fi + done + echo "$result" | xargs + } + + # Phase 1: Scan global mirrors first (independent of local CDN issues) + OTHERS_OK=$(scan_reachable "$OTHERS") + OTHERS_PICK=$(printf "%s\n" $OTHERS_OK | shuf | head -3 | xargs) + + for mirror in $OTHERS_PICK; do + echo " Attempting mirror: $mirror" + try_mirrors "$mirror" && exit 0 + done + + # Phase 2: Try primary mirror + if [ "$DISTRO" = "ubuntu" ]; then + PRIMARY="archive.ubuntu.com" + else 
+ PRIMARY="ftp.debian.org" + fi + if timeout 2 bash -c "echo >/dev/tcp/$PRIMARY/80" 2>/dev/null; then + echo " Attempting mirror: $PRIMARY" + try_mirrors "$PRIMARY" && exit 0 + fi + + # Phase 3: Fall back to regional mirrors + REGIONAL_OK=$(scan_reachable "$REGIONAL") + REGIONAL_PICK=$(printf "%s\n" $REGIONAL_OK | shuf | head -3 | xargs) + + for mirror in $REGIONAL_PICK; do + echo " Attempting mirror: $mirror" + try_mirrors "$mirror" && exit 0 + done + + exit 2 + ' && mirror_exit=0 || mirror_exit=$? + if [[ $mirror_exit -eq 2 ]]; then + msg_warn "Multiple mirrors failed (possible CDN synchronization issue)." + if [[ "$var_os" == "ubuntu" ]]; then + msg_warn "Find Ubuntu mirrors at: https://launchpad.net/ubuntu/+archivemirrors" + else + msg_warn "Find Debian mirrors at: https://www.debian.org/mirror/list" + fi + local custom_mirror="" + while true; do + read -rp " Enter a mirror hostname (or 'skip' to abort): " custom_mirror /dev/null 2>&1 && apt-get install -y ${_base_pkgs} >/dev/null 2>&1 + " && break + msg_warn "Mirror '${custom_mirror}' also failed. Try another or type 'skip'." + done + if [[ "$custom_mirror" == "skip" ]]; then + msg_error "apt-get base packages installation failed" + install_exit_code=1 + fi + elif [[ $mirror_exit -ne 0 ]]; then + msg_error "apt-get base packages installation failed" + install_exit_code=1 + fi } fi @@ -4203,6 +4609,53 @@ EOF' fi fi + # Defense-in-depth: Ensure error handling stays disabled during recovery. + # Some functions (e.g. silent/$STD) unconditionally re-enable set -Eeuo pipefail + # and trap 'error_handler' ERR. If any code path above called such a function, + # the grep/sed pipelines below would trigger error_handler on non-match (exit 1). + set +Eeuo pipefail + trap - ERR + + # --- Exit code 1 subclassification: analyze logs BEFORE telemetry call --- + # Exit code 1 is generic ("General error"). Analyze logs to determine the + # real error category so telemetry gets a useful classification instead of "shell". 
+ local is_oom=false + local is_network_issue=false + local is_apt_issue=false + local is_cmd_not_found=false + local is_disk_full=false + + if [[ $install_exit_code -eq 1 && -f "$combined_log" ]]; then + if grep -qiE 'E: Unable to|E: Package|E: Failed to fetch|dpkg.*error|broken packages|unmet dependencies|dpkg --configure -a' "$combined_log"; then + is_apt_issue=true + fi + if grep -qiE 'Cannot allocate memory|Out of memory|oom-killer|Killed process|JavaScript heap' "$combined_log"; then + is_oom=true + fi + if grep -qiE 'Could not resolve|DNS|Connection refused|Network is unreachable|No route to host|Temporary failure resolving|Failed to fetch' "$combined_log"; then + is_network_issue=true + fi + if grep -qiE ': command not found|No such file or directory.*/s?bin/' "$combined_log"; then + is_cmd_not_found=true + fi + if grep -qiE 'ENOSPC|no space left on device|Disk quota exceeded|errno -28' "$combined_log"; then + is_disk_full=true + fi + fi + + # Set override for categorize_error() so telemetry gets the real category + if [[ "$is_apt_issue" == true ]]; then + export ERROR_CATEGORY_OVERRIDE="dependency" + elif [[ "$is_oom" == true ]]; then + export ERROR_CATEGORY_OVERRIDE="resource" + elif [[ "$is_network_issue" == true ]]; then + export ERROR_CATEGORY_OVERRIDE="network" + elif [[ "$is_disk_full" == true ]]; then + export ERROR_CATEGORY_OVERRIDE="storage" + elif [[ "$is_cmd_not_found" == true ]]; then + export ERROR_CATEGORY_OVERRIDE="dependency" + fi + # Report failure to telemetry API (now with log available on host) # NOTE: Do NOT use msg_info/spinner here — the background spinner process # causes SIGTSTP in non-interactive shells (bash -c "$(curl ...)"), which @@ -4211,13 +4664,6 @@ EOF' post_update_to_api "failed" "$install_exit_code" $STD echo -e "${TAB}${CM:-✔} Failure reported" - # Defense-in-depth: Ensure error handling stays disabled during recovery. - # Some functions (e.g. 
silent/$STD) unconditionally re-enable set -Eeuo pipefail - # and trap 'error_handler' ERR. If any code path above called such a function, - # the grep/sed pipelines below would trigger error_handler on non-match (exit 1). - set +Eeuo pipefail - trap - ERR - # Show combined log location if [[ -n "$CTID" && -n "${SESSION_ID:-}" ]]; then msg_custom "📋" "${YW}" "Installation log: ${combined_log}" @@ -4246,12 +4692,9 @@ EOF' # Prompt user for cleanup with 60s timeout echo "" - # Detect error type for smart recovery options - local is_oom=false - local is_network_issue=false - local is_apt_issue=false - local is_cmd_not_found=false - local is_disk_full=false + # Extend error detection for non-exit-1 codes (exit 1 was already analyzed above) + # The is_* flags were set above for exit code 1 log analysis; here we add + # exit-code-specific detections for other codes. local error_explanation="" if declare -f explain_exit_code >/dev/null 2>&1; then error_explanation="$(explain_exit_code "$install_exit_code")" @@ -4301,26 +4744,6 @@ EOF' ;; esac - # Exit 1 subclassification: analyze logs to identify actual root cause - # Many exit 1 errors are actually APT, OOM, network, or command-not-found issues - if [[ $install_exit_code -eq 1 && -f "$combined_log" ]]; then - if grep -qiE 'E: Unable to|E: Package|E: Failed to fetch|dpkg.*error|broken packages|unmet dependencies|dpkg --configure -a' "$combined_log"; then - is_apt_issue=true - fi - if grep -qiE 'Cannot allocate memory|Out of memory|oom-killer|Killed process|JavaScript heap' "$combined_log"; then - is_oom=true - fi - if grep -qiE 'Could not resolve|DNS|Connection refused|Network is unreachable|No route to host|Temporary failure resolving|Failed to fetch' "$combined_log"; then - is_network_issue=true - fi - if grep -qiE ': command not found|No such file or directory.*/s?bin/' "$combined_log"; then - is_cmd_not_found=true - fi - if grep -qiE 'ENOSPC|no space left on device|Disk quota exceeded|errno -28' "$combined_log"; then 
- is_disk_full=true - fi - fi - # Show error explanation if available if [[ -n "$error_explanation" ]]; then echo -e "${TAB}${RD}Error: ${error_explanation}${CL}" @@ -4524,6 +4947,7 @@ EOF' if [[ $apt_retry_code -eq 0 ]]; then msg_ok "Installation completed successfully after APT repair!" + INSTALL_COMPLETE=true post_update_to_api "done" "0" "force" return 0 else @@ -4651,7 +5075,7 @@ EOF' destroy_lxc() { if [[ -z "$CT_ID" ]]; then msg_error "No CT_ID found. Nothing to remove." - return 1 + return 65 fi # Abort on Ctrl-C / Ctrl-D / ESC @@ -4690,12 +5114,12 @@ resolve_storage_preselect() { case "$class" in template) required_content="vztmpl" ;; container) required_content="rootdir" ;; - *) return 1 ;; + *) return 65 ;; esac [[ -z "$preselect" ]] && return 1 if ! pvesm status -content "$required_content" | awk 'NR>1{print $1}' | grep -qx -- "$preselect"; then msg_warn "Preselected storage '${preselect}' does not support content '${required_content}' (or not found)" - return 1 + return 238 fi local line total used free @@ -4748,6 +5172,10 @@ fix_gpu_gids() { pct stop "$CTID" >/dev/null 2>&1 sleep 1 + # Validate GIDs are numeric before sed + [[ "$render_gid" =~ ^[0-9]+$ ]] || render_gid="104" + [[ "$video_gid" =~ ^[0-9]+$ ]] || video_gid="44" + # Update dev entries with correct GIDs sed -i.bak -E "s|(dev[0-9]+: /dev/dri/renderD[0-9]+),gid=[0-9]+|\1,gid=${render_gid}|g" "$LXC_CONFIG" sed -i -E "s|(dev[0-9]+: /dev/dri/card[0-9]+),gid=[0-9]+|\1,gid=${video_gid}|g" "$LXC_CONFIG" @@ -4815,7 +5243,7 @@ select_storage() { ;; *) msg_error "Invalid storage class '$CLASS'" - return 1 + return 65 ;; esac @@ -4897,7 +5325,7 @@ validate_storage_space() { # Check if storage exists and is active if [[ -z "$storage_line" ]]; then [[ "$show_dialog" == "yes" ]] && whiptail --msgbox "⚠️ Warning: Storage '$storage' not found!\n\nThe storage may be unavailable or disabled." 
10 60 - return 2 + return 236 fi # Check storage status (column 3) @@ -4905,7 +5333,7 @@ validate_storage_space() { status=$(awk '{print $3}' <<<"$storage_line") if [[ "$status" == "disabled" ]]; then [[ "$show_dialog" == "yes" ]] && whiptail --msgbox "⚠️ Warning: Storage '$storage' is disabled!\n\nPlease enable the storage first." 10 60 - return 2 + return 236 fi # Get storage type and free space (column 6) @@ -4928,7 +5356,7 @@ validate_storage_space() { if [[ "$show_dialog" == "yes" ]]; then whiptail --msgbox "⚠️ Warning: Storage '$storage' may not have enough space!\n\nStorage Type: ${storage_type}\nRequired: ${required_gb}GB\nAvailable: ${free_gb_fmt}\n\nYou can continue, but creation might fail." 14 70 fi - return 1 + return 236 fi return 0 @@ -4965,15 +5393,185 @@ create_lxc_container() { # Extract Debian OS minor from template name: debian-13-standard_13.1-1_amd64.tar.zst => "13.1" parse_template_osver() { sed -n 's/.*_\([0-9][0-9]*\(\.[0-9]\+\)\?\)-.*/\1/p' <<<"$1"; } + # Switch to the previous OS major version template and retry pct create + # Determines the fallback version automatically based on available templates. 
+ # Returns: 0 = success, 1 = failed + fallback_to_previous_os_version() { + local old_template="$TEMPLATE" + local os_type="${PCT_OSTYPE:-}" + local current_ver="${PCT_OSVERSION:-}" + + # Determine template search pattern based on OS type + local tpl_pattern="" + case "$os_type" in + debian | ubuntu) tpl_pattern="-standard_" ;; + alpine | fedora | rocky | centos) tpl_pattern="-default_" ;; + *) tpl_pattern="" ;; + esac + + msg_info "Searching for an older $os_type template (current: $os_type $current_ver)" + + # Collect all available versions for this OS type (local + online) + local -a all_versions=() + + # Local templates + mapfile -t _local_vers < <( + pveam list "$TEMPLATE_STORAGE" 2>/dev/null | + awk -v os="$os_type" -v pat="$tpl_pattern" '$1 ~ ("^"os"|/"os) && $1 ~ pat {print $1}' | + sed 's|.*/||' | + sed -E "s/^${os_type}-([0-9]+(\.[0-9]+)?).*/\1/" | + sort -u -V + ) + all_versions+=("${_local_vers[@]}") + + # Online templates (only if needed) + if command -v timeout &>/dev/null; then + timeout 30 pveam update >/dev/null 2>&1 || true + else + pveam update >/dev/null 2>&1 || true + fi + mapfile -t _online_vers < <( + pveam available -section system 2>/dev/null | + awk '{print $2}' | + grep -E "^${os_type}-[0-9]" | + { [[ -n "$tpl_pattern" ]] && grep "$tpl_pattern" || cat; } | + sed -E "s/^${os_type}-([0-9]+(\.[0-9]+)?).*/\1/" | + sort -u -V 2>/dev/null || true + ) + all_versions+=("${_online_vers[@]}") + + # Deduplicate and sort, find the highest version below current + local fallback_ver="" + fallback_ver=$(printf '%s\n' "${all_versions[@]}" | sort -u -V | awk -v cur="$current_ver" '{ + # Compare major versions: extract major part + split($0, a, ".") + split(cur, b, ".") + if (a[1]+0 < b[1]+0) ver=$0 + } END { if (ver) print ver }') + + if [[ -z "$fallback_ver" ]]; then + msg_error "No older $os_type template version found." 
+ return 1 + fi + + msg_ok "Fallback version: $os_type $fallback_ver" + + # Find the actual template file for this version + local fallback_search="${os_type}-${fallback_ver}" + local fallback_template="" + + # Check local first + mapfile -t _fb_local < <( + pveam list "$TEMPLATE_STORAGE" 2>/dev/null | + awk -v search="$fallback_search" -v pat="$tpl_pattern" '$1 ~ search && $1 ~ pat {print $1}' | + sed 's|.*/||' | sort -t - -k 2 -V + ) + if [[ ${#_fb_local[@]} -gt 0 ]]; then + fallback_template="${_fb_local[-1]}" + else + # Check online + mapfile -t _fb_online < <( + pveam available -section system 2>/dev/null | + awk '{print $2}' | + grep -E "^${fallback_search}.*${tpl_pattern}" | + sort -t - -k 2 -V 2>/dev/null || true + ) + [[ ${#_fb_online[@]} -gt 0 ]] && fallback_template="${_fb_online[-1]}" + fi + + if [[ -z "$fallback_template" ]]; then + msg_error "No template found for $os_type $fallback_ver." + return 1 + fi + + msg_ok "Found template: $fallback_template" + + # Download if needed + local fallback_path + fallback_path="$(pvesm path "$TEMPLATE_STORAGE:vztmpl/$fallback_template" 2>/dev/null || true)" + [[ -z "$fallback_path" ]] && fallback_path="/var/lib/vz/template/cache/$fallback_template" + + if [[ ! -f "$fallback_path" ]]; then + msg_info "Downloading $os_type $fallback_ver template" + if ! pveam download "$TEMPLATE_STORAGE" "$fallback_template" >>"${BUILD_LOG:-/dev/null}" 2>&1; then + msg_error "Failed to download $os_type $fallback_ver template." + return 1 + fi + msg_ok "Template downloaded" + fi + + # Update variables + TEMPLATE="$fallback_template" + TEMPLATE_PATH="$fallback_path" + PCT_OSVERSION="$fallback_ver" + export PCT_OSVERSION + + # Retry pct create + msg_info "Retrying container creation with $os_type $fallback_ver" + if pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" $PCT_OPTIONS >>"$LOGFILE" 2>&1; then + msg_ok "Container created successfully with $os_type $fallback_ver (fallback from $old_template)." 
+ return 0 + else + msg_error "Container creation with $os_type $fallback_ver also failed. See $LOGFILE" + return 1 + fi + } + # Offer upgrade for pve-container/lxc-pve if candidate > installed; optional auto-retry pct create # Returns: - # 0 = no upgrade needed + # 0 = no upgrade needed / container created after upgrade or explicit fallback # 1 = upgraded (and if do_retry=yes and retry succeeded, creation done) - # 2 = user declined + # 2 = user chose ignore # 3 = upgrade attempted but failed OR retry failed + # 4 = user cancelled offer_lxc_stack_upgrade_and_maybe_retry() { local do_retry="${1:-no}" # yes|no local _pvec_i _pvec_c _lxcp_i _lxcp_c need=0 + local _ans + + has_previous_os_version_template() { + local os_type="${PCT_OSTYPE:-}" + local current_ver="${PCT_OSVERSION:-}" + local tpl_pattern="${TEMPLATE_PATTERN:-${TEMPLATE:-}}" + local -a all_versions=() + + [[ -n "$os_type" && -n "$current_ver" ]] || return 1 + + mapfile -t _local_vers < <( + pveam list "$TEMPLATE_STORAGE" 2>/dev/null | + awk '{print $1}' | + sed 's|.*/||' | + grep -E "^${os_type}-[0-9]" | + { [[ -n "$tpl_pattern" ]] && grep "$tpl_pattern" || cat; } | + sed -E "s/^${os_type}-([0-9]+(\.[0-9]+)?).*/\1/" | + sort -u -V + ) + all_versions+=("${_local_vers[@]}") + + if command -v timeout &>/dev/null; then + timeout 30 pveam update >/dev/null 2>&1 || true + else + pveam update >/dev/null 2>&1 || true + fi + mapfile -t _online_vers < <( + pveam available -section system 2>/dev/null | + awk '{print $2}' | + grep -E "^${os_type}-[0-9]" | + { [[ -n "$tpl_pattern" ]] && grep "$tpl_pattern" || cat; } | + sed -E "s/^${os_type}-([0-9]+(\.[0-9]+)?).*/\1/" | + sort -u -V 2>/dev/null || true + ) + all_versions+=("${_online_vers[@]}") + + printf '%s\n' "${all_versions[@]}" | sort -u -V | awk -v cur="$current_ver" ' + { + split($0, a, ".") + split(cur, b, ".") + if (a[1]+0 < b[1]+0) found=1 + } + END { exit found ? 
0 : 1 } + ' + } _pvec_i="$(pkg_ver pve-container)" _lxcp_i="$(pkg_ver lxc-pve)" @@ -4991,17 +5589,81 @@ create_lxc_container() { return 0 fi - msg_info "An update for the Proxmox LXC stack is available" + msg_warn "An update for the Proxmox LXC stack is available" echo " pve-container: installed=${_pvec_i:-n/a} candidate=${_pvec_c:-n/a}" echo " lxc-pve : installed=${_lxcp_i:-n/a} candidate=${_lxcp_c:-n/a}" + echo " note : option 1 runs host apt update + apt upgrade" echo - read -rp "Do you want to upgrade now? [y/N] " _ans /dev/null; then + msg_error "LXC stack upgrade caused PVE tool breakage (likely Perl module incompatibility)." + msg_custom "⚠️" "${YW}" "A partial package upgrade has left the PVE stack in an inconsistent state." + msg_custom "🔧" "${YW}" "Please run the following on the Proxmox host, then retry:" + echo -e "${TAB} apt update && apt upgrade -y" + echo -e "${TAB} reboot" + return 3 + fi if [[ "$do_retry" == "yes" ]]; then msg_info "Retrying container creation after upgrade" if pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" $PCT_OPTIONS >>"$LOGFILE" 2>&1; then @@ -5110,9 +5772,10 @@ create_lxc_container() { exit 205 } if qm status "$CTID" &>/dev/null || pct status "$CTID" &>/dev/null; then - unset CTID - msg_error "Cannot use ID that is already in use." - exit 206 + msg_warn "Container/VM ID $CTID is already in use (detected late). Reassigning..." + CTID=$(get_valid_container_id "$((CTID + 1))") + export CTID + msg_ok "Reassigned to container ID $CTID" fi # Report installation start to API early - captures failures in storage/template/create @@ -5165,10 +5828,35 @@ create_lxc_container() { fi msg_info "Validating storage '$CONTAINER_STORAGE'" - STORAGE_TYPE=$(grep -E "^[^:]+: $CONTAINER_STORAGE$" /etc/pve/storage.cfg | cut -d: -f1 | head -1) + # Check if storage.cfg is accessible (pmxcfs must be mounted) + if [[ ! -f /etc/pve/storage.cfg ]]; then + if ! 
mountpoint -q /etc/pve 2>/dev/null; then + msg_error "Proxmox cluster filesystem (pmxcfs) is not mounted at /etc/pve." + msg_custom "🔧" "${YW}" "Try: systemctl restart pve-cluster" + else + msg_error "/etc/pve/storage.cfg does not exist." + msg_custom "🔧" "${YW}" "Check Proxmox cluster filesystem integrity: pvecm status" + fi + exit 213 + fi + + STORAGE_TYPE=$(grep -E "^[^:]+:[[:space:]]*$CONTAINER_STORAGE[[:space:]]*$" /etc/pve/storage.cfg | cut -d: -f1 | head -1 || true) + + # Fallback: use pvesm status to determine storage type + if [[ -z "$STORAGE_TYPE" ]]; then + STORAGE_TYPE=$(pvesm status -storage "$CONTAINER_STORAGE" 2>/dev/null | awk 'NR>1{print $2}') + fi if [[ -z "$STORAGE_TYPE" ]]; then msg_error "Storage '$CONTAINER_STORAGE' not found in /etc/pve/storage.cfg" + msg_custom "📋" "${YW}" "Available storages: $(pvesm status 2>/dev/null | awk 'NR>1{printf "%s (%s) ", $1, $2}' || echo 'n/a')" + if [[ -r /etc/pve/storage.cfg ]]; then + msg_custom "📋" "${YW}" "Storage definitions found in config:" + grep -E '^[a-z]+:' /etc/pve/storage.cfg 2>/dev/null | while IFS= read -r _line; do + echo "${TAB} $_line" + done + fi + msg_custom "📖" "${YW}" "See https://pve.proxmox.com/wiki/Storage for storage configuration details." exit 213 fi @@ -5199,12 +5887,16 @@ create_lxc_container() { if ! pvesm status -content rootdir 2>/dev/null | awk 'NR>1{print $1}' | grep -qx "$CONTAINER_STORAGE"; then msg_error "Storage '$CONTAINER_STORAGE' ($STORAGE_TYPE) does not support 'rootdir' content." 
+ msg_custom "💡" "${YW}" "Enable 'Disk image' (rootdir) for storage '${CONTAINER_STORAGE}' in:" + msg_custom " " "${YW}" "Datacenter → Storage → ${CONTAINER_STORAGE} → Edit → Content" + msg_custom "📖" "${YW}" "See: https://pve.proxmox.com/wiki/Storage" + msg_custom "🔗" "${YW}" "Help: https://github.com/community-scripts/ProxmoxVE/discussions" exit 213 fi msg_ok "Storage '$CONTAINER_STORAGE' ($STORAGE_TYPE) validated" msg_info "Validating template storage '$TEMPLATE_STORAGE'" - TEMPLATE_TYPE=$(grep -E "^[^:]+: $TEMPLATE_STORAGE$" /etc/pve/storage.cfg | cut -d: -f1) + TEMPLATE_TYPE=$(grep -E "^[^:]+: $TEMPLATE_STORAGE$" /etc/pve/storage.cfg | cut -d: -f1 || true) if ! pvesm status -content vztmpl 2>/dev/null | awk 'NR>1{print $1}' | grep -qx "$TEMPLATE_STORAGE"; then msg_warn "Template storage '$TEMPLATE_STORAGE' may not support 'vztmpl'" @@ -5614,37 +6306,103 @@ create_lxc_container() { msg_debug "Logfile: $LOGFILE" # First attempt (PCT_OPTIONS is a multi-line string, use it directly) + # Disable globbing: unquoted $PCT_OPTIONS needs word-splitting but must not glob-expand + # (e.g. passwords containing * or ? would match filenames otherwise) + set -f if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" $PCT_OPTIONS >"$LOGFILE" 2>&1; then msg_debug "Container creation failed on ${TEMPLATE_STORAGE}. Checking error..." - # Check if template issue - retry with fresh download - if grep -qiE 'unable to open|corrupt|invalid' "$LOGFILE"; then - msg_info "Template may be corrupted - re-downloading" - rm -f "$TEMPLATE_PATH" - download_template - msg_ok "Template re-downloaded" + # Check for Perl module breakage (partial PVE upgrade) + if grep -qiE 'Compilation failed|Bareword.*not allowed' "$LOGFILE"; then + msg_error "Container creation failed due to broken Perl modules on the PVE host." + msg_custom "⚠️" "${YW}" "This usually happens after a partial PVE package upgrade." 
+ msg_custom "🔧" "${YW}" "Please run the following on the Proxmox host, then retry:" + echo -e "${TAB} apt update && apt dist-upgrade -y" + echo -e "${TAB} reboot" + _flush_pct_log + exit 232 fi - # Retry after repair - if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" $PCT_OPTIONS >>"$LOGFILE" 2>&1; then - # Fallback to local storage if not already on local - if [[ "$TEMPLATE_STORAGE" != "local" ]]; then - msg_info "Retrying container creation with fallback to local storage" - LOCAL_TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE" - if [[ ! -f "$LOCAL_TEMPLATE_PATH" ]]; then - msg_ok "Trying local storage fallback" - msg_info "Downloading template to local" - if [[ "$ARCH" == "arm64" ]]; then - download_arm64_template "$LOCAL_TEMPLATE_PATH" + # Check if CTID collision (race condition: ID claimed between validation and creation) + if grep -qiE 'already exists|already in use' "$LOGFILE"; then + local old_ctid="$CTID" + CTID=$(get_valid_container_id "$((CTID + 1))") + export CTID + msg_warn "Container ID $old_ctid was claimed by another process. Retrying with ID $CTID" + LOGFILE="/tmp/pct_create_${CTID}_$(date +%Y%m%d_%H%M%S)_${SESSION_ID}.log" + if pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" $PCT_OPTIONS >"$LOGFILE" 2>&1; then + msg_ok "Container successfully created with new ID $CTID" + else + msg_error "Container creation failed even with new ID $CTID. See $LOGFILE" + _flush_pct_log + exit 209 + fi + else + # Not a CTID collision - check if template issue and retry with fresh download + if grep -qiE 'unable to open|corrupt|invalid' "$LOGFILE"; then + msg_info "Template may be corrupted - re-downloading" + rm -f "$TEMPLATE_PATH" + download_template + msg_ok "Template re-downloaded" + fi + + # Retry after repair + if ! 
pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" $PCT_OPTIONS >>"$LOGFILE" 2>&1; then + # Fallback to local storage if not already on local + if [[ "$TEMPLATE_STORAGE" != "local" ]]; then + msg_info "Retrying container creation with fallback to local storage" + LOCAL_TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE" + if [[ ! -f "$LOCAL_TEMPLATE_PATH" ]]; then + msg_ok "Trying local storage fallback" + msg_info "Downloading template to local" + if [[ "$ARCH" == "arm64" ]]; then + download_arm64_template "$LOCAL_TEMPLATE_PATH" + else + pveam download local "$TEMPLATE" >>"${BUILD_LOG:-/dev/null}" 2>&1 + fi + msg_ok "Template downloaded to local" else - pveam download local "$TEMPLATE" >>"${BUILD_LOG:-/dev/null}" 2>&1 + msg_ok "Trying local storage fallback" + fi + if ! pct create "$CTID" "local:vztmpl/${TEMPLATE}" $PCT_OPTIONS >>"$LOGFILE" 2>&1; then + # Local fallback also failed - check for LXC stack version issue + if grep -qiE 'unsupported .* version' "$LOGFILE"; then + msg_warn "pct reported 'unsupported version' – LXC stack might be too old for this template" + offer_lxc_stack_upgrade_and_maybe_retry "yes" + rc=$? + case $rc in + 0) : ;; # success - container created, continue + 2) + msg_error "LXC stack upgrade ignored. Please inspect: $LOGFILE" + _flush_pct_log + exit 231 + ;; + 3) + msg_error "LXC stack upgrade failed. Please inspect: $LOGFILE" + _flush_pct_log + exit 231 + ;; + 4) + msg_error "Cancelled by user." + _flush_pct_log + exit 231 + ;; + esac + else + msg_error "Container creation failed. See $LOGFILE" + if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 12 70; then + set -x + pct create "$CTID" "local:vztmpl/${TEMPLATE}" $PCT_OPTIONS 2>&1 | tee -a "$LOGFILE" + set +x + fi + _flush_pct_log + exit 209 + fi + else + msg_ok "Container successfully created using local fallback." fi - msg_ok "Template downloaded to local" else - msg_ok "Trying local storage fallback" - fi - if ! 
pct create "$CTID" "local:vztmpl/${TEMPLATE}" $PCT_OPTIONS >>"$LOGFILE" 2>&1; then - # Local fallback also failed - check for LXC stack version issue + # Already on local storage and still failed - check LXC stack version if grep -qiE 'unsupported .* version' "$LOGFILE"; then msg_warn "pct reported 'unsupported version' – LXC stack might be too old for this template" offer_lxc_stack_upgrade_and_maybe_retry "yes" @@ -5652,12 +6410,17 @@ create_lxc_container() { case $rc in 0) : ;; # success - container created, continue 2) - msg_error "Upgrade declined. Please update and re-run: apt update && apt install --only-upgrade pve-container lxc-pve" + msg_error "LXC stack upgrade ignored. Please inspect: $LOGFILE" _flush_pct_log exit 231 ;; 3) - msg_error "Upgrade and/or retry failed. Please inspect: $LOGFILE" + msg_error "LXC stack upgrade failed. Please inspect: $LOGFILE" + _flush_pct_log + exit 231 + ;; + 4) + msg_error "Cancelled by user." _flush_pct_log exit 231 ;; @@ -5672,50 +6435,29 @@ create_lxc_container() { _flush_pct_log exit 209 fi - else - msg_ok "Container successfully created using local fallback." fi else - # Already on local storage and still failed - check LXC stack version - if grep -qiE 'unsupported .* version' "$LOGFILE"; then - msg_warn "pct reported 'unsupported version' – LXC stack might be too old for this template" - offer_lxc_stack_upgrade_and_maybe_retry "yes" - rc=$? - case $rc in - 0) : ;; # success - container created, continue - 2) - msg_error "Upgrade declined. Please update and re-run: apt update && apt install --only-upgrade pve-container lxc-pve" - _flush_pct_log - exit 231 - ;; - 3) - msg_error "Upgrade and/or retry failed. Please inspect: $LOGFILE" - _flush_pct_log - exit 231 - ;; - esac - else - msg_error "Container creation failed. See $LOGFILE" - if whiptail --yesno "pct create failed.\nDo you want to enable verbose debug mode and view detailed logs?" 
12 70; then - set -x - pct create "$CTID" "local:vztmpl/${TEMPLATE}" $PCT_OPTIONS 2>&1 | tee -a "$LOGFILE" - set +x - fi - _flush_pct_log - exit 209 - fi + msg_ok "Container successfully created after template repair." fi - else - msg_ok "Container successfully created after template repair." - fi + fi # close CTID collision else-branch fi + set +f # re-enable globbing after pct create block - # Verify container exists - pct list | awk '{print $1}' | grep -qx "$CTID" || { - msg_error "Container ID $CTID not listed in 'pct list'. See $LOGFILE" + # Verify container exists (allow up to 10s for pmxcfs sync in clusters) + local _pct_visible=false + for _pct_check in {1..10}; do + if pct list | awk '{print $1}' | grep -qx "$CTID"; then + _pct_visible=true + break + fi + sleep 1 + done + if [[ "$_pct_visible" != true ]]; then + msg_error "Container ID $CTID not listed in 'pct list' after 10s. See $LOGFILE" + msg_custom "🔧" "${YW}" "This can happen in clusters with pmxcfs sync delays." _flush_pct_log exit 215 - } + fi # Verify config rootfs grep -q '^rootfs:' "/etc/pve/lxc/$CTID.conf" || { @@ -5755,20 +6497,32 @@ create_lxc_container() { # ------------------------------------------------------------------------------ description() { IP=$(pct exec "$CTID" ip a s dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1) + local script_slug script_url donate_url + + script_slug="${SCRIPT_SLUG:-${NSAPP}}" + script_slug="$(echo "$script_slug" | tr '[:upper:]' '[:lower:]' | tr ' ' '-')" + script_url="https://community-scripts.org/scripts/${script_slug}" + donate_url="https://community-scripts.org/donate" # Generate LXC Description DESCRIPTION=$( cat < - + Logo

${APP} LXC

- - spend Coffee + + Sponsoring and donations + +

+ +

+ + Open script page

@@ -5793,6 +6547,41 @@ EOF systemctl start ping-instances.service fi + # Optional host-side post-install hook + # Path comes from var_post_install (default.vars / app.vars / advanced settings). + # Runs ON THE PROXMOX HOST after the container is up and configured. + # Exposed env vars: APP, NSAPP, CTID, IP, HN, STORAGE, BRG. + # Output (stdout/stderr) is captured to /var/log/community-scripts/post-install-.log + if [[ -n "${var_post_install:-}" ]]; then + local _hook_log_dir="/var/log/community-scripts" + local _hook_log="${_hook_log_dir}/post-install-${CTID}.log" + mkdir -p "$_hook_log_dir" 2>/dev/null || true + + if [[ ! -f "${var_post_install}" ]]; then + msg_error "Post-install hook not found on host: ${var_post_install}" + whiptail --backtitle "Proxmox VE Helper Scripts" \ + --title "POST-INSTALL HOOK FAILED" \ + --msgbox "The configured post-install hook was not found on the Proxmox host:\n\n${var_post_install}\n\nThe LXC was created successfully, but the hook did NOT run." 14 72 || true + else + msg_info "Running post-install hook: ${var_post_install}" + local _hook_rc=0 + APP="$APP" NSAPP="${NSAPP:-}" CTID="$CTID" IP="$IP" HN="${HN:-}" \ + STORAGE="${STORAGE:-}" BRG="${BRG:-}" \ + bash "${var_post_install}" >"${_hook_log}" 2>&1 || _hook_rc=$? + if [[ $_hook_rc -eq 0 ]]; then + msg_ok "Post-install hook completed (log: ${_hook_log})" + else + msg_error "Post-install hook failed (rc=${_hook_rc}) – see ${_hook_log}" + local _hook_tail="" + _hook_tail="$(tail -n 15 "${_hook_log}" 2>/dev/null || true)" + whiptail --backtitle "Proxmox VE Helper Scripts" \ + --title "POST-INSTALL HOOK FAILED" \ + --msgbox "Hook exited with code ${_hook_rc}.\n\nScript: ${var_post_install}\nLog: ${_hook_log}\n\n--- Last log lines ---\n${_hook_tail}\n\nThe LXC itself was created successfully." 
22 78 || true + fi + fi + fi + + INSTALL_COMPLETE=true post_update_to_api "done" "none" } diff --git a/misc/cloud-init.func b/misc/cloud-init.func index 0c8597f9b..9e97e4d9f 100644 --- a/misc/cloud-init.func +++ b/misc/cloud-init.func @@ -319,11 +319,11 @@ function setup_cloud_init() { if [ "$network_mode" = "static" ]; then if [ -n "$static_ip" ] && ! validate_ip_cidr "$static_ip"; then _ci_msg_error "Invalid static IP format: $static_ip (expected: x.x.x.x/xx)" - return 1 + return 65 fi if [ -n "$gateway" ] && ! validate_ip "$gateway"; then _ci_msg_error "Invalid gateway IP format: $gateway" - return 1 + return 65 fi fi @@ -433,7 +433,7 @@ function configure_cloud_init_interactive() { if ! command -v whiptail >/dev/null 2>&1; then echo "Warning: whiptail not available, skipping interactive configuration" export CLOUDINIT_ENABLE="no" - return 1 + return 127 fi # Ask if user wants to enable Cloud-Init @@ -603,7 +603,7 @@ function get_vm_ip() { elapsed=$((elapsed + 2)) done - return 1 + return 7 } # ------------------------------------------------------------------------------ @@ -621,7 +621,7 @@ function wait_for_cloud_init() { if [ -z "$vm_ip" ]; then _ci_msg_warn "Unable to determine VM IP address" - return 1 + return 7 fi _ci_msg_info "Waiting for Cloud-Init to complete on ${vm_ip}" @@ -638,7 +638,7 @@ function wait_for_cloud_init() { done _ci_msg_warn "Cloud-Init did not complete within ${timeout}s" - return 1 + return 150 } # ============================================================================== diff --git a/misc/core.func b/misc/core.func index 2c991b3e3..be9a65955 100644 --- a/misc/core.func +++ b/misc/core.func @@ -143,7 +143,7 @@ ensure_profile_loaded() { # Source all profile.d scripts to ensure PATH is complete if [[ -d /etc/profile.d ]]; then for script in /etc/profile.d/*.sh; do - [[ -r "$script" ]] && source "$script" + [[ -r "$script" ]] && source "$script" || true done fi @@ -533,29 +533,23 @@ silent() { fi if [[ $rc -ne 0 ]]; then - # Source 
explain_exit_code if needed - if ! declare -f explain_exit_code >/dev/null 2>&1; then - if ! source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func); then - explain_exit_code() { echo "unknown (error_handler.func download failed)"; } - fi - fi + # Return instead of exit so that callers can use `$STD cmd || true` + # or `if $STD cmd; then ...` to handle errors gracefully. + # When no || / if is used, set -e + ERR trap will still catch it + # and error_handler() will display the error and exit. + # + # Set flag so error_handler knows to show log tail from silent's logfile + export _SILENT_FAILED_RC="$rc" + export _SILENT_FAILED_CMD="$cmd" + export _SILENT_FAILED_LINE="$caller_line" + export _SILENT_FAILED_LOG="$logfile" - local explanation - explanation="$(explain_exit_code "$rc")" - - printf "\e[?25h" - msg_error "in line ${caller_line}: exit code ${rc} (${explanation})" - msg_custom "→" "${YWB}" "${cmd}" - - if [[ -s "$logfile" ]]; then - echo -e "\n${TAB}--- Last 20 lines of log ---" - tail -n 20 "$logfile" - echo -e "${TAB}-----------------------------------" - echo -e "${TAB}📋 Full log: ${logfile}\n" - fi - - exit "$rc" + return "$rc" fi + + # Clear stale flags on success (prevents false positives if a previous + # $STD cmd || true failed and a later non-silent command triggers error_handler) + unset _SILENT_FAILED_RC _SILENT_FAILED_CMD _SILENT_FAILED_LINE _SILENT_FAILED_LOG 2>/dev/null || true } # ------------------------------------------------------------------------------ @@ -864,7 +858,7 @@ get_header() { if [ ! -s "$local_header_path" ]; then if ! 
curl -fsSL "$header_url" -o "$local_header_path"; then msg_warn "Failed to download header: $header_url" - return 1 + return 250 fi fi @@ -880,6 +874,12 @@ get_header() { # - Returns silently if header not available # ------------------------------------------------------------------------------ header_info() { + # Guard against printing the header twice in the same session (e.g. when + # the ct script calls header_info at global scope AND again inside + # update_script()). + [[ "${_HEADER_SHOWN:-0}" == "1" ]] && return 0 + _HEADER_SHOWN=1 + local app_name=$(echo "${APP,,}" | tr -d ' ') local header_content @@ -1364,7 +1364,7 @@ prompt_select() { if [[ $num_options -eq 0 ]]; then msg_warn "prompt_select called with no options" echo "" >&2 - return 1 + return 65 fi # Validate default @@ -1606,7 +1606,7 @@ check_or_create_swap() { swap_size_mb=$(prompt_input "Enter swap size in MB (e.g., 2048 for 2GB):" "2048" 60) if ! [[ "$swap_size_mb" =~ ^[0-9]+$ ]]; then msg_error "Invalid swap size: '${swap_size_mb}' (must be a number in MB)" - return 1 + return 65 fi local swap_file="/swapfile" @@ -1614,19 +1614,19 @@ check_or_create_swap() { msg_info "Creating ${swap_size_mb}MB swap file at $swap_file" if ! dd if=/dev/zero of="$swap_file" bs=1M count="$swap_size_mb" status=progress; then msg_error "Failed to allocate swap file (dd failed)" - return 1 + return 150 fi if ! chmod 600 "$swap_file"; then msg_error "Failed to set permissions on $swap_file" - return 1 + return 150 fi if ! mkswap "$swap_file"; then msg_error "Failed to format swap file (mkswap failed)" - return 1 + return 150 fi if ! 
swapon "$swap_file"; then msg_error "Failed to activate swap (swapon failed)" - return 1 + return 150 fi msg_ok "Swap file created and activated successfully" } @@ -1705,13 +1705,13 @@ function get_lxc_ip() { fi done - return 1 + return 6 } LOCAL_IP="$(get_current_ip || true)" if [[ -z "$LOCAL_IP" ]]; then msg_error "Could not determine LOCAL_IP (checked: eth0, hostname -I, ip route, IPv6 targets)" - return 1 + return 6 fi fi diff --git a/misc/error_handler.func b/misc/error_handler.func index e547a1573..09cd8c25c 100644 --- a/misc/error_handler.func +++ b/misc/error_handler.func @@ -236,6 +236,16 @@ error_handler() { command="${command//\$STD/}" + # If error originated from silent(), use its captured metadata + # This provides the actual command and line number instead of "silent ..." + if [[ -n "${_SILENT_FAILED_RC:-}" ]]; then + exit_code="$_SILENT_FAILED_RC" + command="$_SILENT_FAILED_CMD" + line_number="$_SILENT_FAILED_LINE" + # Clear flags to prevent stale data on subsequent errors + unset _SILENT_FAILED_RC _SILENT_FAILED_CMD _SILENT_FAILED_LINE + fi + if [[ "$exit_code" -eq 0 ]]; then return 0 fi @@ -279,8 +289,12 @@ error_handler() { fi # Get active log file (BUILD_LOG or INSTALL_LOG) + # Prefer silent()'s logfile when available (contains the actual command output) local active_log="" - if declare -f get_active_logfile >/dev/null 2>&1; then + if [[ -n "${_SILENT_FAILED_LOG:-}" && -s "${_SILENT_FAILED_LOG}" ]]; then + active_log="$_SILENT_FAILED_LOG" + unset _SILENT_FAILED_LOG + elif declare -f get_active_logfile >/dev/null 2>&1; then active_log="$(get_active_logfile)" elif [[ -n "${SILENT_LOGFILE:-}" ]]; then active_log="$SILENT_LOGFILE" @@ -299,6 +313,51 @@ error_handler() { echo -e "${TAB}-----------------------------------\n" fi + # Detect probable Node.js heap OOM and print actionable guidance. + # This avoids generic SIGABRT/SIGKILL confusion for frontend build failures. 
+ local node_oom_detected="false" + local node_build_context="false" + if [[ "$command" =~ (npm|pnpm|yarn|node|vite|turbo) ]]; then + node_build_context="true" + fi + if [[ "$exit_code" == "243" ]]; then + node_oom_detected="true" + elif [[ -n "$active_log" && -s "$active_log" ]]; then + if tail -n 200 "$active_log" 2>/dev/null | grep -Eqi 'Reached heap limit|JavaScript heap out of memory|Allocation failed - JavaScript heap out of memory|FATAL ERROR: Reached heap limit'; then + node_oom_detected="true" + fi + fi + + if [[ "$node_oom_detected" == "true" ]] || { [[ "$node_build_context" == "true" ]] && [[ "$exit_code" =~ ^(134|137)$ ]]; }; then + local heap_hint_mb="" + + # If explicitly configured, prefer the current value for troubleshooting output. + if [[ -n "${NODE_OPTIONS:-}" ]] && [[ "${NODE_OPTIONS}" =~ max-old-space-size=([0-9]+) ]]; then + heap_hint_mb="${BASH_REMATCH[1]}" + elif [[ -n "${var_ram:-}" ]] && [[ "${var_ram}" =~ ^[0-9]+$ ]]; then + heap_hint_mb=$((var_ram * 75 / 100)) + else + local mem_kb="" + mem_kb=$(awk '/^MemTotal:/ {print $2; exit}' /proc/meminfo 2>/dev/null || echo "") + if [[ "$mem_kb" =~ ^[0-9]+$ ]]; then + local mem_mb=$((mem_kb / 1024)) + heap_hint_mb=$((mem_mb * 75 / 100)) + fi + fi + + if [[ -z "$heap_hint_mb" ]] || ((heap_hint_mb < 1024)); then + heap_hint_mb=1024 + elif ((heap_hint_mb > 12288)); then + heap_hint_mb=12288 + fi + + if declare -f msg_warn >/dev/null 2>&1; then + msg_warn "Possible Node.js heap OOM. Try: export NODE_OPTIONS=\"--max-old-space-size=${heap_hint_mb}\" and rerun the build." + else + echo -e "${YW}Possible Node.js heap OOM. 
Try: export NODE_OPTIONS=\"--max-old-space-size=${heap_hint_mb}\" and rerun the build.${CL}" + fi + fi + # Detect context: Container (INSTALL_LOG set + inside container /root) vs Host if [[ -n "${INSTALL_LOG:-}" && -f "${INSTALL_LOG:-}" && -d /root ]]; then # CONTAINER CONTEXT: Copy log and create flag file for host @@ -507,14 +566,23 @@ _stop_container_if_installing() { on_exit() { local exit_code=$? - # Report orphaned "installing" records to telemetry API - # Catches ALL exit paths: errors, signals, AND clean exits where - # post_to_api was called but post_update_to_api was never called - if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then - if [[ $exit_code -ne 0 ]]; then - _send_abort_telemetry "$exit_code" - elif declare -f post_update_to_api >/dev/null 2>&1; then - post_update_to_api "done" "0" 2>/dev/null || true + # Report orphaned telemetry records + # Two scenarios handled: + # 1. POST_TO_API_DONE=true but POST_UPDATE_DONE=false: Record was created but + # never got a final status update → send abort/done now. + # 2. POST_TO_API_DONE=false but DIAGNOSTICS=yes: Initial post failed (server + # unreachable/timeout), but the server has fallback create-on-update logic, + # so a status update can still create the record. Worth one last try. + if [[ "${POST_UPDATE_DONE:-}" != "true" ]]; then + if [[ "${POST_TO_API_DONE:-}" == "true" || "${DIAGNOSTICS:-no}" == "yes" ]]; then + if [[ $exit_code -ne 0 ]]; then + _send_abort_telemetry "$exit_code" + elif [[ "${INSTALL_COMPLETE:-}" == "true" ]] && declare -f post_update_to_api >/dev/null 2>&1; then + # Only report success if the install was explicitly marked complete. + # Without this guard, early bailouts (e.g. user cancelled) with exit 0 + # would be falsely reported as successful installations. 
+ post_update_to_api "done" "0" 2>/dev/null || true + fi fi fi diff --git a/misc/install.func b/misc/install.func index 94f005b26..a1a1e0ff3 100644 --- a/misc/install.func +++ b/misc/install.func @@ -210,6 +210,173 @@ network_check() { # SECTION 3: OS UPDATE & PACKAGE MANAGEMENT # ============================================================================== +# ------------------------------------------------------------------------------ +# apt_update_safe() +# +# - Runs apt-get update with CDN mirror fallback +# - On failure, detects distro (Debian/Ubuntu) and tries alternate mirrors +# - Three-phase approach: global mirrors → primary mirror → regional mirrors +# - Falls back to manual user prompt if all auto mirrors fail +# - Detects hash mismatch, SSL errors, and generic apt failures +# ------------------------------------------------------------------------------ +apt_update_safe() { + if $STD apt-get update; then + return 0 + fi + + local failed_mirror + failed_mirror=$(grep -m1 -oP '(?<=URIs: https?://)[^/]+' /etc/apt/sources.list.d/debian.sources 2>/dev/null || grep -m1 -oP '(?<=deb https?://)[^/]+' /etc/apt/sources.list 2>/dev/null || echo "unknown") + msg_warn "apt-get update failed (${failed_mirror}), trying alternate mirrors..." + + local distro + distro=$(. 
/etc/os-release 2>/dev/null && echo "$ID" || echo "debian") + + local eu_mirrors us_mirrors ap_mirrors + if [[ "$distro" == "ubuntu" ]]; then + eu_mirrors="de.archive.ubuntu.com fr.archive.ubuntu.com se.archive.ubuntu.com nl.archive.ubuntu.com it.archive.ubuntu.com ch.archive.ubuntu.com mirrors.xtom.de" + us_mirrors="us.archive.ubuntu.com archive.ubuntu.com mirrors.edge.kernel.org mirror.csclub.uwaterloo.ca mirrors.ocf.berkeley.edu mirror.math.princeton.edu" + ap_mirrors="au.archive.ubuntu.com jp.archive.ubuntu.com kr.archive.ubuntu.com tw.archive.ubuntu.com mirror.aarnet.edu.au" + else + eu_mirrors="ftp.de.debian.org ftp.fr.debian.org ftp.nl.debian.org ftp.uk.debian.org ftp.ch.debian.org ftp.se.debian.org ftp.it.debian.org ftp.fau.de ftp.halifax.rwth-aachen.de debian.mirror.lrz.de mirror.init7.net debian.ethz.ch mirrors.dotsrc.org debian.mirrors.ovh.net" + us_mirrors="ftp.us.debian.org ftp.ca.debian.org debian.csail.mit.edu mirrors.ocf.berkeley.edu mirrors.wikimedia.org debian.osuosl.org mirror.cogentco.com" + ap_mirrors="ftp.au.debian.org ftp.jp.debian.org ftp.tw.debian.org ftp.kr.debian.org ftp.hk.debian.org ftp.sg.debian.org mirror.aarnet.edu.au mirror.nitc.ac.in" + fi + + local tz regional others + tz=$(cat /etc/timezone 2>/dev/null || echo "UTC") + case "$tz" in + Europe/* | Arctic/*) + regional="$eu_mirrors" + others="$us_mirrors $ap_mirrors" + ;; + America/*) + regional="$us_mirrors" + others="$eu_mirrors $ap_mirrors" + ;; + Asia/* | Australia/* | Pacific/*) + regional="$ap_mirrors" + others="$eu_mirrors $us_mirrors" + ;; + *) + regional="" + others="$eu_mirrors $us_mirrors $ap_mirrors" + ;; + esac + + echo 'Acquire::By-Hash "no";' >/etc/apt/apt.conf.d/99no-by-hash + + _try_apt_mirror() { + local m=$1 + for src in /etc/apt/sources.list.d/debian.sources /etc/apt/sources.list; do + [[ -f "$src" ]] && sed -i "s|URIs: http[s]*://[^/]*/|URIs: http://${m}/|g; s|deb http[s]*://[^/]*/|deb http://${m}/|g" "$src" + done + rm -rf /var/lib/apt/lists/* + local out + 
out=$(apt-get update 2>&1) + if echo "$out" | grep -qi "hashsum\|hash sum"; then + msg_warn "Mirror ${m} failed (hash mismatch)" + return 1 + elif echo "$out" | grep -qi "SSL\|certificate"; then + msg_warn "Mirror ${m} failed (SSL/certificate error)" + return 1 + elif echo "$out" | grep -q "^E:"; then + msg_warn "Mirror ${m} failed (apt-get update error)" + return 1 + else + msg_ok "CDN set to ${m}: tests passed" + return 0 + fi + } + + _scan_reachable() { + local result="" + for m in $1; do + if timeout 2 bash -c "echo >/dev/tcp/$m/80" 2>/dev/null; then + result="$result $m" + fi + done + echo "$result" | xargs + } + + local apt_ok=false + + # Phase 1: Scan global mirrors first (independent of local CDN issues) + local others_ok + others_ok=$(_scan_reachable "$others") + local others_pick + others_pick=$(printf '%s\n' $others_ok | shuf | head -3 | xargs) + + for mirror in $others_pick; do + msg_custom "${INFO}" "${YW}" "Attempting mirror: ${mirror}" + if _try_apt_mirror "$mirror"; then + apt_ok=true + break + fi + done + + # Phase 2: Try primary mirror + if [[ "$apt_ok" != true ]]; then + local primary + if [[ "$distro" == "ubuntu" ]]; then + primary="archive.ubuntu.com" + else + primary="ftp.debian.org" + fi + if timeout 2 bash -c "echo >/dev/tcp/$primary/80" 2>/dev/null; then + msg_custom "${INFO}" "${YW}" "Attempting mirror: ${primary}" + if _try_apt_mirror "$primary"; then + apt_ok=true + fi + fi + fi + + # Phase 3: Fall back to regional mirrors + if [[ "$apt_ok" != true ]]; then + local regional_ok + regional_ok=$(_scan_reachable "$regional") + local regional_pick + regional_pick=$(printf '%s\n' $regional_ok | shuf | head -3 | xargs) + + for mirror in $regional_pick; do + msg_custom "${INFO}" "${YW}" "Attempting mirror: ${mirror}" + if _try_apt_mirror "$mirror"; then + apt_ok=true + break + fi + done + fi + + # Phase 4: All auto mirrors failed, prompt user + if [[ "$apt_ok" != true ]]; then + msg_warn "Multiple mirrors failed (possible CDN synchronization 
issue)." + if [[ "$distro" == "ubuntu" ]]; then + msg_warn "Find Ubuntu mirrors at: https://launchpad.net/ubuntu/+archivemirrors" + else + msg_warn "Find Debian mirrors at: https://www.debian.org/mirror/list" + fi + local custom_mirror + while true; do + read -rp " Enter a mirror hostname (or 'skip' to abort): " custom_mirror /etc/apt/apt.conf.d/00aptproxy + local _proxy_raw="${CACHER_IP}" + local _proxy_host _proxy_port _proxy_url + # Parse host and port from URL or plain IP/hostname + _proxy_host=$(echo "$_proxy_raw" | sed -e 's|https\?://||' -e 's|/.*||' | cut -d: -f1) + _proxy_port=$(echo "$_proxy_raw" | sed -e 's|https\?://||' -e 's|/.*||' | cut -s -d: -f2) + if [[ "$_proxy_raw" =~ ^https?:// ]]; then + # Full URL provided — use as-is for proxy output, extract port for nc check + _proxy_url="$_proxy_raw" + _proxy_port="${_proxy_port:-80}" + else + # Legacy: plain IP or hostname — default to http + port 3142 + _proxy_port="${_proxy_port:-3142}" + _proxy_url="http://${_proxy_raw}:${_proxy_port}" + fi cat </usr/local/bin/apt-proxy-detect.sh #!/bin/bash -if nc -w1 -z "${CACHER_IP}" 3142; then - echo -n "http://${CACHER_IP}:3142" +if nc -w1 -z "${_proxy_host}" ${_proxy_port}; then + echo -n "${_proxy_url}" else echo -n "DIRECT" fi @@ -309,14 +490,14 @@ customize() { if [[ "$PASSWORD" == "" ]]; then msg_info "Customizing Container" GETTY_OVERRIDE="/etc/systemd/system/container-getty@1.service.d/override.conf" - mkdir -p $(dirname $GETTY_OVERRIDE) - cat <$GETTY_OVERRIDE + mkdir -p "$(dirname "$GETTY_OVERRIDE")" + cat <"$GETTY_OVERRIDE" [Service] ExecStart= ExecStart=-/sbin/agetty --autologin root --noclear --keep-baud tty%I 115200,38400,9600 \$TERM EOF systemctl daemon-reload - systemctl restart $(basename $(dirname $GETTY_OVERRIDE) | sed 's/\.d//') + systemctl restart "$(basename "$(dirname "$GETTY_OVERRIDE")" | sed 's/\.d//')" msg_ok "Customized Container" fi echo "bash -c \"\$(curl -fsSL 
https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/${app}.sh)\"" >/usr/bin/update diff --git a/misc/tools.func b/misc/tools.func index c59fd15ab..887db4671 100644 --- a/misc/tools.func +++ b/misc/tools.func @@ -84,7 +84,7 @@ curl_with_retry() { # DNS pre-check - fail fast if host is unresolvable if ! getent hosts "$host" &>/dev/null; then debug_log "DNS resolution failed for $host" - return 1 + return 6 fi while [[ $attempt -le $retries ]]; do @@ -105,11 +105,13 @@ curl_with_retry() { fi fi - debug_log "curl attempt $attempt failed, waiting ${backoff}s before retry..." + debug_log "curl attempt $attempt failed (timeout=${timeout}s), waiting ${backoff}s before retry..." sleep "$backoff" # Exponential backoff: 1, 2, 4, 8... capped at 30s backoff=$((backoff * 2)) ((backoff > 30)) && backoff=30 + # Double --max-time on each retry so slow connections can finish + timeout=$((timeout * 2)) ((attempt++)) done @@ -118,7 +120,7 @@ curl_with_retry() { return 0 else debug_log "curl FAILED after $retries attempts: $url" - return 1 + return 7 fi } @@ -172,14 +174,16 @@ curl_api_with_retry() { return 0 fi - debug_log "curl API attempt $attempt failed (HTTP $http_code), waiting ${attempt}s..." + debug_log "curl API attempt $attempt failed (HTTP $http_code, timeout=${timeout}s), waiting ${attempt}s..." 
sleep "$attempt" + # Double --max-time on each retry so slow connections can finish + timeout=$((timeout * 2)) ((attempt++)) done debug_log "curl API FAILED after $retries attempts: $url" echo "$http_code" - return 1 + return 7 } # ------------------------------------------------------------------------------ @@ -238,7 +242,7 @@ download_gpg_key() { # Process based on mode if [[ "$mode" == "dearmor" ]]; then - if gpg --dearmor --yes -o "$output" <"$temp_key" 2>/dev/null; then + if gpg --dearmor --yes -o "$output" <"$temp_key" 2>/dev/null && [[ -s "$output" ]]; then rm -f "$temp_key" debug_log "GPG key installed (dearmored): $output" return 0 @@ -258,7 +262,7 @@ download_gpg_key() { rm -f "$temp_key" debug_log "GPG key download FAILED after $retries attempts: $url" - return 1 + return 7 } # ------------------------------------------------------------------------------ @@ -400,7 +404,7 @@ prepare_repository_setup() { cleanup_tool_keyrings "${repo_names[@]}" # Ensure APT is in working state - ensure_apt_working || return 1 + ensure_apt_working || return 100 return 0 } @@ -473,7 +477,7 @@ install_packages_with_retry() { done msg_error "Failed to install packages after $((max_retries + 1)) attempts: ${packages[*]}" - return 1 + return 100 } # ------------------------------------------------------------------------------ @@ -504,7 +508,7 @@ upgrade_packages_with_retry() { done msg_error "Failed to upgrade packages after $((max_retries + 1)) attempts: ${packages[*]}" - return 1 + return 100 } # ------------------------------------------------------------------------------ @@ -520,12 +524,12 @@ is_tool_installed() { case "$tool_name" in mariadb) if command -v mariadb >/dev/null 2>&1; then - installed_version=$(mariadb --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1) + installed_version=$(mariadb --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1 || true) fi ;; mysql) if command -v mysql >/dev/null 2>&1; then - installed_version=$(mysql 
--version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1) + installed_version=$(mysql --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1 || true) fi ;; mongodb | mongod) @@ -535,7 +539,7 @@ is_tool_installed() { ;; node | nodejs) if command -v node >/dev/null 2>&1; then - installed_version=$(node -v 2>/dev/null | grep -oP '^v\K[0-9]+') + installed_version=$(node -v 2>/dev/null | grep -oP '^v\K[0-9]+' || true) fi ;; php) @@ -702,7 +706,7 @@ manage_tool_repository() { mariadb) if [[ -z "$repo_url" || -z "$gpg_key_url" ]]; then msg_error "MariaDB repository requires repo_url and gpg_key_url" - return 1 + return 65 fi # Clean old repos first @@ -726,7 +730,7 @@ manage_tool_repository() { mongodb) if [[ -z "$repo_url" || -z "$gpg_key_url" ]]; then msg_error "MongoDB repository requires repo_url and gpg_key_url" - return 1 + return 65 fi # Clean old repos first @@ -735,7 +739,7 @@ manage_tool_repository() { # Import GPG key with retry logic if ! download_gpg_key "$gpg_key_url" "/etc/apt/keyrings/mongodb-server-${version}.gpg" "dearmor"; then msg_error "Failed to download MongoDB GPG key" - return 1 + return 7 fi chmod 644 "/etc/apt/keyrings/mongodb-server-${version}.gpg" @@ -805,7 +809,7 @@ EOF nodejs) if [[ -z "$repo_url" || -z "$gpg_key_url" ]]; then msg_error "Node.js repository requires repo_url and gpg_key_url" - return 1 + return 65 fi cleanup_old_repo_files "nodesource" @@ -817,7 +821,7 @@ EOF # Download GPG key from NodeSource with retry logic if ! download_gpg_key "$gpg_key_url" "/etc/apt/keyrings/nodesource.gpg" "dearmor"; then msg_error "Failed to import NodeSource GPG key" - return 1 + return 7 fi cat </etc/apt/sources.list.d/nodesource.sources @@ -834,7 +838,7 @@ EOF php) if [[ -z "$gpg_key_url" ]]; then msg_error "PHP repository requires gpg_key_url" - return 1 + return 65 fi cleanup_old_repo_files "php" @@ -842,13 +846,13 @@ EOF # Download and install keyring with retry logic if ! 
curl_with_retry "$gpg_key_url" "/tmp/debsuryorg-archive-keyring.deb"; then msg_error "Failed to download PHP keyring" - return 1 + return 7 fi # Don't use /dev/null redirection for dpkg as it may use background processes dpkg -i /tmp/debsuryorg-archive-keyring.deb >>"$(get_active_logfile)" 2>&1 || { msg_error "Failed to install PHP keyring" rm -f /tmp/debsuryorg-archive-keyring.deb - return 1 + return 100 } rm -f /tmp/debsuryorg-archive-keyring.deb @@ -869,7 +873,7 @@ EOF postgresql) if [[ -z "$gpg_key_url" ]]; then msg_error "PostgreSQL repository requires gpg_key_url" - return 1 + return 65 fi cleanup_old_repo_files "postgresql" @@ -877,7 +881,7 @@ EOF # Import PostgreSQL key with retry logic if ! download_gpg_key "$gpg_key_url" "/etc/apt/keyrings/postgresql.gpg" "dearmor"; then msg_error "Failed to import PostgreSQL GPG key" - return 1 + return 7 fi # Setup repository @@ -896,7 +900,7 @@ EOF *) msg_error "Unknown tool repository: $tool_name" - return 1 + return 65 ;; esac @@ -927,7 +931,7 @@ upgrade_package() { $STD apt install --only-upgrade -y "$package" || { msg_warn "Failed to upgrade $package" - return 1 + return 100 } } @@ -1037,7 +1041,7 @@ ensure_dependencies() { cleanup_orphaned_sources 2>/dev/null || true if ! $STD apt update; then - ensure_apt_working || return 1 + ensure_apt_working || return 100 fi echo "$current_time" >"$apt_cache_file" fi @@ -1052,7 +1056,7 @@ ensure_dependencies() { done if [[ ${#failed[@]} -gt 0 ]]; then msg_error "Failed to install dependencies: ${failed[*]}" - return 1 + return 100 fi } fi @@ -1113,15 +1117,90 @@ is_package_installed() { fi } +# ------------------------------------------------------------------------------ +# validate_github_token() +# Checks a GitHub token via the /user endpoint. 
+# Prints a status message and returns: +# 0 - token is valid +# 1 - token is invalid / expired (HTTP 401) +# 2 - token has no public repo scope (HTTP 200 but missing scope) +# 3 - network/API error +# Also reports expiry date if the token carries an x-oauth-expiry header. +# ------------------------------------------------------------------------------ +validate_github_token() { + local token="${1:-${GITHUB_TOKEN:-}}" + [[ -z "$token" ]] && return 3 + + local response headers http_code expiry_date scopes + headers=$(mktemp) + response=$(curl -sSL -w "%{http_code}" \ + -D "$headers" \ + -o /dev/null \ + -H "Authorization: Bearer $token" \ + -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + "https://api.github.com/user" 2>/dev/null) || { + rm -f "$headers" + return 3 + } + http_code="$response" + + # Read expiry header (fine-grained PATs carry this) + expiry_date=$(grep -i '^github-authentication-token-expiration:' "$headers" | + sed 's/.*: *//' | tr -d '\r\n' || true) + # Read token scopes (classic PATs) + scopes=$(grep -i '^x-oauth-scopes:' "$headers" | + sed 's/.*: *//' | tr -d '\r\n' || true) + rm -f "$headers" + + case "$http_code" in + 200) + if [[ -n "$expiry_date" ]]; then + msg_ok "GitHub token is valid (expires: $expiry_date)." + else + msg_ok "GitHub token is valid (no expiry / fine-grained PAT)." + fi + # Warn if classic PAT has no public_repo scope + if [[ -n "$scopes" && "$scopes" != *"public_repo"* && "$scopes" != *"repo"* ]]; then + msg_warn "Token has no 'public_repo' scope - private repos and some release APIs may fail." + return 2 + fi + return 0 + ;; + 401) + msg_error "GitHub token is invalid or expired (HTTP 401)." + return 1 + ;; + *) + msg_warn "GitHub token validation returned HTTP $http_code - treating as valid." 
+ return 0 + ;; + esac +} + # ------------------------------------------------------------------------------ # Prompt user to enter a GitHub Personal Access Token (PAT) interactively # Returns 0 if a valid token was provided, 1 otherwise # ------------------------------------------------------------------------------ prompt_for_github_token() { if [[ ! -t 0 ]]; then + # Non-interactive: pick up var_github_token if set (from default.vars / app.vars / env) + if [[ -z "${GITHUB_TOKEN:-}" && -n "${var_github_token:-}" ]]; then + export GITHUB_TOKEN="${var_github_token}" + msg_ok "GitHub token loaded from var_github_token." + return 0 + fi return 1 fi + # Prefer var_github_token when already set and no interactive override needed + if [[ -z "${GITHUB_TOKEN:-}" && -n "${var_github_token:-}" ]]; then + export GITHUB_TOKEN="${var_github_token}" + msg_ok "GitHub token loaded from var_github_token." + validate_github_token || true + return 0 + fi + local reply read -rp "${TAB}Would you like to enter a GitHub Personal Access Token (PAT)? [y/N]: " reply reply="${reply:-n}" @@ -1143,10 +1222,16 @@ prompt_for_github_token() { msg_warn "Token must not contain spaces. Please try again." continue fi - break + # Validate before accepting + export GITHUB_TOKEN="$token" + if validate_github_token "$token"; then + break + else + msg_warn "Please enter a valid token, or press Ctrl+C to abort." + unset GITHUB_TOKEN + fi done - export GITHUB_TOKEN="$token" msg_ok "GitHub token has been set." 
return 0 } @@ -1187,7 +1272,7 @@ github_api_call() { header_args=(-H "Authorization: Bearer $GITHUB_TOKEN") continue fi - return 1 + return 22 ;; 403) # Rate limit - check if we can retry @@ -1207,11 +1292,11 @@ github_api_call() { fi msg_error "To increase the limit, export a GitHub token before running the script:" msg_error " export GITHUB_TOKEN=\"ghp_your_token_here\"" - return 1 + return 22 ;; 404) msg_error "GitHub repository or release not found (HTTP 404): $url" - return 1 + return 22 ;; 000 | "") if [[ $attempt -lt $max_retries ]]; then @@ -1221,7 +1306,7 @@ github_api_call() { fi msg_error "GitHub API connection failed (no response)." msg_error "Check your network/DNS: curl -sSL https://api.github.com/rate_limit" - return 1 + return 22 ;; *) if [[ $attempt -lt $max_retries ]]; then @@ -1230,14 +1315,14 @@ github_api_call() { continue fi msg_error "GitHub API call failed (HTTP $http_code)." - return 1 + return 22 ;; esac ((attempt++)) done msg_error "GitHub API call failed after ${max_retries} attempts: ${url}" - return 1 + return 22 } # ------------------------------------------------------------------------------ @@ -1261,7 +1346,7 @@ codeberg_api_call() { ;; 401) msg_error "Codeberg API authentication failed (HTTP 401)." - return 1 + return 22 ;; 403) # Rate limit - retry @@ -1272,11 +1357,11 @@ codeberg_api_call() { continue fi msg_error "Codeberg API rate limit exceeded (HTTP 403)." - return 1 + return 22 ;; 404) msg_error "Codeberg repository or release not found (HTTP 404): $url" - return 1 + return 22 ;; 000 | "") if [[ $attempt -lt $max_retries ]]; then @@ -1285,7 +1370,7 @@ codeberg_api_call() { fi msg_error "Codeberg API connection failed (no response)." msg_error "Check your network/DNS: curl -sSL https://codeberg.org" - return 1 + return 22 ;; *) if [[ $attempt -lt $max_retries ]]; then @@ -1293,13 +1378,13 @@ codeberg_api_call() { continue fi msg_error "Codeberg API call failed (HTTP $http_code)." 
- return 1 + return 22 ;; esac done msg_error "Codeberg API call failed after ${max_retries} attempts: ${url}" - return 1 + return 22 } should_upgrade() { @@ -1383,7 +1468,7 @@ download_file() { done msg_error "Failed to download: $url" - return 1 + return 250 } # ------------------------------------------------------------------------------ @@ -1645,7 +1730,7 @@ wait_for_apt() { while is_apt_locked; do if [[ $waited -ge $max_wait ]]; then msg_error "Timeout waiting for apt to be available" - return 1 + return 100 fi sleep 5 @@ -1771,7 +1856,7 @@ ensure_apt_working() { # Final attempt if ! $STD apt update; then msg_error "Cannot update package lists - APT is critically broken" - return 1 + return 100 fi fi fi @@ -1795,7 +1880,7 @@ setup_deb822_repo() { # Validate required parameters if [[ -z "$name" || -z "$gpg_url" || -z "$repo_url" || -z "$suite" ]]; then msg_error "setup_deb822_repo: missing required parameters (name=$name repo=$repo_url suite=$suite)" - return 1 + return 65 fi # Cleanup @@ -1804,16 +1889,16 @@ setup_deb822_repo() { mkdir -p /etc/apt/keyrings || { msg_error "Failed to create /etc/apt/keyrings" - return 1 + return 252 } # Import GPG key (auto-detect binary vs ASCII-armored format) local tmp_gpg - tmp_gpg=$(mktemp) || return 1 + tmp_gpg=$(mktemp) || return 252 curl -fsSL "$gpg_url" -o "$tmp_gpg" || { msg_error "Failed to download GPG key for ${name}" rm -f "$tmp_gpg" - return 1 + return 7 } if grep -q "BEGIN PGP" "$tmp_gpg" 2>/dev/null; then @@ -1821,14 +1906,14 @@ setup_deb822_repo() { gpg --dearmor --yes -o "/etc/apt/keyrings/${name}.gpg" <"$tmp_gpg" || { msg_error "Failed to install GPG key for ${name}" rm -f "$tmp_gpg" - return 1 + return 251 } else # Already binary — copy directly cp -f "$tmp_gpg" "/etc/apt/keyrings/${name}.gpg" || { msg_error "Failed to install GPG key for ${name}" rm -f "$tmp_gpg" - return 1 + return 252 } fi rm -f "$tmp_gpg" @@ -1839,8 +1924,8 @@ setup_deb822_repo() { echo "Types: deb" echo "URIs: $repo_url" echo "Suites: 
$suite" - # Flat repositories (suite="./" or absolute path) must not have Components - if [[ "$suite" != "./" && -n "$component" ]]; then + # Flat repositories (suite ending with "/" or "./") must not have Components + if [[ "$suite" != *"/" && -n "$component" ]]; then echo "Components: $component" fi [[ -n "$architectures" ]] && echo "Architectures: $architectures" @@ -1911,7 +1996,7 @@ safe_service_restart() { msg_error "Failed to start $service after $max_retries retries" systemctl status "$service" --no-pager -l 2>/dev/null | head -20 || true - return 1 + return 150 } # ------------------------------------------------------------------------------ @@ -1922,13 +2007,13 @@ enable_and_start_service() { if ! systemctl enable "$service" &>/dev/null; then msg_error "Failed to enable service: $service" - return 1 + return 150 fi if ! systemctl start "$service" &>/dev/null; then msg_error "Failed to start $service" systemctl status "$service" --no-pager - return 1 + return 150 fi return 0 @@ -1965,7 +2050,7 @@ extract_version_from_json() { if [[ -z "$version" ]]; then msg_warn "JSON field '${field}' is empty in API response" - return 1 + return 250 fi if [[ "$strip_v" == "true" ]]; then @@ -1975,6 +2060,65 @@ extract_version_from_json() { fi } +# ------------------------------------------------------------------------------ +# Get latest GitHub tag (for repos that only publish tags, not releases). +# +# Usage: +# get_latest_gh_tag "owner/repo" [prefix] +# +# Arguments: +# $1 - GitHub repo (owner/repo) +# $2 - Optional prefix filter (e.g., "v" to only match tags starting with "v") +# +# Returns: +# Latest tag name (stdout), or returns 1 on failure +# ------------------------------------------------------------------------------ +get_latest_gh_tag() { + local repo="$1" + local prefix="${2:-}" + local temp_file + temp_file=$(mktemp) + + local tag="" + + if [[ -n "$prefix" ]]; then + # Use git/matching-refs API for server-side prefix filtering. 
This avoids + # paging through unrelated tags (e.g. mongodb/mongo-tools where 100.x tags + # only appear after page 4 of /tags). Returns ALL tags matching the prefix + # in a single call, sorted lexicographically ascending; we pick the + # highest version using `sort -V`. + if ! github_api_call "https://api.github.com/repos/${repo}/git/matching-refs/tags/${prefix}" "$temp_file"; then + rm -f "$temp_file" + return 22 + fi + + local count + count=$(jq 'length' "$temp_file" 2>/dev/null || echo 0) + if [[ "$count" -gt 0 ]]; then + tag=$(jq -r '.[].ref' "$temp_file" | + sed 's|^refs/tags/||' | + sort -V | + tail -n1) + fi + else + # No prefix: just take the first (newest) tag from /tags + if ! github_api_call "https://api.github.com/repos/${repo}/tags?per_page=1" "$temp_file"; then + rm -f "$temp_file" + return 22 + fi + tag=$(jq -r '.[0].name // empty' "$temp_file") + fi + + rm -f "$temp_file" + + if [[ -z "$tag" ]]; then + msg_error "No tags found for ${repo}" + return 250 + fi + + echo "$tag" +} + # ------------------------------------------------------------------------------ # Get latest GitHub release version with fallback to tags # Usage: get_latest_github_release "owner/repo" [strip_v] [include_prerelease] @@ -1987,7 +2131,7 @@ get_latest_github_release() { if ! github_api_call "https://api.github.com/repos/${repo}/releases/latest" "$temp_file"; then msg_warn "GitHub API call failed for ${repo}" rm -f "$temp_file" - return 1 + return 22 fi local version @@ -1996,7 +2140,7 @@ get_latest_github_release() { if [[ -z "$version" ]]; then msg_error "Could not determine latest version for ${repo}" - return 1 + return 250 fi echo "$version" @@ -2014,7 +2158,7 @@ get_latest_codeberg_release() { if ! 
codeberg_api_call "https://codeberg.org/api/v1/repos/${repo}/releases" "$temp_file"; then msg_warn "Codeberg API call failed for ${repo}" rm -f "$temp_file" - return 1 + return 22 fi local version @@ -2029,7 +2173,7 @@ get_latest_codeberg_release() { if [[ -z "$version" ]]; then msg_error "Could not determine latest version for ${repo}" - return 1 + return 250 fi echo "$version" @@ -2069,107 +2213,135 @@ verify_gpg_fingerprint() { fi msg_error "GPG fingerprint mismatch! Expected: $expected_fingerprint, Got: $actual_fingerprint" - return 1 + return 65 } # ------------------------------------------------------------------------------ -# Get latest GitHub tag for a repository. +# Fetches and deploys a GitHub tag-based source tarball. # # Description: -# - Queries the GitHub API for tags (not releases) -# - Useful for repos that only create tags, not full releases -# - Supports optional prefix filter and version-only extraction -# - Returns the latest tag name (printed to stdout) +# - Downloads the source tarball for a given tag from GitHub +# - Extracts to the target directory +# - Writes the version to ~/. # # Usage: -# MONGO_VERSION=$(get_latest_gh_tag "mongodb/mongo-tools") -# LATEST=$(get_latest_gh_tag "owner/repo" "v") # only tags starting with "v" -# LATEST=$(get_latest_gh_tag "owner/repo" "" "true") # strip leading "v" +# fetch_and_deploy_gh_tag "guacd" "apache/guacamole-server" +# fetch_and_deploy_gh_tag "guacd" "apache/guacamole-server" "latest" "/opt/guacamole-server" # # Arguments: -# $1 - GitHub repo (owner/repo) -# $2 - Tag prefix filter (optional, e.g. "v" or "100.") -# $3 - Strip prefix from result (optional, "true" to strip $2 prefix) -# -# Returns: -# 0 on success (tag printed to stdout), 1 on failure +# $1 - App name (used for version file ~/.) 
+# $2 - GitHub repo (owner/repo) +# $3 - Tag version (default: "latest" → auto-detect via get_latest_gh_tag) +# $4 - Target directory (default: /opt/$app) # # Notes: -# - Skips tags containing "rc", "alpha", "beta", "dev", "test" -# - Sorts by version number (sort -V) to find the latest -# - Respects GITHUB_TOKEN for rate limiting +# - Supports CLEAN_INSTALL=1 to wipe target before extracting +# - For repos that only publish tags, not GitHub Releases # ------------------------------------------------------------------------------ -get_latest_gh_tag() { - local repo="$1" - local prefix="${2:-}" - local strip_prefix="${3:-false}" +fetch_and_deploy_gh_tag() { + local app="$1" + local repo="$2" + local version="${3:-latest}" + local target="${4:-/opt/$app}" + local app_lc="" + app_lc="$(echo "${app,,}" | tr -d ' ')" + local version_file="$HOME/.${app_lc}" - local header_args=() - [[ -n "${GITHUB_TOKEN:-}" ]] && header_args=(-H "Authorization: Bearer $GITHUB_TOKEN") - - local http_code="" - http_code=$(curl -sSL --max-time 20 -w "%{http_code}" -o /tmp/gh_tags.json \ - -H 'Accept: application/vnd.github+json' \ - -H 'X-GitHub-Api-Version: 2022-11-28' \ - "${header_args[@]}" \ - "https://api.github.com/repos/${repo}/tags?per_page=100" 2>/dev/null) || true - - if [[ "$http_code" == "401" ]]; then - msg_error "GitHub API authentication failed (HTTP 401)." - if [[ -n "${GITHUB_TOKEN:-}" ]]; then - msg_error "Your GITHUB_TOKEN appears to be invalid or expired." - else - msg_error "The repository may require authentication. Try: export GITHUB_TOKEN=\"ghp_your_token\"" - fi - rm -f /tmp/gh_tags.json - return 1 + if [[ "$version" == "latest" ]]; then + version=$(get_latest_gh_tag "$repo") || { + msg_error "Failed to determine latest tag for ${repo}" + return 250 + } fi - if [[ "$http_code" == "403" ]]; then - msg_error "GitHub API rate limit exceeded (HTTP 403)." 
- msg_error "To increase the limit, export a GitHub token before running the script:" - msg_error " export GITHUB_TOKEN=\"ghp_your_token_here\"" - rm -f /tmp/gh_tags.json - return 1 + local current_version="" + [[ -f "$version_file" ]] && current_version=$(<"$version_file") + + if [[ "$current_version" == "$version" ]]; then + msg_ok "$app is already up-to-date ($version)" + return 0 fi - if [[ "$http_code" == "000" || -z "$http_code" ]]; then - msg_error "GitHub API connection failed (no response)." - msg_error "Check your network/DNS: curl -sSL https://api.github.com/rate_limit" - rm -f /tmp/gh_tags.json - return 1 + local tmpdir + tmpdir=$(mktemp -d) || return 1 + local tarball_url="https://github.com/${repo}/archive/refs/tags/${version}.tar.gz" + local filename="${app_lc}-${version}.tar.gz" + + msg_info "Fetching GitHub tag: ${app} (${version})" + + download_file "$tarball_url" "$tmpdir/$filename" || { + msg_error "Download failed: $tarball_url" + rm -rf "$tmpdir" + return 7 + } + + mkdir -p "$target" + if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then + rm -rf "${target:?}/"* fi - if [[ "$http_code" != "200" ]] || [[ ! -s /tmp/gh_tags.json ]]; then - msg_error "Unable to fetch tags for ${repo} (HTTP ${http_code})" - rm -f /tmp/gh_tags.json - return 1 - fi + tar --no-same-owner -xzf "$tmpdir/$filename" -C "$tmpdir" || { + msg_error "Failed to extract tarball" + rm -rf "$tmpdir" + return 251 + } - local tags_json - tags_json=$("$version_file" + msg_ok "Deployed ${app} ${version} to ${target}" return 0 } +# ------------------------------------------------------------------------------ +# Checks for new GitHub tag (for repos without releases). +# +# Description: +# - Uses get_latest_gh_tag to fetch the latest tag +# - Compares it to a local cached version (~/.) 
+# - If newer, sets global CHECK_UPDATE_RELEASE and returns 0 +# +# Usage: +# if check_for_gh_tag "guacd" "apache/guacamole-server"; then +# fetch_and_deploy_gh_tag "guacd" "apache/guacamole-server" "$CHECK_UPDATE_RELEASE" "/opt/guacamole-server" +# fi +# +# Notes: +# - For repos that only publish tags, not GitHub Releases +# - Same interface as check_for_gh_release +# ------------------------------------------------------------------------------ +check_for_gh_tag() { + local app="$1" + local repo="$2" + local prefix="${3:-}" + local app_lc="" + app_lc="$(echo "${app,,}" | tr -d ' ')" + local current_file="$HOME/.${app_lc}" + + msg_info "Checking for update: ${app}" + + local latest="" + latest=$(get_latest_gh_tag "$repo" "$prefix") || return 22 + + local current="" + [[ -f "$current_file" ]] && current="$(<"$current_file")" + + if [[ -z "$current" || "$current" != "$latest" ]]; then + CHECK_UPDATE_RELEASE="$latest" + msg_ok "Update available: ${app} ${current:-not installed} → ${latest}" + return 0 + fi + + msg_ok "No update available: ${app} (${latest})" + return 1 +} + # ============================================================================== # INSTALL FUNCTIONS # ============================================================================== @@ -2198,6 +2370,7 @@ check_for_gh_release() { local app="$1" local source="$2" local pinned_version_in="${3:-}" # optional + local pin_reason="${4:-}" # optional reason shown to user local app_lc="" app_lc="$(echo "${app,,}" | tr -d ' ')" local current_file="$HOME/.${app_lc}" @@ -2207,7 +2380,7 @@ check_for_gh_release() { # DNS check if ! 
getent hosts api.github.com >/dev/null 2>&1; then msg_error "Network error: cannot resolve api.github.com" - return 1 + return 6 fi ensure_dependencies jq @@ -2219,6 +2392,35 @@ check_for_gh_release() { # Try /latest endpoint for non-pinned versions (most efficient) local releases_json="" http_code="" + # For pinned versions, query the specific release tag directly + if [[ -n "$pinned_version_in" ]]; then + http_code=$(curl -sSL --max-time 20 -w "%{http_code}" -o /tmp/gh_check.json \ + -H 'Accept: application/vnd.github+json' \ + -H 'X-GitHub-Api-Version: 2022-11-28' \ + "${header_args[@]}" \ + "https://api.github.com/repos/${source}/releases/tags/${pinned_version_in}" 2>/dev/null) || true + + if [[ "$http_code" == "200" ]] && [[ -s /tmp/gh_check.json ]]; then + releases_json="[$(/dev/null 2>&1; then msg_error "Network error: cannot resolve codeberg.org" - return 1 + return 6 fi ensure_dependencies jq @@ -2400,13 +2607,13 @@ check_for_codeberg_release() { -H 'Accept: application/json' \ "https://codeberg.org/api/v1/repos/${source}/releases" 2>/dev/null) || { msg_error "Unable to fetch releases for ${app} (codeberg.org/api/v1/repos/${source}/releases)" - return 1 + return 22 } mapfile -t raw_tags < <(jq -r '.[] | select(.draft==false and .prerelease==false) | .tag_name' <<<"$releases_json") if ((${#raw_tags[@]} == 0)); then msg_error "No stable releases found for ${app}" - return 1 + return 250 fi local clean_tags=() @@ -2451,7 +2658,7 @@ check_for_codeberg_release() { if [[ -z "$match_raw" ]]; then msg_error "Pinned version ${pinned_version_in} not found upstream" - return 1 + return 250 fi if [[ "$current" != "$pin_clean" ]]; then @@ -2460,7 +2667,11 @@ check_for_codeberg_release() { return 0 fi - msg_ok "No update available: ${app} is already on pinned version (${current})" + if [[ -n "$pin_reason" ]]; then + msg_ok "No update available: ${app} (${current}) - update held back: ${pin_reason}" + else + msg_ok "No update available: ${app} (${current}) - update 
 temporarily held back due to issues with newer releases" + fi return 1 fi @@ -2486,6 +2697,8 @@ check_for_codeberg_release() { # ------------------------------------------------------------------------------ create_self_signed_cert() { local APP_NAME="${1:-${APPLICATION}}" + local HOSTNAME="$(hostname -f)" + local IP="$(hostname -I | awk '{print $1}')" local APP_NAME_LC=$(echo "${APP_NAME,,}" | tr -d ' ') local CERT_DIR="/etc/ssl/${APP_NAME_LC}" local CERT_KEY="${CERT_DIR}/${APP_NAME_LC}.key" @@ -2498,17 +2711,17 @@ create_self_signed_cert() { # Use ensure_dependencies for cleaner handling ensure_dependencies openssl || { msg_error "Failed to install OpenSSL" - return 1 + return 100 } mkdir -p "$CERT_DIR" $STD openssl req -new -newkey rsa:2048 -days 365 -nodes -x509 \ - -subj "/CN=${APP_NAME}" \ - -addext "subjectAltName=DNS:${APP_NAME}" \ + -subj "/CN=${HOSTNAME}" \ + -addext "subjectAltName=DNS:${HOSTNAME},DNS:localhost,IP:${IP},IP:127.0.0.1" \ -keyout "$CERT_KEY" \ -out "$CERT_CRT" || { msg_error "Failed to create self-signed certificate" - return 1 + return 150 } chmod 600 "$CERT_KEY" @@ -2538,12 +2751,12 @@ function download_with_progress() { if [[ -z "$content_length" ]]; then if ! curl -fL# -o "$output" "$url"; then msg_error "Download failed: $url" - return 1 + return 7 fi else if ! curl -fsSL "$url" | pv -s "$content_length" >"$output"; then msg_error "Download failed: $url" - return 1 + return 7 fi fi } @@ -2574,6 +2787,30 @@ function ensure_usr_local_bin_persist() { fi } +# ------------------------------------------------------------------------------ +# curl_download - Downloads a file with automatic retry and exponential backoff. +# +# Usage: curl_download <output_file> <url> +# +# Makes up to 5 attempts with increasing --max-time (60/120/240/480/960s). +# Returns 0 on success, 7 if all attempts fail. 
+# ------------------------------------------------------------------------------ +function curl_download() { + local output="$1" + local url="$2" + local timeouts=(60 120 240 480 960) + + for i in "${!timeouts[@]}"; do + if curl --connect-timeout 15 --max-time "${timeouts[$i]}" -fsSL -o "$output" "$url"; then + return 0 + fi + if ((i < ${#timeouts[@]} - 1)); then + msg_warn "Download timed out after ${timeouts[$i]}s, retrying... (attempt $((i + 2))/${#timeouts[@]})" + fi + done + return 7 +} + # ------------------------------------------------------------------------------ # Downloads and deploys latest Codeberg release (source, binary, tarball, asset). # @@ -2631,8 +2868,7 @@ function fetch_and_deploy_codeberg_release() { local app_lc=$(echo "${app,,}" | tr -d ' ') local version_file="$HOME/.${app_lc}" - local api_timeout="--connect-timeout 10 --max-time 60" - local download_timeout="--connect-timeout 15 --max-time 900" + local api_timeouts=(60 120 240) local current_version="" [[ -f "$version_file" ]] && current_version=$(<"$version_file") @@ -2643,7 +2879,7 @@ function fetch_and_deploy_codeberg_release() { if [[ "$mode" == "tag" ]]; then if [[ "$version" == "latest" ]]; then msg_error "Mode 'tag' requires explicit version (not 'latest')" - return 1 + return 65 fi local tag_name="$version" @@ -2657,11 +2893,11 @@ function fetch_and_deploy_codeberg_release() { # DNS check if ! 
getent hosts "codeberg.org" &>/dev/null; then msg_error "DNS resolution failed for codeberg.org – check /etc/resolv.conf or networking" - return 1 + return 6 fi local tmpdir - tmpdir=$(mktemp -d) || return 1 + tmpdir=$(mktemp -d) || return 252 msg_info "Fetching Codeberg tag: $app ($tag_name)" @@ -2672,14 +2908,14 @@ function fetch_and_deploy_codeberg_release() { # Codeberg archive URL format: https://codeberg.org/{owner}/{repo}/archive/{tag}.tar.gz local archive_url="https://codeberg.org/$repo/archive/${tag_name}.tar.gz" - if curl $download_timeout -fsSL -o "$tmpdir/$filename" "$archive_url"; then + if curl_download "$tmpdir/$filename" "$archive_url"; then download_success=true fi if [[ "$download_success" != "true" ]]; then msg_error "Download failed for $app ($tag_name)" rm -rf "$tmpdir" - return 1 + return 250 fi mkdir -p "$target" @@ -2690,7 +2926,7 @@ function fetch_and_deploy_codeberg_release() { tar --no-same-owner -xzf "$tmpdir/$filename" -C "$tmpdir" || { msg_error "Failed to extract tarball" rm -rf "$tmpdir" - return 1 + return 251 } local unpack_dir @@ -2716,26 +2952,28 @@ function fetch_and_deploy_codeberg_release() { # dns pre check if ! 
getent hosts "codeberg.org" &>/dev/null; then msg_error "DNS resolution failed for codeberg.org – check /etc/resolv.conf or networking" - return 1 + return 6 fi - local max_retries=3 retry_delay=2 attempt=1 success=false resp http_code + local attempt=0 success=false resp http_code - while ((attempt <= max_retries)); do - resp=$(curl $api_timeout -fsSL -w "%{http_code}" -o /tmp/codeberg_rel.json "$api_url") && success=true && break - sleep "$retry_delay" - ((attempt++)) + while ((attempt < ${#api_timeouts[@]})); do + resp=$(curl --connect-timeout 10 --max-time "${api_timeouts[$attempt]}" -fsSL -w "%{http_code}" -o /tmp/codeberg_rel.json "$api_url") && success=true && break + attempt=$((attempt + 1)) + if ((attempt < ${#api_timeouts[@]})); then + msg_warn "API request timed out after ${api_timeouts[$((attempt - 1))]}s, retrying... (attempt $((attempt + 1))/${#api_timeouts[@]})" + fi done if ! $success; then - msg_error "Failed to fetch release metadata from $api_url after $max_retries attempts" - return 1 + msg_error "Failed to fetch release metadata from $api_url after ${#api_timeouts[@]} attempts" + return 22 fi http_code="${resp:(-3)}" [[ "$http_code" != "200" ]] && { msg_error "Codeberg API returned HTTP $http_code" - return 1 + return 22 } local json tag_name @@ -2755,7 +2993,7 @@ function fetch_and_deploy_codeberg_release() { fi local tmpdir - tmpdir=$(mktemp -d) || return 1 + tmpdir=$(mktemp -d) || return 252 local filename="" url="" msg_info "Fetching Codeberg release: $app ($version)" @@ -2769,14 +3007,14 @@ function fetch_and_deploy_codeberg_release() { # Codeberg archive URL format local archive_url="https://codeberg.org/$repo/archive/${tag_name}.tar.gz" - if curl $download_timeout -fsSL -o "$tmpdir/$filename" "$archive_url"; then + if curl_download "$tmpdir/$filename" "$archive_url"; then download_success=true fi if [[ "$download_success" != "true" ]]; then msg_error "Download failed for $app ($tag_name)" rm -rf "$tmpdir" - return 1 + return 250 fi mkdir 
-p "$target" @@ -2787,7 +3025,7 @@ function fetch_and_deploy_codeberg_release() { tar --no-same-owner -xzf "$tmpdir/$filename" -C "$tmpdir" || { msg_error "Failed to extract tarball" rm -rf "$tmpdir" - return 1 + return 251 } local unpack_dir unpack_dir=$(find "$tmpdir" -mindepth 1 -maxdepth 1 -type d | head -n1) @@ -2842,14 +3080,14 @@ function fetch_and_deploy_codeberg_release() { if [[ -z "$url_match" ]]; then msg_error "No suitable .deb asset found for $app" rm -rf "$tmpdir" - return 1 + return 252 fi filename="${url_match##*/}" - curl $download_timeout -fsSL -o "$tmpdir/$filename" "$url_match" || { + curl_download "$tmpdir/$filename" "$url_match" || { msg_error "Download failed: $url_match" rm -rf "$tmpdir" - return 1 + return 250 } chmod 644 "$tmpdir/$filename" @@ -2857,7 +3095,7 @@ function fetch_and_deploy_codeberg_release() { $STD dpkg -i "$tmpdir/$filename" || { msg_error "Both apt and dpkg installation failed" rm -rf "$tmpdir" - return 1 + return 100 } } @@ -2868,7 +3106,7 @@ function fetch_and_deploy_codeberg_release() { [[ -z "$pattern" ]] && { msg_error "Mode 'prebuild' requires 6th parameter (asset filename pattern)" rm -rf "$tmpdir" - return 1 + return 65 } local asset_url="" @@ -2885,14 +3123,14 @@ function fetch_and_deploy_codeberg_release() { [[ -z "$asset_url" ]] && { msg_error "No asset matching '$pattern' found" rm -rf "$tmpdir" - return 1 + return 252 } filename="${asset_url##*/}" - curl $download_timeout -fsSL -o "$tmpdir/$filename" "$asset_url" || { + curl_download "$tmpdir/$filename" "$asset_url" || { msg_error "Download failed: $asset_url" rm -rf "$tmpdir" - return 1 + return 250 } local unpack_tmp @@ -2907,18 +3145,18 @@ function fetch_and_deploy_codeberg_release() { unzip -q "$tmpdir/$filename" -d "$unpack_tmp" || { msg_error "Failed to extract ZIP archive" rm -rf "$tmpdir" "$unpack_tmp" - return 1 + return 251 } elif [[ "$filename" == *.tar.* || "$filename" == *.tgz ]]; then tar --no-same-owner -xf "$tmpdir/$filename" -C "$unpack_tmp" 
|| { msg_error "Failed to extract TAR archive" rm -rf "$tmpdir" "$unpack_tmp" - return 1 + return 251 } else msg_error "Unsupported archive format: $filename" rm -rf "$tmpdir" "$unpack_tmp" - return 1 + return 251 fi local top_dirs @@ -2932,12 +3170,12 @@ function fetch_and_deploy_codeberg_release() { cp -r "$inner_dir"/* "$target/" || { msg_error "Failed to copy contents from $inner_dir to $target" rm -rf "$tmpdir" "$unpack_tmp" - return 1 + return 252 } else msg_error "Inner directory is empty: $inner_dir" rm -rf "$tmpdir" "$unpack_tmp" - return 1 + return 252 fi shopt -u dotglob nullglob else @@ -2946,12 +3184,12 @@ function fetch_and_deploy_codeberg_release() { cp -r "$unpack_tmp"/* "$target/" || { msg_error "Failed to copy contents to $target" rm -rf "$tmpdir" "$unpack_tmp" - return 1 + return 252 } else msg_error "Unpacked archive is empty" rm -rf "$tmpdir" "$unpack_tmp" - return 1 + return 252 fi shopt -u dotglob nullglob fi @@ -2963,7 +3201,7 @@ function fetch_and_deploy_codeberg_release() { [[ -z "$pattern" ]] && { msg_error "Mode 'singlefile' requires 6th parameter (asset filename pattern)" rm -rf "$tmpdir" - return 1 + return 65 } local asset_url="" @@ -2980,7 +3218,7 @@ function fetch_and_deploy_codeberg_release() { [[ -z "$asset_url" ]] && { msg_error "No asset matching '$pattern' found" rm -rf "$tmpdir" - return 1 + return 252 } filename="${asset_url##*/}" @@ -2990,10 +3228,10 @@ function fetch_and_deploy_codeberg_release() { local target_file="$app" [[ "$use_filename" == "true" ]] && target_file="$filename" - curl $download_timeout -fsSL -o "$target/$target_file" "$asset_url" || { + curl_download "$target/$target_file" "$asset_url" || { msg_error "Download failed: $asset_url" rm -rf "$tmpdir" - return 1 + return 250 } if [[ "$target_file" != *.jar && -f "$target/$target_file" ]]; then @@ -3003,7 +3241,7 @@ function fetch_and_deploy_codeberg_release() { else msg_error "Unknown mode: $mode" rm -rf "$tmpdir" - return 1 + return 65 fi echo "$version" 
>"$version_file" @@ -3090,7 +3328,7 @@ _gh_scan_older_releases() { "${header[@]}" \ "https://api.github.com/repos/${repo}/releases?per_page=15" 2>/dev/null) || { msg_warn "Failed to fetch older releases for ${repo}" - return 1 + return 22 } local count @@ -3160,12 +3398,12 @@ _gh_scan_older_releases() { echo "$releases_list" | jq ".[$i]" return 0 else - return 1 + return 250 fi fi done - return 1 + return 250 } function fetch_and_deploy_gh_release() { @@ -3182,15 +3420,14 @@ function fetch_and_deploy_gh_release() { app="${repo##*/}" if [[ -z "$app" ]]; then msg_error "fetch_and_deploy_gh_release requires app name or valid repo" - return 1 + return 65 fi fi local app_lc=$(echo "${app,,}" | tr -d ' ') local version_file="$HOME/.${app_lc}" - local api_timeout="--connect-timeout 10 --max-time 60" - local download_timeout="--connect-timeout 15 --max-time 900" + local api_timeouts=(60 120 240) local current_version="" [[ -f "$version_file" ]] && current_version=$(<"$version_file") @@ -3207,13 +3444,13 @@ function fetch_and_deploy_gh_release() { gh_host=$(awk -F/ '{print $3}' <<<"$api_url") if ! 
getent hosts "$gh_host" &>/dev/null; then msg_error "DNS resolution failed for $gh_host – check /etc/resolv.conf or networking" - return 1 + return 6 fi - local max_retries=3 retry_delay=2 attempt=1 success=false http_code + local max_retries=${#api_timeouts[@]} retry_delay=2 attempt=1 success=false http_code while ((attempt <= max_retries)); do - http_code=$(curl $api_timeout -sSL -w "%{http_code}" -o /tmp/gh_rel.json "${header[@]}" "$api_url" 2>/dev/null) || true + http_code=$(curl --connect-timeout 10 --max-time "${api_timeouts[$((attempt - 1))]:-240}" -sSL -w "%{http_code}" -o /tmp/gh_rel.json "${header[@]}" "$api_url" 2>/dev/null) || true if [[ "$http_code" == "200" ]]; then success=true break @@ -3239,7 +3476,8 @@ function fetch_and_deploy_gh_release() { if prompt_for_github_token; then header=(-H "Authorization: token $GITHUB_TOKEN") retry_delay=2 - attempt=0 + attempt=1 + continue fi fi else @@ -3255,7 +3493,7 @@ function fetch_and_deploy_gh_release() { elif [[ "$http_code" != "401" ]]; then msg_error "Failed to fetch release metadata (HTTP $http_code)" fi - return 1 + return 22 fi local json tag_name @@ -3287,10 +3525,10 @@ function fetch_and_deploy_gh_release() { local direct_tarball_url="https://github.com/$repo/archive/refs/tags/$tag_name.tar.gz" filename="${app_lc}-${version_safe}.tar.gz" - curl $download_timeout -fsSL -o "$tmpdir/$filename" "$direct_tarball_url" || { + curl_download "$tmpdir/$filename" "$direct_tarball_url" || { msg_error "Download failed: $direct_tarball_url" rm -rf "$tmpdir" - return 1 + return 250 } mkdir -p "$target" @@ -3301,7 +3539,7 @@ function fetch_and_deploy_gh_release() { tar --no-same-owner -xzf "$tmpdir/$filename" -C "$tmpdir" || { msg_error "Failed to extract tarball" rm -rf "$tmpdir" - return 1 + return 251 } local unpack_dir unpack_dir=$(find "$tmpdir" -mindepth 1 -maxdepth 1 -type d | head -n1) @@ -3392,14 +3630,14 @@ function fetch_and_deploy_gh_release() { if [[ -z "$url_match" ]]; then msg_error "No suitable .deb 
asset found for $app" rm -rf "$tmpdir" - return 1 + return 252 fi filename="${url_match##*/}" - curl $download_timeout -fsSL -o "$tmpdir/$filename" "$url_match" || { + curl_download "$tmpdir/$filename" "$url_match" || { msg_error "Download failed: $url_match" rm -rf "$tmpdir" - return 1 + return 250 } chmod 644 "$tmpdir/$filename" @@ -3412,7 +3650,7 @@ function fetch_and_deploy_gh_release() { SYSTEMD_OFFLINE=1 $STD dpkg -i "$tmpdir/$filename" || { msg_error "Both apt and dpkg installation failed" rm -rf "$tmpdir" - return 1 + return 100 } } @@ -3423,7 +3661,7 @@ function fetch_and_deploy_gh_release() { [[ -z "$pattern" ]] && { msg_error "Mode 'prebuild' requires 6th parameter (asset filename pattern)" rm -rf "$tmpdir" - return 1 + return 65 } local asset_url="" @@ -3459,14 +3697,14 @@ function fetch_and_deploy_gh_release() { [[ -z "$asset_url" ]] && { msg_error "No asset matching '$pattern' found" rm -rf "$tmpdir" - return 1 + return 252 } filename="${asset_url##*/}" - curl $download_timeout -fsSL -o "$tmpdir/$filename" "$asset_url" || { + curl_download "$tmpdir/$filename" "$asset_url" || { msg_error "Download failed: $asset_url" rm -rf "$tmpdir" - return 1 + return 250 } local unpack_tmp @@ -3481,18 +3719,18 @@ function fetch_and_deploy_gh_release() { unzip -q "$tmpdir/$filename" -d "$unpack_tmp" || { msg_error "Failed to extract ZIP archive" rm -rf "$tmpdir" "$unpack_tmp" - return 1 + return 251 } elif [[ "$filename" == *.tar.* || "$filename" == *.tgz || "$filename" == *.txz ]]; then tar --no-same-owner -xf "$tmpdir/$filename" -C "$unpack_tmp" || { msg_error "Failed to extract TAR archive" rm -rf "$tmpdir" "$unpack_tmp" - return 1 + return 251 } else msg_error "Unsupported archive format: $filename" rm -rf "$tmpdir" "$unpack_tmp" - return 1 + return 65 fi local top_dirs @@ -3507,12 +3745,12 @@ function fetch_and_deploy_gh_release() { cp -r "$inner_dir"/* "$target/" || { msg_error "Failed to copy contents from $inner_dir to $target" rm -rf "$tmpdir" "$unpack_tmp" 
- return 1 + return 252 } else msg_error "Inner directory is empty: $inner_dir" rm -rf "$tmpdir" "$unpack_tmp" - return 1 + return 252 fi shopt -u dotglob nullglob else @@ -3522,12 +3760,12 @@ function fetch_and_deploy_gh_release() { cp -r "$unpack_tmp"/* "$target/" || { msg_error "Failed to copy contents to $target" rm -rf "$tmpdir" "$unpack_tmp" - return 1 + return 252 } else msg_error "Unpacked archive is empty" rm -rf "$tmpdir" "$unpack_tmp" - return 1 + return 252 fi shopt -u dotglob nullglob fi @@ -3539,7 +3777,7 @@ function fetch_and_deploy_gh_release() { [[ -z "$pattern" ]] && { msg_error "Mode 'singlefile' requires 6th parameter (asset filename pattern)" rm -rf "$tmpdir" - return 1 + return 65 } local asset_url="" @@ -3574,7 +3812,7 @@ function fetch_and_deploy_gh_release() { [[ -z "$asset_url" ]] && { msg_error "No asset matching '$pattern' found" rm -rf "$tmpdir" - return 1 + return 252 } filename="${asset_url##*/}" @@ -3584,10 +3822,10 @@ function fetch_and_deploy_gh_release() { local target_file="$app" [[ "$use_filename" == "true" ]] && target_file="$filename" - curl $download_timeout -fsSL -o "$target/$target_file" "$asset_url" || { + curl_download "$target/$target_file" "$asset_url" || { msg_error "Download failed: $asset_url" rm -rf "$tmpdir" - return 1 + return 250 } if [[ "$target_file" != *.jar && -f "$target/$target_file" ]]; then @@ -3597,7 +3835,7 @@ function fetch_and_deploy_gh_release() { else msg_error "Unknown mode: $mode" rm -rf "$tmpdir" - return 1 + return 65 fi echo "$version" >"$version_file" @@ -3619,7 +3857,7 @@ function setup_adminer() { mkdir -p /var/www/localhost/htdocs/adminer if ! 
curl_with_retry "https://github.com/vrana/adminer/releases/latest/download/adminer.php" "/var/www/localhost/htdocs/adminer/index.php"; then msg_error "Failed to download Adminer" - return 1 + return 250 fi cache_installed_version "adminer" "latest-alpine" msg_ok "Setup Adminer (Alpine)" @@ -3628,11 +3866,11 @@ function setup_adminer() { ensure_dependencies adminer $STD a2enconf adminer || { msg_error "Failed to enable Adminer Apache config" - return 1 + return 150 } $STD systemctl reload apache2 || { msg_error "Failed to reload Apache" - return 1 + return 150 } local VERSION VERSION=$(dpkg -s adminer 2>/dev/null | grep '^Version:' | awk '{print $2}' 2>/dev/null || echo 'unknown') @@ -3685,19 +3923,19 @@ function setup_composer() { if ! curl_with_retry "https://getcomposer.org/installer" "/tmp/composer-setup.php"; then msg_error "Failed to download Composer installer" - return 1 + return 250 fi $STD php /tmp/composer-setup.php --install-dir=/usr/local/bin --filename=composer || { msg_error "Failed to install Composer" rm -f /tmp/composer-setup.php - return 1 + return 150 } rm -f /tmp/composer-setup.php if [[ ! -x "$COMPOSER_BIN" ]]; then msg_error "Composer installation failed" - return 1 + return 127 fi chmod +x "$COMPOSER_BIN" @@ -3754,12 +3992,12 @@ function setup_ffmpeg() { if ! 
CURL_TIMEOUT=300 curl_with_retry "https://johnvansickle.com/ffmpeg/releases/ffmpeg-release-${ffmpeg_arch}-static.tar.xz" "$TMP_DIR/ffmpeg.tar.xz"; then msg_error "Failed to download FFmpeg binary" rm -rf "$TMP_DIR" - return 1 + return 250 fi tar -xf "$TMP_DIR/ffmpeg.tar.xz" -C "$TMP_DIR" || { msg_error "Failed to extract FFmpeg binary" rm -rf "$TMP_DIR" - return 1 + return 251 } local EXTRACTED_DIR EXTRACTED_DIR=$(find "$TMP_DIR" -maxdepth 1 -type d -name "ffmpeg-*") @@ -3809,14 +4047,19 @@ function setup_ffmpeg() { DEPS+=( libx264-dev libx265-dev libvpx-dev libmp3lame-dev libfreetype6-dev libass-dev libopus-dev libvorbis-dev - libdav1d-dev libsvtav1-dev zlib1g-dev libnuma-dev + libdav1d-dev zlib1g-dev libnuma-dev libva-dev libdrm-dev ) + if apt-cache show libsvtav1enc-dev &>/dev/null; then + DEPS+=(libsvtav1enc-dev) + elif apt-cache show libsvtav1-dev &>/dev/null; then + DEPS+=(libsvtav1-dev) + fi ;; *) msg_error "Invalid FFMPEG_TYPE: $TYPE" rm -rf "$TMP_DIR" - return 1 + return 65 ;; esac @@ -3841,19 +4084,19 @@ function setup_ffmpeg() { if ! CURL_TIMEOUT=300 curl_with_retry "https://johnvansickle.com/ffmpeg/releases/ffmpeg-release-${ffmpeg_arch}-static.tar.xz" "$TMP_DIR/ffmpeg.tar.xz"; then msg_error "Failed to download FFmpeg pre-built binary" rm -rf "$TMP_DIR" - return 1 + return 250 fi tar -xJf "$TMP_DIR/ffmpeg.tar.xz" -C "$TMP_DIR" || { msg_error "Failed to extract FFmpeg binary archive" rm -rf "$TMP_DIR" - return 1 + return 251 } if ! 
cp "$TMP_DIR/ffmpeg-"*/ffmpeg /usr/local/bin/ffmpeg 2>/dev/null; then msg_error "Failed to install FFmpeg binary" rm -rf "$TMP_DIR" - return 1 + return 150 fi cache_installed_version "ffmpeg" "static" @@ -3865,13 +4108,13 @@ function setup_ffmpeg() { tar -xzf "$TMP_DIR/ffmpeg.tar.gz" -C "$TMP_DIR" || { msg_error "Failed to extract FFmpeg source" rm -rf "$TMP_DIR" - return 1 + return 251 } cd "$TMP_DIR/FFmpeg-"* || { msg_error "Source extraction failed" rm -rf "$TMP_DIR" - return 1 + return 251 } local args=( @@ -3896,23 +4139,23 @@ function setup_ffmpeg() { if [[ ${#args[@]} -eq 0 ]]; then msg_error "FFmpeg configure args array is empty" rm -rf "$TMP_DIR" - return 1 + return 65 fi $STD ./configure "${args[@]}" || { msg_error "FFmpeg configure failed" rm -rf "$TMP_DIR" - return 1 + return 150 } $STD make -j"$(nproc)" || { msg_error "FFmpeg compilation failed" rm -rf "$TMP_DIR" - return 1 + return 150 } $STD make install || { msg_error "FFmpeg installation failed" rm -rf "$TMP_DIR" - return 1 + return 150 } echo "/usr/local/lib" >/etc/ld.so.conf.d/ffmpeg.conf $STD ldconfig @@ -3920,13 +4163,13 @@ function setup_ffmpeg() { ldconfig -p 2>/dev/null | grep libavdevice >/dev/null || { msg_error "libavdevice not registered with dynamic linker" rm -rf "$TMP_DIR" - return 1 + return 150 } if ! command -v ffmpeg &>/dev/null; then msg_error "FFmpeg installation failed" rm -rf "$TMP_DIR" - return 1 + return 150 fi local FINAL_VERSION @@ -3955,7 +4198,7 @@ function setup_go() { aarch64) ARCH="arm64" ;; *) msg_error "Unsupported architecture: $(uname -m)" - return 1 + return 236 ;; esac @@ -3966,7 +4209,7 @@ function setup_go() { go_version_tmp=$(curl_with_retry "https://go.dev/VERSION?m=text" "-" 2>/dev/null | head -n1 | sed 's/^go//') || true if [[ -z "$go_version_tmp" ]]; then msg_error "Could not determine latest Go version" - return 1 + return 250 fi GO_VERSION="$go_version_tmp" fi @@ -4001,13 +4244,13 @@ function setup_go() { if ! 
CURL_TIMEOUT=300 curl_with_retry "$URL" "$TMP_TAR"; then msg_error "Failed to download Go $GO_VERSION" rm -f "$TMP_TAR" - return 1 + return 250 fi $STD tar -C /usr/local -xzf "$TMP_TAR" || { msg_error "Failed to extract Go tarball" rm -f "$TMP_TAR" - return 1 + return 251 } ln -sf /usr/local/go/bin/go /usr/local/bin/go @@ -4045,7 +4288,7 @@ function setup_gs() { return 0 fi msg_error "Cannot determine Ghostscript version and no existing installation found" - return 1 + return 250 fi local LATEST_VERSION LATEST_VERSION=$(echo "$RELEASE_JSON" | jq -r '.tag_name' | sed 's/^gs//') @@ -4058,7 +4301,7 @@ function setup_gs() { if [[ "$CURRENT_VERSION" == "0" ]]; then msg_error "Ghostscript not installed and cannot determine latest version" rm -rf "$TMP_DIR" - return 1 + return 250 fi rm -rf "$TMP_DIR" return 0 @@ -4081,26 +4324,26 @@ function setup_gs() { if ! CURL_TIMEOUT=180 curl_with_retry "https://github.com/ArtifexSoftware/ghostpdl-downloads/releases/download/gs${LATEST_VERSION}/ghostscript-${LATEST_VERSION_DOTTED}.tar.gz" "$TMP_DIR/ghostscript.tar.gz"; then msg_error "Failed to download Ghostscript" rm -rf "$TMP_DIR" - return 1 + return 250 fi if ! tar -xzf "$TMP_DIR/ghostscript.tar.gz" -C "$TMP_DIR"; then msg_error "Failed to extract Ghostscript archive" rm -rf "$TMP_DIR" - return 1 + return 251 fi # Verify directory exists before cd if [[ ! 
-d "$TMP_DIR/ghostscript-${LATEST_VERSION_DOTTED}" ]]; then msg_error "Ghostscript source directory not found: $TMP_DIR/ghostscript-${LATEST_VERSION_DOTTED}" rm -rf "$TMP_DIR" - return 1 + return 252 fi cd "$TMP_DIR/ghostscript-${LATEST_VERSION_DOTTED}" || { msg_error "Failed to enter Ghostscript source directory" rm -rf "$TMP_DIR" - return 1 + return 252 } ensure_dependencies build-essential libpng-dev zlib1g-dev @@ -4108,17 +4351,17 @@ function setup_gs() { $STD ./configure || { msg_error "Ghostscript configure failed" rm -rf "$TMP_DIR" - return 1 + return 150 } $STD make -j"$(nproc)" || { msg_error "Ghostscript compilation failed" rm -rf "$TMP_DIR" - return 1 + return 150 } $STD make install || { msg_error "Ghostscript installation failed" rm -rf "$TMP_DIR" - return 1 + return 150 } hash -r @@ -4151,6 +4394,8 @@ function setup_gs() { # - NVIDIA requires matching host driver version # ------------------------------------------------------------------------------ function setup_hwaccel() { + local service_user="${1:-}" + # Check if user explicitly disabled GPU in advanced settings # ENABLE_GPU is exported from build.func if [[ "${ENABLE_GPU:-no}" == "no" ]]; then @@ -4402,12 +4647,29 @@ function setup_hwaccel() { # ═══════════════════════════════════════════════════════════════════════════ # Device Permissions # ═══════════════════════════════════════════════════════════════════════════ - _setup_gpu_permissions "$in_ct" + _setup_gpu_permissions "$in_ct" "$service_user" cache_installed_version "hwaccel" "1.0" msg_ok "Setup Hardware Acceleration" } +# ══════════════════════════════════════════════════════════════════════════════ +# Resolve the IGC tag that the latest compute-runtime was built against. +# Must be called AFTER a fetch_and_deploy_gh_release for intel/compute-runtime +# so that /tmp/gh_rel.json contains the compute-runtime release metadata. +# Sets the variable named by $1 (default: igc_tag) to the discovered tag. 
+# ══════════════════════════════════════════════════════════════════════════════ +_resolve_igc_tag() { + local -n _out_ref="${1:-igc_tag}" + _out_ref="latest" + if [[ -f /tmp/gh_rel.json ]]; then + local _body _parsed + _body=$(jq -r '.body // empty' /tmp/gh_rel.json 2>/dev/null) || return 0 + _parsed=$(grep -oP 'intel-graphics-compiler/releases/tag/\K[^\s\)]+' <<<"$_body" | head -1) + [[ -n "$_parsed" ]] && _out_ref="$_parsed" + fi +} + # ══════════════════════════════════════════════════════════════════════════════ # Intel Arc GPU Setup # ══════════════════════════════════════════════════════════════════════════════ @@ -4434,12 +4696,17 @@ _setup_intel_arc() { if [[ "$os_codename" == "trixie" || "$os_codename" == "sid" ]]; then msg_info "Fetching Intel compute-runtime from GitHub for Arc support" + # Fetch a compute-runtime package first so /tmp/gh_rel.json is populated, + # then resolve the matching IGC tag from the release notes. # libigdgmm - bundled in compute-runtime releases fetch_and_deploy_gh_release "libigdgmm12" "intel/compute-runtime" "binary" "latest" "" "libigdgmm12_*_amd64.deb" || true - # Intel Graphics Compiler (note: packages have -2 suffix) - fetch_and_deploy_gh_release "intel-igc-core" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-core-2_*_amd64.deb" || true - fetch_and_deploy_gh_release "intel-igc-opencl" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-opencl-2_*_amd64.deb" || true + local igc_tag + _resolve_igc_tag igc_tag + + # Intel Graphics Compiler – pinned to the version compute-runtime expects + fetch_and_deploy_gh_release "intel-igc-core" "intel/intel-graphics-compiler" "binary" "$igc_tag" "" "intel-igc-core-2_*_amd64.deb" || true + fetch_and_deploy_gh_release "intel-igc-opencl" "intel/intel-graphics-compiler" "binary" "$igc_tag" "" "intel-igc-opencl-2_*_amd64.deb" || true # Compute Runtime (depends on IGC and gmmlib) fetch_and_deploy_gh_release "intel-opencl-icd" "intel/compute-runtime" "binary" 
"latest" "" "intel-opencl-icd_*_amd64.deb" || true @@ -4489,12 +4756,17 @@ _setup_intel_modern() { if [[ "$os_codename" == "trixie" || "$os_codename" == "sid" ]]; then msg_info "Fetching Intel compute-runtime from GitHub" + # Fetch a compute-runtime package first so /tmp/gh_rel.json is populated, + # then resolve the matching IGC tag from the release notes. # libigdgmm first (bundled in compute-runtime releases) fetch_and_deploy_gh_release "libigdgmm12" "intel/compute-runtime" "binary" "latest" "" "libigdgmm12_*_amd64.deb" || true - # Intel Graphics Compiler (note: packages have -2 suffix) - fetch_and_deploy_gh_release "intel-igc-core" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-core-2_*_amd64.deb" || true - fetch_and_deploy_gh_release "intel-igc-opencl" "intel/intel-graphics-compiler" "binary" "latest" "" "intel-igc-opencl-2_*_amd64.deb" || true + local igc_tag + _resolve_igc_tag igc_tag + + # Intel Graphics Compiler – pinned to the version compute-runtime expects + fetch_and_deploy_gh_release "intel-igc-core" "intel/intel-graphics-compiler" "binary" "$igc_tag" "" "intel-igc-core-2_*_amd64.deb" || true + fetch_and_deploy_gh_release "intel-igc-opencl" "intel/intel-graphics-compiler" "binary" "$igc_tag" "" "intel-igc-opencl-2_*_amd64.deb" || true # Compute Runtime fetch_and_deploy_gh_release "intel-opencl-icd" "intel/compute-runtime" "binary" "latest" "" "intel-opencl-icd_*_amd64.deb" || true @@ -4595,9 +4867,6 @@ _setup_amd_apu() { $STD apt -y install firmware-amd-graphics 2>/dev/null || true fi - # ROCm compute stack (OpenCL + HIP) - also works for many APUs - _setup_rocm "$os_id" "$os_codename" - msg_ok "AMD APU configured" } @@ -4645,16 +4914,9 @@ _setup_rocm() { return 0 } - # AMDGPU driver repository (append to same keyring) - { - echo "" - echo "Types: deb" - echo "URIs: https://repo.radeon.com/amdgpu/latest/ubuntu" - echo "Suites: ${ROCM_REPO_CODENAME}" - echo "Components: main" - echo "Architectures: amd64" - echo "Signed-By: 
/etc/apt/keyrings/rocm.gpg" - } >>/etc/apt/sources.list.d/rocm.sources + # Note: The amdgpu/latest/ubuntu repo (kernel driver packages) is intentionally + # omitted — kernel drivers are managed by the Proxmox host, not the LXC container. + # Only the ROCm userspace compute stack is needed inside the container. # Pin ROCm packages to prefer radeon repo cat </etc/apt/preferences.d/rocm-pin-600 @@ -4663,7 +4925,26 @@ Pin: release o=repo.radeon.com Pin-Priority: 600 EOF - $STD apt update + # apt update with retry — repo.radeon.com CDN can be mid-sync (transient size mismatches). + # Run with ERR trap disabled so a transient failure does not abort the entire install. + local _apt_ok=0 + for _attempt in 1 2 3; do + if ( + set +e + apt-get update -qq 2>&1 + exit $? + ) 2>/dev/null; then + _apt_ok=1 + break + fi + msg_warn "apt update failed (attempt ${_attempt}/3) — AMD repo may be temporarily unavailable, retrying in 30s…" + sleep 30 + done + if [[ $_apt_ok -eq 0 ]]; then + msg_warn "apt update still failing after 3 attempts — skipping ROCm install" + return 0 + fi + # Install only runtime packages — full 'rocm' meta-package includes 15GB+ dev tools $STD apt install -y rocm-opencl-runtime rocm-hip-runtime rocm-smi-lib 2>/dev/null || { msg_warn "ROCm runtime install failed — trying minimal set" @@ -4676,8 +4957,8 @@ EOF # Environment (PATH + LD_LIBRARY_PATH) if [[ -d /opt/rocm ]]; then cat <<'ENVEOF' >/etc/profile.d/rocm.sh -export PATH="\$PATH:/opt/rocm/bin" -export LD_LIBRARY_PATH="\${LD_LIBRARY_PATH:+\$LD_LIBRARY_PATH:}/opt/rocm/lib" +export PATH="$PATH:/opt/rocm/bin" +export LD_LIBRARY_PATH="${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}/opt/rocm/lib" ENVEOF chmod +x /etc/profile.d/rocm.sh # Also make available for current session / systemd services @@ -4708,10 +4989,10 @@ _setup_nvidia_gpu() { # Format varies by driver type: # Proprietary: "NVRM version: NVIDIA UNIX x86_64 Kernel Module 550.54.14 Thu..." 
# Open: "NVRM version: NVIDIA UNIX Open Kernel Module for x86_64 590.48.01 Release..." - # Use regex to extract version number (###.##.## pattern) + # Use regex to extract version number (###.##.## or ###.## pattern) local nvidia_host_version="" if [[ -f /proc/driver/nvidia/version ]]; then - nvidia_host_version=$(grep -oP '\d{3,}\.\d+\.\d+' /proc/driver/nvidia/version 2>/dev/null | head -1) + nvidia_host_version=$(grep -oP '\d{3,}\.\d+(\.\d+)?' /proc/driver/nvidia/version 2>/dev/null | head -1 || true) fi if [[ -z "$nvidia_host_version" ]]; then @@ -5027,6 +5308,7 @@ EOF # ══════════════════════════════════════════════════════════════════════════════ _setup_gpu_permissions() { local in_ct="$1" + local service_user="${2:-}" # /dev/dri permissions (Intel/AMD) if [[ "$in_ct" == "0" && -d /dev/dri ]]; then @@ -5065,7 +5347,7 @@ _setup_gpu_permissions() { for nvidia_dev in /dev/nvidia*; do [[ -e "$nvidia_dev" ]] && { chgrp video "$nvidia_dev" 2>/dev/null || true - chmod 666 "$nvidia_dev" 2>/dev/null || true + chmod 660 "$nvidia_dev" 2>/dev/null || true } done if [[ -d /dev/nvidia-caps ]]; then @@ -5073,7 +5355,7 @@ _setup_gpu_permissions() { for caps_dev in /dev/nvidia-caps/*; do [[ -e "$caps_dev" ]] && { chgrp video "$caps_dev" 2>/dev/null || true - chmod 666 "$caps_dev" 2>/dev/null || true + chmod 660 "$caps_dev" 2>/dev/null || true } done fi @@ -5090,9 +5372,16 @@ _setup_gpu_permissions() { # /dev/kfd permissions (AMD ROCm) if [[ -e /dev/kfd ]]; then - chmod 666 /dev/kfd 2>/dev/null || true + chgrp render /dev/kfd 2>/dev/null || true + chmod 660 /dev/kfd 2>/dev/null || true msg_info "AMD ROCm compute device configured" fi + + # Add service user to render and video groups for GPU hardware acceleration + if [[ -n "$service_user" ]]; then + usermod -aG render "$service_user" 2>/dev/null || true + usermod -aG video "$service_user" 2>/dev/null || true + fi } # ------------------------------------------------------------------------------ @@ -5141,42 +5430,42 @@ function 
setup_imagemagick() { if ! CURL_TIMEOUT=180 curl_with_retry "https://imagemagick.org/archive/ImageMagick.tar.gz" "$TMP_DIR/ImageMagick.tar.gz"; then msg_error "Failed to download ImageMagick" rm -rf "$TMP_DIR" - return 1 + return 250 fi tar -xzf "$TMP_DIR/ImageMagick.tar.gz" -C "$TMP_DIR" || { msg_error "Failed to extract ImageMagick" rm -rf "$TMP_DIR" - return 1 + return 251 } cd "$TMP_DIR"/ImageMagick-* || { msg_error "Source extraction failed" rm -rf "$TMP_DIR" - return 1 + return 251 } $STD ./configure --disable-static || { msg_error "ImageMagick configure failed" rm -rf "$TMP_DIR" - return 1 + return 150 } $STD make -j"$(nproc)" || { msg_error "ImageMagick compilation failed" rm -rf "$TMP_DIR" - return 1 + return 150 } $STD make install || { msg_error "ImageMagick installation failed" rm -rf "$TMP_DIR" - return 1 + return 150 } $STD ldconfig /usr/local/lib if [[ ! -x "$BINARY_PATH" ]]; then msg_error "ImageMagick installation failed" rm -rf "$TMP_DIR" - return 1 + return 150 fi local FINAL_VERSION @@ -5213,7 +5502,7 @@ function setup_java() { # Prepare repository (cleanup + validation) prepare_repository_setup "adoptium" || { msg_error "Failed to prepare Adoptium repository" - return 1 + return 100 } # Add repo if needed @@ -5230,32 +5519,15 @@ function setup_java() { # Get currently installed version local INSTALLED_VERSION="" - if dpkg -l | grep -q "temurin-.*-jdk" 2>/dev/null; then - INSTALLED_VERSION=$(dpkg -l 2>/dev/null | awk '/temurin-.*-jdk/{print $2}' | grep -oP 'temurin-\K[0-9]+' | head -n1 || echo "") - fi - - # Validate INSTALLED_VERSION is not empty if JDK package found - local JDK_COUNT=0 - JDK_COUNT=$(dpkg -l 2>/dev/null | grep -c "temurin-.*-jdk" || true) - if [[ -z "$INSTALLED_VERSION" && "${JDK_COUNT:-0}" -gt 0 ]]; then - msg_warn "Found Temurin JDK but cannot determine version - attempting reinstall" - # Try to get actual package name for purge - local OLD_PACKAGE - OLD_PACKAGE=$(dpkg -l 2>/dev/null | awk '/temurin-.*-jdk/{print $2}' | head 
-n1 || echo "") - if [[ -n "$OLD_PACKAGE" ]]; then - msg_info "Removing existing package: $OLD_PACKAGE" - $STD apt purge -y "$OLD_PACKAGE" || true - fi - INSTALLED_VERSION="" # Reset to trigger fresh install - fi + INSTALLED_VERSION=$(dpkg-query -W -f '${Package}\n' 2>/dev/null | grep -oP '^temurin-\K[0-9]+(?=-jdk$)' | head -n1 || echo "") # Scenario 1: Already at correct version if [[ "$INSTALLED_VERSION" == "$JAVA_VERSION" ]]; then msg_info "Update Temurin JDK $JAVA_VERSION" - ensure_apt_working || return 1 + ensure_apt_working || return 100 upgrade_packages_with_retry "$DESIRED_PACKAGE" || { msg_error "Failed to update Temurin JDK" - return 1 + return 100 } cache_installed_version "temurin-jdk" "$JAVA_VERSION" msg_ok "Update Temurin JDK $JAVA_VERSION" @@ -5270,12 +5542,12 @@ function setup_java() { msg_info "Setup Temurin JDK $JAVA_VERSION" fi - ensure_apt_working || return 1 + ensure_apt_working || return 100 # Install with retry logic install_packages_with_retry "$DESIRED_PACKAGE" || { msg_error "Failed to install Temurin JDK $JAVA_VERSION" - return 1 + return 100 } cache_installed_version "temurin-jdk" "$JAVA_VERSION" @@ -5311,7 +5583,7 @@ function setup_local_ip_helper() { if ! dpkg -s networkd-dispatcher >/dev/null 2>&1; then ensure_dependencies networkd-dispatcher || { msg_error "Failed to install networkd-dispatcher" - return 1 + return 100 } fi @@ -5472,7 +5744,7 @@ EOF fi # Ensure APT is working - ensure_apt_working || return 1 + ensure_apt_working || return 100 # Check if installed version is from official repo and higher than distro version # In this case, we keep the existing installation to avoid data issues @@ -5502,7 +5774,7 @@ EOF # Install or upgrade MariaDB from distribution packages if ! 
install_packages_with_retry "mariadb-server" "mariadb-client"; then msg_error "Failed to install MariaDB packages from distribution" - return 1 + return 100 fi # Get installed version for caching @@ -5539,7 +5811,7 @@ EOF msg_info "Update MariaDB $MARIADB_VERSION" # Ensure APT is working - ensure_apt_working || return 1 + ensure_apt_working || return 100 # Check if repository needs to be refreshed if [[ -f /etc/apt/sources.list.d/mariadb.sources ]]; then @@ -5550,16 +5822,16 @@ EOF manage_tool_repository "mariadb" "$MARIADB_VERSION" "http://mirror.mariadb.org/repo/$MARIADB_VERSION" \ "https://mariadb.org/mariadb_release_signing_key.asc" || { msg_error "Failed to update MariaDB repository" - return 1 + return 100 } fi fi # Perform upgrade with retry logic - ensure_apt_working || return 1 + ensure_apt_working || return 100 upgrade_packages_with_retry "mariadb-server" "mariadb-client" || { msg_error "Failed to upgrade MariaDB packages" - return 1 + return 100 } cache_installed_version "mariadb" "$MARIADB_VERSION" msg_ok "Update MariaDB $MARIADB_VERSION" @@ -5576,7 +5848,7 @@ EOF # Prepare repository (cleanup + validation) prepare_repository_setup "mariadb" || { msg_error "Failed to prepare MariaDB repository" - return 1 + return 100 } # Install required dependencies first @@ -5595,7 +5867,7 @@ EOF manage_tool_repository "mariadb" "$MARIADB_VERSION" "http://mirror.mariadb.org/repo/$MARIADB_VERSION" \ "https://mariadb.org/mariadb_release_signing_key.asc" || { msg_error "Failed to setup MariaDB repository" - return 1 + return 100 } # Install packages with retry logic @@ -5616,7 +5888,7 @@ EOF return 0 else msg_error "Failed to install MariaDB packages (both official repo and distribution)" - return 1 + return 100 fi fi @@ -5687,7 +5959,7 @@ _setup_mariadb_runtime_dir() { function setup_mariadb_db() { if [[ -z "${MARIADB_DB_NAME:-}" || -z "${MARIADB_DB_USER:-}" ]]; then msg_error "MARIADB_DB_NAME and MARIADB_DB_USER must be set before calling setup_mariadb_db" - return 1 
+ return 65 fi if [[ -z "${MARIADB_DB_PASS:-}" ]]; then @@ -5733,14 +6005,14 @@ function setup_mariadb_db() { } # ------------------------------------------------------------------------------ -# Installs or updates MongoDB to specified major version. +# Installs or updates MongoDB to specified version. # # Description: # - Preserves data across installations # - Adds official MongoDB repo # # Variables: -# MONGO_VERSION - MongoDB major version to install (e.g. 7.0, 8.0) +# MONGO_VERSION - MongoDB version to install (e.g. 7.0, 8.2) # ------------------------------------------------------------------------------ function setup_mongodb() { @@ -5759,7 +6031,7 @@ function setup_mongodb() { local major="${MONGO_VERSION%%.*}" if ((major > 5)); then msg_error "MongoDB ${MONGO_VERSION} requires AVX support, which is not available on this system." - return 1 + return 236 fi fi @@ -5772,7 +6044,7 @@ function setup_mongodb() { ;; *) msg_error "Unsupported distribution: $DISTRO_ID" - return 1 + return 238 ;; esac @@ -5784,12 +6056,12 @@ function setup_mongodb() { if [[ -n "$INSTALLED_VERSION" && "$INSTALLED_VERSION" == "$MONGO_VERSION" ]]; then msg_info "Update MongoDB $MONGO_VERSION" - ensure_apt_working || return 1 + ensure_apt_working || return 100 # Perform upgrade with retry logic upgrade_packages_with_retry "mongodb-org" || { msg_error "Failed to upgrade MongoDB" - return 1 + return 100 } cache_installed_version "mongodb" "$MONGO_VERSION" msg_ok "Update MongoDB $MONGO_VERSION" @@ -5809,31 +6081,34 @@ function setup_mongodb() { # Prepare repository (cleanup + validation) prepare_repository_setup "mongodb" || { msg_error "Failed to prepare MongoDB repository" - return 1 + return 100 } # Setup repository + # MongoDB 8.x versions beyond 8.0 reuse the server-8.0.asc PGP key + local MONGO_KEY_VERSION="${MONGO_VERSION}" + [[ "${MONGO_VERSION}" == 8.[1-9]* ]] && MONGO_KEY_VERSION="8.0" manage_tool_repository "mongodb" "$MONGO_VERSION" "$MONGO_BASE_URL" \ - 
"https://www.mongodb.org/static/pgp/server-${MONGO_VERSION}.asc" || { + "https://www.mongodb.org/static/pgp/server-${MONGO_KEY_VERSION}.asc" || { msg_error "Failed to setup MongoDB repository" - return 1 + return 100 } # Wait for repo to settle $STD apt update || { msg_error "APT update failed — invalid MongoDB repo for ${DISTRO_ID}-${DISTRO_CODENAME}?" - return 1 + return 100 } # Install MongoDB with retry logic install_packages_with_retry "mongodb-org" || { msg_error "Failed to install MongoDB packages" - return 1 + return 100 } if ! command -v mongod >/dev/null 2>&1; then msg_error "MongoDB binary not found after installation" - return 1 + return 127 fi mkdir -p /var/lib/mongodb @@ -5899,12 +6174,12 @@ function setup_mysql() { # If already installed, just update if [[ -n "$CURRENT_VERSION" ]]; then msg_info "Update MySQL $CURRENT_VERSION" - ensure_apt_working || return 1 + ensure_apt_working || return 100 upgrade_packages_with_retry "default-mysql-server" "default-mysql-client" || upgrade_packages_with_retry "mysql-server" "mysql-client" || upgrade_packages_with_retry "mariadb-server" "mariadb-client" || { msg_error "Failed to upgrade MySQL/MariaDB packages" - return 1 + return 100 } cache_installed_version "mysql" "$CURRENT_VERSION" msg_ok "Update MySQL $CURRENT_VERSION" @@ -5912,7 +6187,7 @@ function setup_mysql() { fi # Fresh install from distro repo - ensure_apt_working || return 1 + ensure_apt_working || return 100 export DEBIAN_FRONTEND=noninteractive # Try default-mysql-server first, fallback to mysql-server, then mariadb @@ -5923,7 +6198,7 @@ function setup_mysql() { msg_warn "mysql-server failed, trying mariadb as fallback" install_packages_with_retry "mariadb-server" "mariadb-client" || { msg_error "Failed to install any MySQL/MariaDB from distro repository" - return 1 + return 100 } } } @@ -5932,14 +6207,14 @@ function setup_mysql() { msg_warn "mysql-server failed, trying mariadb as fallback" install_packages_with_retry "mariadb-server" 
"mariadb-client" || { msg_error "Failed to install any MySQL/MariaDB from distro repository" - return 1 + return 100 } } else # Distro doesn't have MySQL, use MariaDB install_packages_with_retry "mariadb-server" "mariadb-client" || { msg_error "Failed to install MariaDB from distro repository" - return 1 + return 100 } fi @@ -5959,7 +6234,7 @@ function setup_mysql() { if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" == "$MYSQL_VERSION" ]]; then msg_info "Update MySQL $MYSQL_VERSION" - ensure_apt_working || return 1 + ensure_apt_working || return 100 # Perform upgrade with retry logic (non-fatal if fails) upgrade_packages_with_retry "mysql-server" "mysql-client" || { @@ -5982,7 +6257,7 @@ function setup_mysql() { # Prepare repository (cleanup + validation) prepare_repository_setup "mysql" || { msg_error "Failed to prepare MySQL repository" - return 1 + return 100 } # Debian 13+ Fix: MySQL 8.0 incompatible with libaio1t64, use 8.4 LTS @@ -5991,7 +6266,7 @@ function setup_mysql() { if ! download_gpg_key "https://repo.mysql.com/RPM-GPG-KEY-mysql-2023" "/etc/apt/keyrings/mysql.gpg" "dearmor"; then msg_error "Failed to import MySQL GPG key" - return 1 + return 7 fi cat >/etc/apt/sources.list.d/mysql.sources </dev/null 2>&1; then msg_error "MySQL installed but mysql command still not found" - return 1 + return 127 fi fi @@ -6106,18 +6381,21 @@ function setup_nodejs() { $STD apt update $STD apt install -y jq || { msg_error "Failed to install jq" - return 1 + return 100 } fi - # Scenario 1: Already installed at target version - just update packages/modules + # Scenario 1: Already installed at target version - upgrade to latest minor/patch + update packages/modules if [[ -n "$CURRENT_NODE_VERSION" && "$CURRENT_NODE_VERSION" == "$NODE_VERSION" ]]; then msg_info "Update Node.js $NODE_VERSION" - ensure_apt_working || return 1 + ensure_apt_working || return 100 - # Just update npm to latest - $STD npm install -g npm@latest 2>/dev/null || true + # Upgrade to the latest 
minor/patch release from NodeSource + $STD apt-get install -y --only-upgrade nodejs 2>/dev/null || true + + # Pin npm to 11.11.0 to work around Node.js 22.22.2 regression (nodejs/node#62425) + $STD npm install -g npm@11.11.0 2>/dev/null || true cache_installed_version "nodejs" "$NODE_VERSION" msg_ok "Update Node.js $NODE_VERSION" @@ -6144,13 +6422,13 @@ function setup_nodejs() { # Prepare repository (cleanup + validation) prepare_repository_setup "nodesource" || { msg_error "Failed to prepare Node.js repository" - return 1 + return 250 } # Setup NodeSource repository manage_tool_repository "nodejs" "$NODE_VERSION" "https://deb.nodesource.com/node_${NODE_VERSION}.x" "https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key" || { msg_error "Failed to setup Node.js repository" - return 1 + return 250 } # Force APT cache refresh after repository setup @@ -6162,13 +6440,13 @@ function setup_nodejs() { install_packages_with_retry "nodejs" || { msg_error "Failed to install Node.js ${NODE_VERSION} from NodeSource" - return 1 + return 100 } # Verify Node.js was installed correctly if ! command -v node >/dev/null 2>&1; then msg_error "Node.js binary not found after installation" - return 1 + return 127 fi local INSTALLED_NODE_VERSION @@ -6178,15 +6456,15 @@ function setup_nodejs() { # Verify npm is available (should come with NodeSource nodejs) if ! command -v npm >/dev/null 2>&1; then msg_error "npm not found after Node.js installation - repository issue?" 
- return 1 + return 127 fi - # Update to latest npm (with version check to avoid incompatibility) + # Pin npm to 11.11.0 to work around Node.js 22.22.2 regression (nodejs/node#62425) local NPM_VERSION NPM_VERSION=$(npm -v 2>/dev/null || echo "0") if [[ "$NPM_VERSION" != "0" ]]; then - $STD npm install -g npm@latest 2>/dev/null || { - msg_warn "Failed to update npm to latest version (continuing with bundled npm $NPM_VERSION)" + $STD npm install -g npm@11.11.0 2>/dev/null || { + msg_warn "Failed to update npm to 11.11.0 (continuing with bundled npm $NPM_VERSION)" } fi @@ -6194,7 +6472,37 @@ function setup_nodejs() { msg_ok "Setup Node.js $NODE_VERSION" fi - export NODE_OPTIONS="--max-old-space-size=4096" + # Set a safe default heap limit for Node.js builds if not explicitly provided. + # Priority: + # 1) NODE_OPTIONS (caller/user override) + # 2) NODE_MAX_OLD_SPACE_SIZE (explicit MB override) + # 3) var_ram (LXC memory setting, MB) + # 4) /proc/meminfo (runtime memory detection) + # Auto value is clamped to 1024..12288 MB. + if [[ -z "${NODE_OPTIONS:-}" ]]; then + local node_heap_mb="" + + if [[ -n "${NODE_MAX_OLD_SPACE_SIZE:-}" ]] && [[ "${NODE_MAX_OLD_SPACE_SIZE}" =~ ^[0-9]+$ ]]; then + node_heap_mb="${NODE_MAX_OLD_SPACE_SIZE}" + elif [[ -n "${var_ram:-}" ]] && [[ "${var_ram}" =~ ^[0-9]+$ ]]; then + node_heap_mb=$((var_ram * 75 / 100)) + else + local total_mem_kb="" + total_mem_kb=$(awk '/^MemTotal:/ {print $2; exit}' /proc/meminfo 2>/dev/null || echo "") + if [[ "$total_mem_kb" =~ ^[0-9]+$ ]]; then + local total_mem_mb=$((total_mem_kb / 1024)) + node_heap_mb=$((total_mem_mb * 75 / 100)) + fi + fi + + if [[ -z "$node_heap_mb" ]] || ((node_heap_mb < 1024)); then + node_heap_mb=1024 + elif ((node_heap_mb > 12288)); then + node_heap_mb=12288 + fi + + export NODE_OPTIONS="--max-old-space-size=${node_heap_mb}" + fi # Ensure valid working directory for npm (avoids uv_cwd error) if [[ ! 
-d /opt ]]; then @@ -6202,12 +6510,20 @@ function setup_nodejs() { fi cd /opt || { msg_error "Failed to set safe working directory before npm install" - return 1 + return 127 } # Install global Node modules if [[ -n "$NODE_MODULE" ]]; then IFS=',' read -ra MODULES <<<"$NODE_MODULE" + + # Pin pnpm to v10 to avoid breaking changes from newer major versions + for i in "${!MODULES[@]}"; do + if [[ "${MODULES[$i]}" =~ ^pnpm(@.*)?$ ]]; then + MODULES[$i]="pnpm@^10" + fi + done + local failed_modules=0 for mod in "${MODULES[@]}"; do local MODULE_NAME MODULE_REQ_VERSION MODULE_INSTALLED_VERSION @@ -6392,7 +6708,7 @@ EOF # Setup repository prepare_repository_setup "php" "deb.sury.org-php" || { msg_error "Failed to prepare PHP repository" - return 1 + return 100 } # Use different repository based on OS @@ -6401,7 +6717,7 @@ EOF msg_info "Adding ondrej/php PPA for Ubuntu" $STD apt install -y software-properties-common || { msg_error "Failed to install software-properties-common" - return 1 + return 100 } # Don't use $STD for add-apt-repository as it uses background processes add-apt-repository -y ppa:ondrej/php >>"$(get_active_logfile)" 2>&1 @@ -6409,10 +6725,10 @@ EOF # Debian: Use Sury repository manage_tool_repository "php" "$PHP_VERSION" "" "https://packages.sury.org/debsuryorg-archive-keyring.deb" || { msg_error "Failed to setup PHP repository" - return 1 + return 100 } fi - ensure_apt_working || return 1 + ensure_apt_working || return 100 $STD apt update || { msg_warn "apt update failed after PHP repository setup" } @@ -6423,7 +6739,7 @@ EOF if [[ -z "$AVAILABLE_PHP_VERSION" ]]; then msg_error "PHP ${PHP_VERSION} not found in configured repositories" - return 1 + return 100 fi # Build module list - verify each package exists before adding @@ -6477,7 +6793,7 @@ EOF msg_info "Installing Apache with PHP ${PHP_VERSION} module" install_packages_with_retry "apache2" || { msg_error "Failed to install Apache" - return 1 + return 100 } install_packages_with_retry 
"libapache2-mod-php${PHP_VERSION}" || { msg_warn "Failed to install libapache2-mod-php${PHP_VERSION}, continuing without Apache module" @@ -6495,7 +6811,7 @@ EOF # Install main package first (critical) if ! $STD apt install -y "php${PHP_VERSION}" 2>/dev/null; then msg_error "Failed to install php${PHP_VERSION}" - return 1 + return 100 fi # Try to install Apache module individually if requested @@ -6551,7 +6867,7 @@ EOF # Verify PHP installation - critical check if ! command -v php >/dev/null 2>&1; then msg_error "PHP installation verification failed - php command not found" - return 1 + return 127 fi local INSTALLED_VERSION=$(php -v 2>/dev/null | awk '/^PHP/{print $2}' | cut -d. -f1,2) @@ -6560,7 +6876,7 @@ EOF msg_error "PHP version mismatch: requested ${PHP_VERSION} but got ${INSTALLED_VERSION}" msg_error "This indicates a critical package installation issue" # Don't cache wrong version - return 1 + return 127 fi cache_installed_version "php" "$INSTALLED_VERSION" @@ -6575,22 +6891,38 @@ EOF # - Optionally uses official PGDG repository for specific versions # - Detects existing PostgreSQL version # - Dumps all databases before upgrade -# - Installs optional PG_MODULES (e.g. postgis, contrib) +# - Installs optional PG_MODULES (e.g. postgis, contrib, cron) # - Restores dumped data post-upgrade # # Variables: # USE_PGDG_REPO - Use official PGDG repository (default: true) # Set to "false" to use distro packages instead # PG_VERSION - Major PostgreSQL version (e.g. 15, 16) (default: 16) -# PG_MODULES - Comma-separated list of modules (e.g. "postgis,contrib") +# PG_MODULES - Comma-separated list of modules (e.g. 
"postgis,contrib,cron") # # Examples: -# setup_postgresql # Uses PGDG repo, PG 16 -# PG_VERSION="17" setup_postgresql # Specific version from PGDG -# USE_PGDG_REPO=false setup_postgresql # Uses distro package instead +# setup_postgresql # Uses PGDG repo, PG 16 +# PG_VERSION="17" setup_postgresql # Specific version from PGDG +# USE_PGDG_REPO=false setup_postgresql # Uses distro package instead +# PG_VERSION="17" PG_MODULES="cron" setup_postgresql # With pg_cron module # ------------------------------------------------------------------------------ -function setup_postgresql() { +# Internal helper: Configure shared_preload_libraries for pg_cron +_configure_pg_cron_preload() { + local modules="${1:-}" + [[ -z "$modules" ]] && return 0 + if [[ ",$modules," == *",cron,"* ]]; then + local current_libs + current_libs=$(sudo -u postgres psql -tAc "SHOW shared_preload_libraries;" 2>/dev/null || echo "") + if [[ "$current_libs" != *"pg_cron"* ]]; then + local new_libs="${current_libs:+${current_libs},}pg_cron" + $STD sudo -u postgres psql -c "ALTER SYSTEM SET shared_preload_libraries = '${new_libs}';" + $STD systemctl restart postgresql + fi + fi +} + +setup_postgresql() { local PG_VERSION="${PG_VERSION:-16}" local PG_MODULES="${PG_MODULES:-}" local USE_PGDG_REPO="${USE_PGDG_REPO:-true}" @@ -6616,7 +6948,7 @@ function setup_postgresql() { # If already installed, just update if [[ -n "$CURRENT_PG_VERSION" ]]; then msg_info "Update PostgreSQL $CURRENT_PG_VERSION" - ensure_apt_working || return 1 + ensure_apt_working || return 100 upgrade_packages_with_retry "postgresql" "postgresql-client" || true cache_installed_version "postgresql" "$CURRENT_PG_VERSION" msg_ok "Update PostgreSQL $CURRENT_PG_VERSION" @@ -6628,16 +6960,17 @@ function setup_postgresql() { $STD apt install -y "postgresql-${CURRENT_PG_VERSION}-${module}" 2>/dev/null || true done fi + _configure_pg_cron_preload "$PG_MODULES" return 0 fi # Fresh install from distro repo - ensure_apt_working || return 1 + 
ensure_apt_working || return 100 export DEBIAN_FRONTEND=noninteractive install_packages_with_retry "postgresql" "postgresql-client" || { msg_error "Failed to install PostgreSQL from distro repository" - return 1 + return 100 } # Get installed version @@ -6663,6 +6996,7 @@ function setup_postgresql() { $STD apt install -y "postgresql-${INSTALLED_VERSION}-${module}" 2>/dev/null || true done fi + _configure_pg_cron_preload "$PG_MODULES" return 0 fi @@ -6670,7 +7004,7 @@ function setup_postgresql() { # Scenario 2a: Already at correct version if [[ "$CURRENT_PG_VERSION" == "$PG_VERSION" ]]; then msg_info "Update PostgreSQL $PG_VERSION" - ensure_apt_working || return 1 + ensure_apt_working || return 100 # Perform upgrade with retry logic (non-fatal if fails) upgrade_packages_with_retry "postgresql-${PG_VERSION}" "postgresql-client-${PG_VERSION}" 2>/dev/null || true @@ -6684,6 +7018,7 @@ function setup_postgresql() { $STD apt install -y "postgresql-${PG_VERSION}-${module}" 2>/dev/null || true done fi + _configure_pg_cron_preload "$PG_MODULES" return 0 fi @@ -6694,7 +7029,7 @@ function setup_postgresql() { local PG_BACKUP_FILE="/var/lib/postgresql/backup_$(date +%F)_v${CURRENT_PG_VERSION}.sql" $STD runuser -u postgres -- pg_dumpall >"$PG_BACKUP_FILE" || { msg_error "Failed to backup PostgreSQL databases" - return 1 + return 150 } $STD systemctl stop postgresql || true $STD apt purge -y "postgresql-${CURRENT_PG_VERSION}" "postgresql-client-${CURRENT_PG_VERSION}" 2>/dev/null || true @@ -6705,19 +7040,22 @@ function setup_postgresql() { # Scenario 3: Fresh install or after removal - setup repo and install prepare_repository_setup "pgdg" "postgresql" || { msg_error "Failed to prepare PostgreSQL repository" - return 1 + return 100 } local SUITE case "$DISTRO_CODENAME" in trixie | forky | sid) + if verify_repo_available "https://apt.postgresql.org/pub/repos/apt" "trixie-pgdg"; then SUITE="trixie-pgdg" + else msg_warn "PGDG repo not available for ${DISTRO_CODENAME}, falling back 
to distro packages" USE_PGDG_REPO=false setup_postgresql return $? fi + ;; *) SUITE=$(get_fallback_suite "$DISTRO_ID" "$DISTRO_CODENAME" "https://apt.postgresql.org/pub/repos/apt") @@ -6734,7 +7072,7 @@ function setup_postgresql() { if ! $STD apt update; then msg_error "APT update failed for PostgreSQL repository" - return 1 + return 100 fi # Install ssl-cert dependency if available @@ -6764,12 +7102,12 @@ function setup_postgresql() { if [[ "$pg_install_success" == false ]]; then msg_error "PostgreSQL package not available for suite ${SUITE}" - return 1 + return 100 fi if ! command -v psql >/dev/null 2>&1; then msg_error "PostgreSQL installed but psql command not found" - return 1 + return 127 fi # Restore database backup if we upgraded from previous version @@ -6801,6 +7139,7 @@ function setup_postgresql() { } done fi + _configure_pg_cron_preload "$PG_MODULES" } # ------------------------------------------------------------------------------ @@ -6819,6 +7158,7 @@ function setup_postgresql() { # PG_DB_NAME="immich" PG_DB_USER="immich" PG_DB_EXTENSIONS="pgvector" setup_postgresql_db # PG_DB_NAME="ghostfolio" PG_DB_USER="ghostfolio" PG_DB_GRANT_SUPERUSER="true" setup_postgresql_db # PG_DB_NAME="adventurelog" PG_DB_USER="adventurelog" PG_DB_EXTENSIONS="postgis" setup_postgresql_db +# PG_DB_NAME="splitpro" PG_DB_USER="splitpro" PG_DB_EXTENSIONS="pg_cron" setup_postgresql_db # # Variables: # PG_DB_NAME - Database name (required) @@ -6838,7 +7178,7 @@ function setup_postgresql_db() { # Validation if [[ -z "${PG_DB_NAME:-}" || -z "${PG_DB_USER:-}" ]]; then msg_error "PG_DB_NAME and PG_DB_USER must be set before calling setup_postgresql_db" - return 1 + return 65 fi # Generate password if not provided @@ -6850,6 +7190,13 @@ function setup_postgresql_db() { $STD sudo -u postgres psql -c "CREATE ROLE $PG_DB_USER WITH LOGIN PASSWORD '$PG_DB_PASS';" $STD sudo -u postgres psql -c "CREATE DATABASE $PG_DB_NAME WITH OWNER $PG_DB_USER ENCODING 'UTF8' TEMPLATE template0;" + # 
Configure pg_cron database BEFORE creating the extension (must be set before pg_cron loads) + if [[ -n "${PG_DB_EXTENSIONS:-}" ]] && [[ ",${PG_DB_EXTENSIONS//[[:space:]]/}," == *",pg_cron,"* ]]; then + $STD sudo -u postgres psql -c "ALTER SYSTEM SET cron.database_name = '${PG_DB_NAME}';" + $STD sudo -u postgres psql -c "ALTER SYSTEM SET cron.timezone = 'UTC';" + $STD systemctl restart postgresql + fi + # Install extensions (comma-separated) if [[ -n "${PG_DB_EXTENSIONS:-}" ]]; then IFS=',' read -ra EXT_LIST <<<"${PG_DB_EXTENSIONS:-}" @@ -6859,6 +7206,12 @@ function setup_postgresql_db() { done fi + # Grant pg_cron schema permissions to DB user + if [[ -n "${PG_DB_EXTENSIONS:-}" ]] && [[ ",${PG_DB_EXTENSIONS//[[:space:]]/}," == *",pg_cron,"* ]]; then + $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "GRANT USAGE ON SCHEMA cron TO ${PG_DB_USER};" + $STD sudo -u postgres psql -d "$PG_DB_NAME" -c "GRANT ALL ON ALL TABLES IN SCHEMA cron TO ${PG_DB_USER};" + fi + # ALTER ROLE settings for Django/Rails compatibility (unless skipped) if [[ "${PG_DB_SKIP_ALTER_ROLE:-}" != "true" ]]; then $STD sudo -u postgres psql -c "ALTER ROLE $PG_DB_USER SET client_encoding TO 'utf8';" @@ -6900,7 +7253,6 @@ function setup_postgresql_db() { export PG_DB_USER export PG_DB_PASS } - # ------------------------------------------------------------------------------ # Installs rbenv and ruby-build, installs Ruby and optionally Rails. 
# @@ -6943,7 +7295,7 @@ function setup_ruby() { msg_info "Setup Ruby $RUBY_VERSION" fi - ensure_apt_working || return 1 + ensure_apt_working || return 100 # Install build dependencies with fallbacks local ruby_deps=() @@ -6985,7 +7337,7 @@ function setup_ruby() { else msg_error "No Ruby build dependencies available" rm -rf "$TMP_DIR" - return 1 + return 100 fi # Download and build rbenv if needed @@ -6997,7 +7349,7 @@ function setup_ruby() { if [[ -z "$rbenv_json" ]]; then msg_error "Failed to fetch latest rbenv version from GitHub" rm -rf "$TMP_DIR" - return 1 + return 7 fi RBENV_RELEASE=$(echo "$rbenv_json" | jq -r '.tag_name' 2>/dev/null | sed 's/^v//' || echo "") @@ -7005,19 +7357,19 @@ function setup_ruby() { if [[ -z "$RBENV_RELEASE" ]]; then msg_error "Could not parse rbenv version from GitHub response" rm -rf "$TMP_DIR" - return 1 + return 250 fi if ! curl_with_retry "https://github.com/rbenv/rbenv/archive/refs/tags/v${RBENV_RELEASE}.tar.gz" "$TMP_DIR/rbenv.tar.gz"; then msg_error "Failed to download rbenv" rm -rf "$TMP_DIR" - return 1 + return 7 fi tar -xzf "$TMP_DIR/rbenv.tar.gz" -C "$TMP_DIR" || { msg_error "Failed to extract rbenv" rm -rf "$TMP_DIR" - return 1 + return 251 } mkdir -p "$RBENV_DIR" @@ -7025,7 +7377,7 @@ function setup_ruby() { (cd "$RBENV_DIR" && src/configure && $STD make -C src) || { msg_error "Failed to build rbenv" rm -rf "$TMP_DIR" - return 1 + return 150 } # Setup profile @@ -7044,7 +7396,7 @@ function setup_ruby() { if [[ -z "$ruby_build_json" ]]; then msg_error "Failed to fetch latest ruby-build version from GitHub" rm -rf "$TMP_DIR" - return 1 + return 7 fi RUBY_BUILD_RELEASE=$(echo "$ruby_build_json" | jq -r '.tag_name' 2>/dev/null | sed 's/^v//' || echo "") @@ -7052,19 +7404,19 @@ function setup_ruby() { if [[ -z "$RUBY_BUILD_RELEASE" ]]; then msg_error "Could not parse ruby-build version from GitHub response" rm -rf "$TMP_DIR" - return 1 + return 250 fi if ! 
curl_with_retry "https://github.com/rbenv/ruby-build/archive/refs/tags/v${RUBY_BUILD_RELEASE}.tar.gz" "$TMP_DIR/ruby-build.tar.gz"; then msg_error "Failed to download ruby-build" rm -rf "$TMP_DIR" - return 1 + return 7 fi tar -xzf "$TMP_DIR/ruby-build.tar.gz" -C "$TMP_DIR" || { msg_error "Failed to extract ruby-build" rm -rf "$TMP_DIR" - return 1 + return 251 } mkdir -p "$RBENV_DIR/plugins/ruby-build" @@ -7079,14 +7431,14 @@ function setup_ruby() { $STD "$RBENV_BIN" install "$RUBY_VERSION" || { msg_error "Failed to install Ruby $RUBY_VERSION" rm -rf "$TMP_DIR" - return 1 + return 150 } fi "$RBENV_BIN" global "$RUBY_VERSION" || { msg_error "Failed to set Ruby $RUBY_VERSION as global version" rm -rf "$TMP_DIR" - return 1 + return 150 } hash -r @@ -7168,7 +7520,7 @@ function setup_meilisearch() { MEILI_HOST="${MEILISEARCH_HOST:-127.0.0.1}" MEILI_PORT="${MEILISEARCH_PORT:-7700}" MEILI_DUMP_DIR="${MEILISEARCH_DUMP_DIR:-/var/lib/meilisearch/dumps}" - MEILI_MASTER_KEY=$(grep -E "^master_key\s*=" /etc/meilisearch.toml 2>/dev/null | sed 's/.*=\s*"\(.*\)"/\1/' | tr -d ' ') + MEILI_MASTER_KEY=$(grep -E "^master_key\s*=" /etc/meilisearch.toml 2>/dev/null | sed 's/.*=\s*"\(.*\)"/\1/' | tr -d ' ' || true) # Create dump before update if migration is needed local DUMP_UID="" @@ -7234,7 +7586,7 @@ function setup_meilisearch() { # We choose option 2: backup and proceed with warning if [[ "$NEEDS_MIGRATION" == "true" ]] && [[ -z "$DUMP_UID" ]]; then local MEILI_DB_PATH - MEILI_DB_PATH=$(grep -E "^db_path\s*=" /etc/meilisearch.toml 2>/dev/null | sed 's/.*=\s*"\(.*\)"/\1/' | tr -d ' ') + MEILI_DB_PATH=$(grep -E "^db_path\s*=" /etc/meilisearch.toml 2>/dev/null | sed 's/.*=\s*"\(.*\)"/\1/' | tr -d ' ' || true) MEILI_DB_PATH="${MEILI_DB_PATH:-/var/lib/meilisearch/data}" if [[ -d "$MEILI_DB_PATH" ]] && [[ -n "$(ls -A "$MEILI_DB_PATH" 2>/dev/null)" ]]; then @@ -7254,7 +7606,7 @@ function setup_meilisearch() { # If migration needed and dump was created, remove old data and import dump if [[ 
"$NEEDS_MIGRATION" == "true" ]] && [[ -n "$DUMP_UID" ]]; then local MEILI_DB_PATH - MEILI_DB_PATH=$(grep -E "^db_path\s*=" /etc/meilisearch.toml 2>/dev/null | sed 's/.*=\s*"\(.*\)"/\1/' | tr -d ' ') + MEILI_DB_PATH=$(grep -E "^db_path\s*=" /etc/meilisearch.toml 2>/dev/null | sed 's/.*=\s*"\(.*\)"/\1/' | tr -d ' ' || true) MEILI_DB_PATH="${MEILI_DB_PATH:-/var/lib/meilisearch/data}" msg_info "Removing old MeiliSearch database for migration" @@ -7267,7 +7619,7 @@ function setup_meilisearch() { # Start meilisearch with --import-dump flag # This is a one-time import that happens during startup - /usr/bin/meilisearch --config-file-path /etc/meilisearch.toml --import-dump "$DUMP_FILE" & + /usr/bin/meilisearch --config-file-path /etc/meilisearch.toml --import-dump "$DUMP_FILE" >/dev/null 2>&1 & local MEILI_PID=$! # Wait for meilisearch to become healthy (import happens during startup) @@ -7290,6 +7642,7 @@ function setup_meilisearch() { # Stop the manual process kill $MEILI_PID 2>/dev/null || true + wait $MEILI_PID 2>/dev/null || true sleep 2 # Start via systemd for proper management @@ -7319,13 +7672,13 @@ function setup_meilisearch() { # Install binary fetch_and_deploy_gh_release "meilisearch" "meilisearch/meilisearch" "binary" || { msg_error "Failed to install MeiliSearch binary" - return 1 + return 250 } # Download default config curl -fsSL https://raw.githubusercontent.com/meilisearch/meilisearch/latest/config.toml -o /etc/meilisearch.toml || { msg_error "Failed to download MeiliSearch config" - return 1 + return 7 } # Generate master key @@ -7375,7 +7728,7 @@ EOF # Verify service is running if ! 
systemctl is-active --quiet meilisearch; then msg_error "MeiliSearch service failed to start" - return 1 + return 150 fi # Get API keys with retry logic @@ -7441,7 +7794,7 @@ function setup_clickhouse() { [[ -z "$CLICKHOUSE_VERSION" ]] && { msg_error "Could not determine latest ClickHouse version from any source" - return 1 + return 250 } fi @@ -7454,7 +7807,7 @@ function setup_clickhouse() { # Scenario 1: Already at target version - just update packages if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" == "$CLICKHOUSE_VERSION" ]]; then msg_info "Update ClickHouse $CLICKHOUSE_VERSION" - ensure_apt_working || return 1 + ensure_apt_working || return 100 # Perform upgrade with retry logic (non-fatal if fails) upgrade_packages_with_retry "clickhouse-server" "clickhouse-client" || { @@ -7479,7 +7832,7 @@ function setup_clickhouse() { # Prepare repository (cleanup + validation) prepare_repository_setup "clickhouse" || { msg_error "Failed to prepare ClickHouse repository" - return 1 + return 100 } # Setup repository (ClickHouse uses 'stable' suite) @@ -7493,18 +7846,18 @@ function setup_clickhouse() { # Install packages with retry logic $STD apt update || { msg_error "APT update failed for ClickHouse repository" - return 1 + return 100 } install_packages_with_retry "clickhouse-server" "clickhouse-client" || { msg_error "Failed to install ClickHouse packages" - return 1 + return 100 } # Verify installation if ! 
command -v clickhouse-server >/dev/null 2>&1; then msg_error "ClickHouse installation completed but clickhouse-server command not found" - return 1 + return 127 fi # Setup data directory @@ -7556,7 +7909,7 @@ function setup_rust() { msg_info "Setup Rust ($RUST_TOOLCHAIN)" curl -fsSL https://sh.rustup.rs | $STD sh -s -- -y --default-toolchain "$RUST_TOOLCHAIN" || { msg_error "Failed to install Rust" - return 1 + return 7 } export PATH="$CARGO_BIN:$PATH" echo 'export PATH="$HOME/.cargo/bin:$PATH"' >>"$HOME/.profile" @@ -7564,14 +7917,14 @@ function setup_rust() { # Verify installation if ! command -v rustc >/dev/null 2>&1; then msg_error "Rust binary not found after installation" - return 1 + return 127 fi local RUST_VERSION RUST_VERSION=$(rustc --version 2>/dev/null | awk '{print $2}' 2>/dev/null) || true if [[ -z "$RUST_VERSION" ]]; then msg_error "Failed to determine Rust version" - return 1 + return 250 fi cache_installed_version "rust" "$RUST_VERSION" @@ -7585,11 +7938,11 @@ function setup_rust() { # If default fails, install the toolchain first $STD rustup install "$RUST_TOOLCHAIN" || { msg_error "Failed to install Rust toolchain $RUST_TOOLCHAIN" - return 1 + return 150 } $STD rustup default "$RUST_TOOLCHAIN" || { msg_error "Failed to set default Rust toolchain" - return 1 + return 150 } } @@ -7605,7 +7958,7 @@ function setup_rust() { RUST_VERSION=$(rustc --version 2>/dev/null | awk '{print $2}' 2>/dev/null) || true if [[ -z "$RUST_VERSION" ]]; then msg_error "Failed to determine Rust version after update" - return 1 + return 250 fi cache_installed_version "rust" "$RUST_VERSION" @@ -7640,14 +7993,14 @@ function setup_rust() { msg_info "Upgrading $NAME from v$INSTALLED_VER to v$VER" $STD cargo install "$NAME" --version "$VER" --force || { msg_error "Failed to install $NAME@$VER" - return 1 + return 150 } msg_ok "Upgraded $NAME to v$VER" elif [[ -z "$VER" ]]; then msg_info "Upgrading $NAME to latest" $STD cargo install "$NAME" --force || { msg_error "Failed to 
upgrade $NAME" - return 1 + return 150 } local NEW_VER=$(cargo install --list 2>/dev/null | grep "^${NAME} " | head -1 | awk '{print $2}' 2>/dev/null | tr -d 'v:' || echo 'unknown') msg_ok "Upgraded $NAME to v$NEW_VER" @@ -7659,13 +8012,13 @@ function setup_rust() { if [[ -n "$VER" ]]; then $STD cargo install "$NAME" --version "$VER" || { msg_error "Failed to install $NAME@$VER" - return 1 + return 150 } msg_ok "Installed $NAME v$VER" else $STD cargo install "$NAME" || { msg_error "Failed to install $NAME" - return 1 + return 150 } local NEW_VER=$(cargo install --list 2>/dev/null | grep "^${NAME} " | head -1 | awk '{print $2}' 2>/dev/null | tr -d 'v:' || echo 'unknown') msg_ok "Installed $NAME v$NEW_VER" @@ -7718,7 +8071,7 @@ function setup_uv() { ;; *) msg_error "Unsupported architecture: $ARCH (supported: x86_64, aarch64, i686)" - return 1 + return 236 ;; esac @@ -7731,7 +8084,7 @@ function setup_uv() { if [[ -z "$releases_json" ]]; then msg_error "Could not fetch latest uv version from GitHub API" - return 1 + return 7 fi local LATEST_VERSION @@ -7739,7 +8092,7 @@ function setup_uv() { if [[ -z "$LATEST_VERSION" ]]; then msg_error "Could not parse uv version from GitHub API response" - return 1 + return 250 fi # Get currently installed version @@ -7755,7 +8108,7 @@ function setup_uv() { # Check if uvx is needed and missing if [[ "${USE_UVX:-NO}" == "YES" ]] && [[ ! -x "$UVX_BIN" ]]; then msg_info "Installing uvx wrapper" - _install_uvx_wrapper || return 1 + _install_uvx_wrapper || return 252 msg_ok "uvx wrapper installed" fi @@ -7773,25 +8126,25 @@ function setup_uv() { if ! 
curl_with_retry "$UV_URL" "$TMP_DIR/uv.tar.gz"; then msg_error "Failed to download uv from $UV_URL" - return 1 + return 7 fi # Extract $STD tar -xzf "$TMP_DIR/uv.tar.gz" -C "$TMP_DIR" || { msg_error "Failed to extract uv" - return 1 + return 251 } # Find and install uv binary (tarball extracts to uv-VERSION-ARCH/ directory) local UV_BINARY=$(find "$TMP_DIR" -name "uv" -type f -executable | head -n1) if [[ ! -f "$UV_BINARY" ]]; then msg_error "Could not find uv binary in extracted tarball" - return 1 + return 127 fi - $STD install -m 755 "$UV_BINARY" "$UV_BIN" || { + $STD /usr/bin/install -m 755 "$UV_BINARY" "$UV_BIN" || { msg_error "Failed to install uv binary" - return 1 + return 252 } ensure_usr_local_bin_persist @@ -7802,7 +8155,7 @@ function setup_uv() { msg_info "Installing uvx wrapper" _install_uvx_wrapper || { msg_error "Failed to install uvx wrapper" - return 1 + return 252 } msg_ok "uvx wrapper installed" fi @@ -7818,7 +8171,7 @@ function setup_uv() { msg_info "Installing Python $PYTHON_VERSION via uv" $STD uv python install "$PYTHON_VERSION" || { msg_error "Failed to install Python $PYTHON_VERSION" - return 1 + return 150 } msg_ok "Python $PYTHON_VERSION installed" fi @@ -7873,7 +8226,7 @@ function setup_yq() { if [[ -z "$releases_json" ]]; then msg_error "Could not fetch latest yq version from GitHub API" rm -rf "$TMP_DIR" - return 1 + return 250 fi LATEST_VERSION=$(echo "$releases_json" | jq -r '.tag_name' 2>/dev/null | sed 's/^v//' || echo "") @@ -7881,7 +8234,7 @@ function setup_yq() { if [[ -z "$LATEST_VERSION" ]]; then msg_error "Could not parse yq version from GitHub API response" rm -rf "$TMP_DIR" - return 1 + return 250 fi # Get currently installed version @@ -7912,14 +8265,14 @@ function setup_yq() { if ! 
curl_with_retry "https://github.com/${GITHUB_REPO}/releases/download/v${LATEST_VERSION}/yq_linux_${yq_arch}" "$TMP_DIR/yq"; then msg_error "Failed to download yq" rm -rf "$TMP_DIR" - return 1 + return 250 fi chmod +x "$TMP_DIR/yq" mv "$TMP_DIR/yq" "$BINARY_PATH" || { msg_error "Failed to install yq" rm -rf "$TMP_DIR" - return 1 + return 252 } rm -rf "$TMP_DIR" @@ -7986,18 +8339,18 @@ function setup_docker() { # Install or upgrade Docker from distro repo if [ "$docker_installed" = true ]; then msg_info "Checking for Docker updates (distro package)" - ensure_apt_working || return 1 + ensure_apt_working || return 100 upgrade_packages_with_retry "docker.io" "docker-compose" || true DOCKER_CURRENT_VERSION=$(docker --version | grep -oP '\d+\.\d+\.\d+' | head -1) msg_ok "Docker is up-to-date ($DOCKER_CURRENT_VERSION)" else msg_info "Installing Docker (distro package)" - ensure_apt_working || return 1 + ensure_apt_working || return 100 # Install docker.io and docker-compose from distro if ! install_packages_with_retry "docker.io"; then msg_error "Failed to install docker.io from distro repository" - return 1 + return 100 fi # docker-compose is optional $STD apt install -y docker-compose 2>/dev/null || true @@ -8055,7 +8408,7 @@ EOF docker-buildx-plugin \ docker-compose-plugin || { msg_error "Failed to update Docker packages" - return 1 + return 100 } msg_ok "Updated Docker to $DOCKER_LATEST_VERSION" else @@ -8070,7 +8423,7 @@ EOF docker-buildx-plugin \ docker-compose-plugin || { msg_error "Failed to install Docker packages" - return 1 + return 100 } DOCKER_CURRENT_VERSION=$(docker --version | grep -oP '\d+\.\d+\.\d+' | head -1) @@ -8219,7 +8572,7 @@ function fetch_and_deploy_from_url() { if [[ -z "$url" ]]; then msg_error "URL parameter is required" - return 1 + return 65 fi local filename="${url##*/}" @@ -8229,13 +8582,13 @@ function fetch_and_deploy_from_url() { local tmpdir tmpdir=$(mktemp -d) || { msg_error "Failed to create temporary directory" - return 1 + return 252 
} curl -fsSL -o "$tmpdir/$filename" "$url" || { msg_error "Download failed: $url" rm -rf "$tmpdir" - return 1 + return 250 } # Auto-detect archive type using file description @@ -8255,7 +8608,7 @@ function fetch_and_deploy_from_url() { else msg_error "Unsupported or unknown archive type: $file_desc" rm -rf "$tmpdir" - return 1 + return 65 fi msg_info "Detected archive type: $archive_type (file type: $file_desc)" @@ -8268,7 +8621,7 @@ function fetch_and_deploy_from_url() { $STD dpkg -i "$tmpdir/$filename" || { msg_error "Both apt and dpkg installation failed" rm -rf "$tmpdir" - return 1 + return 100 } } @@ -8280,7 +8633,7 @@ function fetch_and_deploy_from_url() { if [[ -z "$directory" ]]; then msg_error "Directory parameter is required for archive extraction" rm -rf "$tmpdir" - return 1 + return 65 fi msg_info "Extracting archive to $directory" @@ -8299,13 +8652,13 @@ function fetch_and_deploy_from_url() { unzip -q "$tmpdir/$filename" -d "$unpack_tmp" || { msg_error "Failed to extract ZIP archive" rm -rf "$tmpdir" "$unpack_tmp" - return 1 + return 251 } elif [[ "$archive_type" == "tar" ]]; then tar --no-same-owner -xf "$tmpdir/$filename" -C "$unpack_tmp" || { msg_error "Failed to extract TAR archive" rm -rf "$tmpdir" "$unpack_tmp" - return 1 + return 251 } fi @@ -8319,12 +8672,12 @@ function fetch_and_deploy_from_url() { cp -r "$inner_dir"/* "$directory/" || { msg_error "Failed to copy contents from $inner_dir to $directory" rm -rf "$tmpdir" "$unpack_tmp" - return 1 + return 252 } else msg_error "Inner directory is empty: $inner_dir" rm -rf "$tmpdir" "$unpack_tmp" - return 1 + return 252 fi shopt -u dotglob nullglob else @@ -8333,12 +8686,12 @@ function fetch_and_deploy_from_url() { cp -r "$unpack_tmp"/* "$directory/" || { msg_error "Failed to copy contents to $directory" rm -rf "$tmpdir" "$unpack_tmp" - return 1 + return 252 } else msg_error "Unpacked archive is empty" rm -rf "$tmpdir" "$unpack_tmp" - return 1 + return 252 fi shopt -u dotglob nullglob fi @@ -8367,3 
+8720,829 @@ EOF $STD apt update return 0 } + +# ------------------------------------------------------------------------------ +# Get latest GitLab release version. +# Usage: get_latest_gitlab_release "owner/repo" [strip_v] +# ------------------------------------------------------------------------------ +get_latest_gitlab_release() { + local repo="$1" + local strip_v="${2:-true}" + + local repo_encoded + repo_encoded=$(printf '%s' "$repo" | sed 's|/|%2F|g') + + local header=() + [[ -n "${GITLAB_TOKEN:-}" ]] && header=(-H "PRIVATE-TOKEN: $GITLAB_TOKEN") + + local temp_file + temp_file=$(mktemp) + + local http_code + http_code=$(curl --connect-timeout 10 --max-time 30 -sSL \ + -w "%{http_code}" -o "$temp_file" \ + "${header[@]}" \ + "https://gitlab.com/api/v4/projects/$repo_encoded/releases?per_page=1&order_by=released_at&sort=desc" 2>/dev/null) || true + + if [[ "$http_code" != "200" ]]; then + rm -f "$temp_file" + msg_warn "GitLab API call failed for ${repo} (HTTP ${http_code})" + return 22 + fi + + local version + version=$(jq -r '.[0].tag_name // empty' "$temp_file") + rm -f "$temp_file" + + if [[ -z "$version" ]]; then + msg_error "Could not determine latest version for ${repo}" + return 250 + fi + + if [[ "$strip_v" == "true" ]]; then + [[ "$version" =~ ^v[0-9] ]] && version="${version:1}" + fi + + echo "$version" +} + +# ------------------------------------------------------------------------------ +# Checks for new GitLab release (latest tag). +# +# Description: +# - Queries the GitLab API for the latest release tag +# - Compares it to a local cached version (~/.) +# - If newer, sets global CHECK_UPDATE_RELEASE and returns 0 +# +# Usage: +# if check_for_gl_release "myapp" "owner/repo" [optional] "v1.2.3"; then +# # trigger update... 
+# fi +# exit 0 +# } (end of update_script not from the function) +# +# Notes: +# - Requires `jq` (auto-installed if missing) +# - Supports GITLAB_TOKEN env var for private/rate-limited repos +# - Does not modify anything, only checks version state +# ------------------------------------------------------------------------------ +check_for_gl_release() { + local app="$1" + local source="$2" + local pinned_version_in="${3:-}" # optional + local pin_reason="${4:-}" # optional reason shown to user + local app_lc="${app,,}" + local current_file="$HOME/.${app_lc}" + + msg_info "Checking for update: ${app}" + + # DNS check + if ! getent hosts gitlab.com >/dev/null 2>&1; then + msg_error "Network error: cannot resolve gitlab.com" + return 6 + fi + + ensure_dependencies jq + + local repo_encoded + repo_encoded=$(printf '%s' "$source" | sed 's|/|%2F|g') + + local header=() + [[ -n "${GITLAB_TOKEN:-}" ]] && header=(-H "PRIVATE-TOKEN: $GITLAB_TOKEN") + + local releases_json="" http_code="" + + # For pinned versions, try to fetch the specific release tag first + if [[ -n "$pinned_version_in" ]]; then + local pinned_encoded="${pinned_version_in//\//%2F}" + http_code=$(curl -sSL --max-time 20 -w "%{http_code}" -o /tmp/gl_check.json \ + "${header[@]}" \ + "https://gitlab.com/api/v4/projects/$repo_encoded/releases/$pinned_encoded" 2>/dev/null) || true + if [[ "$http_code" == "200" ]] && [[ -s /tmp/gl_check.json ]]; then + releases_json="[$(/dev/null) || true + + if [[ "$http_code" == "200" ]] && [[ -s /tmp/gl_check.json ]]; then + releases_json=$(/dev/null) + if ((${#legacy_files[@]} == 1)); then + current="$(<"${legacy_files[0]}")" + echo "${current#v}" >"$current_file" + rm -f "${legacy_files[0]}" + fi + fi + if [[ "$current" =~ ^v[0-9] ]]; then + current="${current:1}" + fi + + # Pinned version handling + if [[ -n "$pinned_version_in" ]]; then + local pin_clean + if [[ "$pinned_version_in" =~ ^v[0-9] ]]; then + pin_clean="${pinned_version_in:1}" + else + 
pin_clean="$pinned_version_in" + fi + local match_raw="" + for i in "${!clean_tags[@]}"; do + if [[ "${clean_tags[$i]}" == "$pin_clean" ]]; then + match_raw="${raw_tags[$i]}" + break + fi + done + + if [[ -z "$match_raw" ]]; then + msg_error "Pinned version ${pinned_version_in} not found upstream" + return 250 + fi + + if [[ "$current" != "$pin_clean" ]]; then + CHECK_UPDATE_RELEASE="$match_raw" + msg_ok "Update available: ${app} ${current:-not installed} → ${pin_clean}" + return 0 + fi + + if [[ -n "$pin_reason" ]]; then + msg_ok "No update available: ${app} (${current}) - update held back: ${pin_reason}" + else + msg_ok "No update available: ${app} (${current}) - update temporarily held back due to issues with newer releases" + fi + return 1 + fi + + # No pinning → use latest + if [[ -z "$current" || "$current" != "$latest_clean" ]]; then + CHECK_UPDATE_RELEASE="$latest_raw" + msg_ok "Update available: ${app} ${current:-not installed} → ${latest_clean}" + return 0 + fi + + msg_ok "No update available: ${app} (${latest_clean})" + return 1 +} + +# ------------------------------------------------------------------------------ +# Scan older GitLab releases for a matching asset (fallback helper). +# +# Description: +# When the latest release does not contain the expected asset +# (e.g. .deb for the current arch, or a custom pattern), walks back +# through up to 15 recent releases and returns the first release JSON +# that has a matching asset. Used internally by fetch_and_deploy_gl_release. 
+# +# Usage (internal): +# _gl_scan_older_releases "owner/repo" "owner%2Frepo" "https://gitlab.com" \ +# "binary|prebuild|singlefile" "$asset_pattern" "$skip_tag" +# +# Returns: +# - stdout: JSON of the matching release (single object) on success +# - 0 on success, 22 on API error, 250 if no match found +# ------------------------------------------------------------------------------ +_gl_scan_older_releases() { + local repo="$1" + local repo_encoded="$2" + local base_url="${3:-https://gitlab.com}" + local mode="$4" + local asset_pattern="$5" + local skip_tag="$6" + + local header=() + [[ -n "${GITLAB_TOKEN:-}" ]] && header=(-H "PRIVATE-TOKEN: $GITLAB_TOKEN") + + local releases_list + releases_list=$(curl --connect-timeout 10 --max-time 30 -fsSL \ + "${header[@]}" \ + "${base_url}/api/v4/projects/${repo_encoded}/releases?per_page=15&order_by=released_at&sort=desc" 2>/dev/null) || { + msg_warn "Failed to fetch older releases for ${repo}" + return 22 + } + + local count + count=$(echo "$releases_list" | jq 'length' 2>/dev/null || echo 0) + [[ "$count" -eq 0 ]] && return 250 + + for ((i = 0; i < count; i++)); do + local rel_tag + rel_tag=$(echo "$releases_list" | jq -r ".[$i].tag_name") + + # Skip the tag we already checked + [[ "$rel_tag" == "$skip_tag" ]] && continue + + # Asset URLs for this release (direct_asset_url preferred, fallback to url) + local asset_urls + asset_urls=$(echo "$releases_list" | jq -r ".[$i].assets.links // [] | .[] | .direct_asset_url // .url") + [[ -z "$asset_urls" ]] && continue + + local has_match=false + + if [[ "$mode" == "binary" ]]; then + local arch + arch=$(dpkg --print-architecture 2>/dev/null || uname -m) + [[ "$arch" == "x86_64" ]] && arch="amd64" + [[ "$arch" == "aarch64" ]] && arch="arm64" + + # Check with explicit pattern first, then arch heuristic, then any .deb + if [[ -n "$asset_pattern" ]]; then + while read -r u; do + case "${u##*/}" in $asset_pattern) + has_match=true + break + ;; + esac + done <<<"$asset_urls" + fi + if 
[[ "$has_match" != "true" ]]; then + echo "$asset_urls" | grep -qE "($arch|amd64|x86_64|aarch64|arm64).*\.deb$" && has_match=true + fi + if [[ "$has_match" != "true" ]]; then + echo "$asset_urls" | grep -qE '\.deb$' && has_match=true + fi + + elif [[ "$mode" == "prebuild" || "$mode" == "singlefile" ]]; then + while read -r u; do + case "${u##*/}" in $asset_pattern) + has_match=true + break + ;; + esac + done <<<"$asset_urls" + fi + + if [[ "$has_match" == "true" ]]; then + local use_fallback="y" + if [[ -t 0 ]]; then + msg_warn "Release ${skip_tag} has no matching asset. Previous release ${rel_tag} has a compatible asset." + read -rp "Use version ${rel_tag} instead? [Y/n] (auto-yes in 60s): " -t 60 use_fallback || use_fallback="y" + use_fallback="${use_fallback:-y}" + fi + + if [[ "${use_fallback,,}" == "y" || "${use_fallback,,}" == "yes" ]]; then + echo "$releases_list" | jq ".[$i]" + return 0 + else + return 250 + fi + fi + done + + return 250 +} + +function fetch_and_deploy_gl_release() { + local app="$1" + local repo="$2" + local mode="${3:-tarball}" + local version="${var_appversion:-${4:-latest}}" + local target="${5:-/opt/$app}" + local asset_pattern="${6:-}" + + if [[ -z "$app" ]]; then + app="${repo##*/}" + if [[ -z "$app" ]]; then + msg_error "fetch_and_deploy_gl_release requires app name or valid repo" + return 1 + fi + fi + + local app_lc=$(echo "${app,,}" | tr -d ' ') + local version_file="$HOME/.${app_lc}" + + local api_timeout="--connect-timeout 10 --max-time 60" + local download_timeout="--connect-timeout 15 --max-time 900" + + local current_version="" + [[ -f "$version_file" ]] && current_version=$(<"$version_file") + + ensure_dependencies jq + + local repo_encoded + repo_encoded=$(printf '%s' "$repo" | sed 's|/|%2F|g') + + local api_base="https://gitlab.com/api/v4/projects/$repo_encoded/releases" + local api_url + if [[ "$version" != "latest" ]]; then + api_url="$api_base/$version" + else + 
api_url="$api_base?per_page=1&order_by=released_at&sort=desc" + fi + + local header=() + [[ -n "${GITLAB_TOKEN:-}" ]] && header=(-H "PRIVATE-TOKEN: $GITLAB_TOKEN") + + local max_retries=3 retry_delay=2 attempt=1 success=false http_code + + while ((attempt <= max_retries)); do + http_code=$(curl $api_timeout -sSL -w "%{http_code}" -o /tmp/gl_rel.json "${header[@]}" "$api_url" 2>/dev/null) || true + if [[ "$http_code" == "200" ]]; then + success=true + break + elif [[ "$http_code" == "429" ]]; then + if ((attempt < max_retries)); then + msg_warn "GitLab API rate limit hit, retrying in ${retry_delay}s... (attempt $attempt/$max_retries)" + sleep "$retry_delay" + retry_delay=$((retry_delay * 2)) + fi + else + sleep "$retry_delay" + fi + ((attempt++)) + done + + if ! $success; then + if [[ "$http_code" == "401" ]]; then + msg_error "GitLab API authentication failed (HTTP 401)." + if [[ -n "${GITLAB_TOKEN:-}" ]]; then + msg_error "Your GITLAB_TOKEN appears to be invalid or expired." + else + msg_error "The repository may require authentication. Try: export GITLAB_TOKEN=\"glpat-your_token\"" + fi + elif [[ "$http_code" == "404" ]]; then + msg_error "GitLab project or release not found (HTTP 404)." + msg_error "Ensure '$repo' is correct and the project is accessible." + elif [[ "$http_code" == "429" ]]; then + msg_error "GitLab API rate limit exceeded (HTTP 429)." + msg_error "To increase the limit, export a GitLab token before running the script:" + msg_error " export GITLAB_TOKEN=\"glpat-your_token_here\"" + elif [[ "$http_code" == "000" || -z "$http_code" ]]; then + msg_error "GitLab API connection failed (no response)." 
+ msg_error "Check your network/DNS: curl -sSL https://gitlab.com/api/v4/version" + else + msg_error "Failed to fetch release metadata (HTTP $http_code)" + fi + return 1 + fi + + local json tag_name + json=$(/dev/null || uname -m) + [[ "$arch" == "x86_64" ]] && arch="amd64" + [[ "$arch" == "aarch64" ]] && arch="arm64" + + local assets url_match="" + assets=$(_gl_asset_urls "$json") + + if [[ -n "$asset_pattern" ]]; then + for u in $assets; do + case "${u##*/}" in + $asset_pattern) + url_match="$u" + break + ;; + esac + done + fi + + if [[ -z "$url_match" ]]; then + for u in $assets; do + if [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]]; then + url_match="$u" + break + fi + done + fi + + if [[ -z "$url_match" ]]; then + for u in $assets; do + [[ "$u" =~ \.deb$ ]] && url_match="$u" && break + done + fi + + if [[ -z "$url_match" ]]; then + local fallback_json + if fallback_json=$(_gl_scan_older_releases "$repo" "$repo_encoded" "https://gitlab.com" "binary" "$asset_pattern" "$tag_name"); then + json="$fallback_json" + tag_name=$(echo "$json" | jq -r '.tag_name // empty') + [[ "$tag_name" =~ ^v[0-9] ]] && version="${tag_name:1}" || version="$tag_name" + msg_info "Fetching GitLab release: $app ($version)" + assets=$(_gl_asset_urls "$json") + if [[ -n "$asset_pattern" ]]; then + for u in $assets; do + case "${u##*/}" in $asset_pattern) + url_match="$u" + break + ;; + esac + done + fi + if [[ -z "$url_match" ]]; then + for u in $assets; do + [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]] && url_match="$u" && break + done + fi + if [[ -z "$url_match" ]]; then + for u in $assets; do + [[ "$u" =~ \.deb$ ]] && url_match="$u" && break + done + fi + fi + fi + + if [[ -z "$url_match" ]]; then + msg_error "No suitable .deb asset found for $app" + rm -rf "$tmpdir" + return 1 + fi + + filename="${url_match##*/}" + curl $download_timeout -fsSL "${header[@]}" -o "$tmpdir/$filename" "$url_match" || { + msg_error "Download failed: $url_match" + rm -rf "$tmpdir" + 
return 1 + } + + chmod 644 "$tmpdir/$filename" + local dpkg_opts="" + [[ "${DPKG_FORCE_CONFOLD:-}" == "1" ]] && dpkg_opts="-o Dpkg::Options::=--force-confold" + [[ "${DPKG_FORCE_CONFNEW:-}" == "1" ]] && dpkg_opts="-o Dpkg::Options::=--force-confnew" + DEBIAN_FRONTEND=noninteractive SYSTEMD_OFFLINE=1 $STD apt install -y $dpkg_opts "$tmpdir/$filename" || { + SYSTEMD_OFFLINE=1 $STD dpkg -i "$tmpdir/$filename" || { + msg_error "Both apt and dpkg installation failed" + rm -rf "$tmpdir" + return 1 + } + } + + ### Prebuild Mode ### + elif [[ "$mode" == "prebuild" ]]; then + local pattern="${6%\"}" + pattern="${pattern#\"}" + [[ -z "$pattern" ]] && { + msg_error "Mode 'prebuild' requires 6th parameter (asset filename pattern)" + rm -rf "$tmpdir" + return 1 + } + + local asset_url="" + for u in $(_gl_asset_urls "$json"); do + filename_candidate="${u##*/}" + case "$filename_candidate" in + $pattern) + asset_url="$u" + break + ;; + esac + done + + if [[ -z "$asset_url" ]]; then + local fallback_json + if fallback_json=$(_gl_scan_older_releases "$repo" "$repo_encoded" "https://gitlab.com" "prebuild" "$pattern" "$tag_name"); then + json="$fallback_json" + tag_name=$(echo "$json" | jq -r '.tag_name // empty') + [[ "$tag_name" =~ ^v[0-9] ]] && version="${tag_name:1}" || version="$tag_name" + msg_info "Fetching GitLab release: $app ($version)" + for u in $(_gl_asset_urls "$json"); do + filename_candidate="${u##*/}" + case "$filename_candidate" in $pattern) + asset_url="$u" + break + ;; + esac + done + fi + fi + + [[ -z "$asset_url" ]] && { + msg_error "No asset matching '$pattern' found" + rm -rf "$tmpdir" + return 1 + } + + filename="${asset_url##*/}" + curl $download_timeout -fsSL "${header[@]}" -o "$tmpdir/$filename" "$asset_url" || { + msg_error "Download failed: $asset_url" + rm -rf "$tmpdir" + return 1 + } + + local unpack_tmp + unpack_tmp=$(mktemp -d) + mkdir -p "$target" + if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then + rm -rf "${target:?}/"* + fi + + if [[ "$filename" == 
*.zip ]]; then + ensure_dependencies unzip + unzip -q "$tmpdir/$filename" -d "$unpack_tmp" || { + msg_error "Failed to extract ZIP archive" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + elif [[ "$filename" == *.tar.* || "$filename" == *.tgz || "$filename" == *.txz ]]; then + tar --no-same-owner -xf "$tmpdir/$filename" -C "$unpack_tmp" || { + msg_error "Failed to extract TAR archive" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + else + msg_error "Unsupported archive format: $filename" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + fi + + local top_entries inner_dir + top_entries=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1) + if [[ "$(echo "$top_entries" | wc -l)" -eq 1 && -d "$top_entries" ]]; then + inner_dir="$top_entries" + shopt -s dotglob nullglob + if compgen -G "$inner_dir/*" >/dev/null; then + cp -r "$inner_dir"/* "$target/" || { + msg_error "Failed to copy contents from $inner_dir to $target" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + else + msg_error "Inner directory is empty: $inner_dir" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + fi + shopt -u dotglob nullglob + else + shopt -s dotglob nullglob + if compgen -G "$unpack_tmp/*" >/dev/null; then + cp -r "$unpack_tmp"/* "$target/" || { + msg_error "Failed to copy contents to $target" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + } + else + msg_error "Unpacked archive is empty" + rm -rf "$tmpdir" "$unpack_tmp" + return 1 + fi + shopt -u dotglob nullglob + fi + + ### Singlefile Mode ### + elif [[ "$mode" == "singlefile" ]]; then + local pattern="${6%\"}" + pattern="${pattern#\"}" + [[ -z "$pattern" ]] && { + msg_error "Mode 'singlefile' requires 6th parameter (asset filename pattern)" + rm -rf "$tmpdir" + return 1 + } + + local asset_url="" + for u in $(_gl_asset_urls "$json"); do + filename_candidate="${u##*/}" + case "$filename_candidate" in + $pattern) + asset_url="$u" + break + ;; + esac + done + + if [[ -z "$asset_url" ]]; then + local fallback_json + if 
fallback_json=$(_gl_scan_older_releases "$repo" "$repo_encoded" "https://gitlab.com" "singlefile" "$pattern" "$tag_name"); then + json="$fallback_json" + tag_name=$(echo "$json" | jq -r '.tag_name // empty') + [[ "$tag_name" =~ ^v[0-9] ]] && version="${tag_name:1}" || version="$tag_name" + msg_info "Fetching GitLab release: $app ($version)" + for u in $(_gl_asset_urls "$json"); do + filename_candidate="${u##*/}" + case "$filename_candidate" in $pattern) + asset_url="$u" + break + ;; + esac + done + fi + fi + + [[ -z "$asset_url" ]] && { + msg_error "No asset matching '$pattern' found" + rm -rf "$tmpdir" + return 1 + } + + filename="${asset_url##*/}" + mkdir -p "$target" + + local use_filename="${USE_ORIGINAL_FILENAME:-false}" + local target_file="$app" + [[ "$use_filename" == "true" ]] && target_file="$filename" + + curl $download_timeout -fsSL "${header[@]}" -o "$target/$target_file" "$asset_url" || { + msg_error "Download failed: $asset_url" + rm -rf "$tmpdir" + return 1 + } + + if [[ "$target_file" != *.jar && -f "$target/$target_file" ]]; then + chmod +x "$target/$target_file" + fi + + else + msg_error "Unknown mode: $mode" + rm -rf "$tmpdir" + return 1 + fi + + echo "$version" >"$version_file" + msg_ok "Deployed: $app ($version)" + rm -rf "$tmpdir" +} + +# ------------------------------------------------------------------------------ +# Download NLTK data packages directly from GitHub, bypassing Python. +# Avoids CPU-instruction failures (SIGILL) on older hardware lacking AVX. 
+# +# Usage: +# setup_nltk "averaged_perceptron_tagger_eng" "/nltk_data" +# setup_nltk "snowball_data stopwords punkt_tab" "/usr/share/nltk_data" +# +# Parameters: +# $1 - Space-separated list of NLTK package IDs +# $2 - Target directory (default: /usr/share/nltk_data) +# +# Returns: 0 on success, non-zero if any package failed +# ------------------------------------------------------------------------------ +function setup_nltk() { + local packages="${1:?setup_nltk requires at least one package name}" + local target_dir="${2:-/usr/share/nltk_data}" + local NLTK_INDEX_URL="https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml" + local index_xml rc=0 + + ensure_dependencies unzip + + index_xml=$(curl_with_retry "$NLTK_INDEX_URL" "-") || { + msg_error "Failed to fetch NLTK package index" + return 1 + } + + local pkg + for pkg in $packages; do + msg_info "Downloading NLTK: $pkg" + local pkg_line subdir pkg_url do_unzip tmp_zip + + pkg_line=$(echo "$index_xml" | grep "id=\"${pkg}\"" | head -1) + if [[ -z "$pkg_line" ]]; then + msg_error "NLTK package not found in index: $pkg" + rc=1 + continue + fi + + subdir=$(echo "$pkg_line" | grep -oP 'subdir="\K[^"]+') + pkg_url=$(echo "$pkg_line" | grep -oP 'url="\K[^"]+') + do_unzip=$(echo "$pkg_line" | grep -oP 'unzip="\K[^"]+') + + if [[ -z "$subdir" || -z "$pkg_url" ]]; then + msg_error "Could not parse NLTK index entry for: $pkg" + rc=1 + continue + fi + + mkdir -p "${target_dir}/${subdir}" + tmp_zip=$(mktemp --suffix=.zip) + + if CURL_TIMEOUT=120 curl_with_retry "$pkg_url" "$tmp_zip"; then + if [[ "$do_unzip" == "1" ]]; then + $STD unzip -q -o "$tmp_zip" -d "${target_dir}/${subdir}/" + rm -f "$tmp_zip" + else + mv "$tmp_zip" "${target_dir}/${subdir}/${pkg}.zip" + fi + msg_ok "Downloaded NLTK: $pkg" + else + msg_error "Failed to download NLTK package: $pkg" + rm -f "$tmp_zip" + rc=1 + fi + done + + return $rc +} diff --git a/misc/vm-core.func b/misc/vm-core.func index 852945152..2c82c0fb5 100644 --- 
a/misc/vm-core.func +++ b/misc/vm-core.func @@ -42,7 +42,7 @@ get_header() { if [ ! -s "$local_header_path" ]; then if ! curl -fsSL "$header_url" -o "$local_header_path"; then - return 1 + return 250 fi fi @@ -188,32 +188,18 @@ silent() { trap 'error_handler' ERR if [[ $rc -ne 0 ]]; then - # Source explain_exit_code if needed - if ! declare -f explain_exit_code >/dev/null 2>&1; then - source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVE/raw/branch/main/misc/error_handler.func) 2>/dev/null || true - fi + # Return instead of exit so that callers can use `$STD cmd || true` + # When no || is used, set -e + ERR trap catches it via error_handler() + export _SILENT_FAILED_RC="$rc" + export _SILENT_FAILED_CMD="$cmd" + export _SILENT_FAILED_LINE="$caller_line" + export _SILENT_FAILED_LOG="$logfile" - local explanation="" - if declare -f explain_exit_code >/dev/null 2>&1; then - explanation="$(explain_exit_code "$rc")" - fi - - printf "\e[?25h" - if [[ -n "$explanation" ]]; then - msg_error "in line ${caller_line}: exit code ${rc} (${explanation})" - else - msg_error "in line ${caller_line}: exit code ${rc}" - fi - msg_custom "→" "${YWB}" "${cmd}" - - if [[ -s "$logfile" ]]; then - echo -e "\n${TAB}--- Last 20 lines of log ---" - tail -n 20 "$logfile" - echo -e "${TAB}----------------------------\n" - fi - - exit "$rc" + return "$rc" fi + + # Clear stale flags on success + unset _SILENT_FAILED_RC _SILENT_FAILED_CMD _SILENT_FAILED_LINE _SILENT_FAILED_LOG 2>/dev/null || true } # ------------------------------------------------------------------------------ @@ -591,18 +577,31 @@ check_hostname_conflict() { } set_description() { + local app_name script_slug script_url donate_url + app_name=$(echo "${APP,,}" | tr ' ' '-') + script_slug="${SCRIPT_SLUG:-${app_name}}" + script_slug="$(echo "$script_slug" | tr '[:upper:]' '[:lower:]' | tr ' ' '-')" + script_url="https://community-scripts.org/scripts/${script_slug}" + 
donate_url="https://community-scripts.org/donate" + DESCRIPTION=$( cat < - + Logo

${NSAPP} VM

- - spend Coffee + + Sponsoring and donations + +

+ +

+ + Open script page

diff --git a/tools/addon/adguardhome-sync.sh b/tools/addon/adguardhome-sync.sh index 4f597b536..406c00d5d 100644 --- a/tools/addon/adguardhome-sync.sh +++ b/tools/addon/adguardhome-sync.sh @@ -18,6 +18,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) 2>/dev/null || true +declare -f init_tool_telemetry &>/dev/null && init_tool_telemetry "adguardhome-sync" "addon" # Enable error handling set -Eeuo pipefail @@ -34,7 +35,6 @@ DEFAULT_PORT=8080 # Initialize all core functions (colors, formatting, icons, STD mode) load_functions -init_tool_telemetry "" "addon" # ============================================================================== # HEADER @@ -55,7 +55,7 @@ EOF # HELPER FUNCTIONS # ============================================================================== get_ip() { - ifconfig | grep -v '127.0.0.1' | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -m1 -Eo '([0-9]*\.){3}[0-9]*' || echo "127.0.0.1" + hostname -I 2>/dev/null | awk '{print $1}' || ip -4 addr show scope global 2>/dev/null | awk '/inet / {print $2}' | cut -d/ -f1 | head -n1 || echo "127.0.0.1" } # ============================================================================== diff --git a/tools/addon/all-templates.sh b/tools/addon/all-templates.sh index f8d35619d..b23e98e00 100644 --- a/tools/addon/all-templates.sh +++ b/tools/addon/all-templates.sh @@ -116,7 +116,7 @@ fi PCT_OPTIONS=" -features keyctl=1,nesting=1 -hostname $NAME - -tags proxmox-helper-scripts + -tags community-script -onboot 0 -cores 2 -memory 2048 diff --git a/tools/addon/arcane.sh b/tools/addon/arcane.sh index 8faf16106..b63b8306f 100644 --- a/tools/addon/arcane.sh +++ 
b/tools/addon/arcane.sh @@ -13,6 +13,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) 2>/dev/null || true +declare -f init_tool_telemetry &>/dev/null && init_tool_telemetry "arcane" "addon" # Enable error handling set -Eeuo pipefail diff --git a/tools/addon/coder-code-server.sh b/tools/addon/coder-code-server.sh index ac3a1d056..95039aebe 100644 --- a/tools/addon/coder-code-server.sh +++ b/tools/addon/coder-code-server.sh @@ -89,26 +89,31 @@ VERSION=$(curl -fsSL https://api.github.com/repos/coder/code-server/releases/lat awk '{print substr($2, 3, length($2)-4) }') msg_info "Installing Code-Server v${VERSION}" +config_path="${HOME}/.config/code-server/config.yaml" +preexisting_config=false -if [ -f ~/.config/code-server/config.yaml ]; then - existing_config=true +if [ -f "$config_path" ]; then + preexisting_config=true fi curl -fOL https://github.com/coder/code-server/releases/download/v"$VERSION"/code-server_"${VERSION}"_amd64.deb &>/dev/null dpkg -i code-server_"${VERSION}"_amd64.deb &>/dev/null rm -rf code-server_"${VERSION}"_amd64.deb -mkdir -p ~/.config/code-server/ -systemctl enable -q --now code-server@"$USER" +mkdir -p "${HOME}/.config/code-server/" -if [ $existing_config = false ]; then -cat <~/.config/code-server/config.yaml +if [ "$preexisting_config" = false ]; then +cat <"$config_path" bind-addr: 0.0.0.0:8680 auth: none password: cert: false EOF fi +systemctl enable -q --now code-server@"$USER" systemctl restart code-server@"$USER" +if ! systemctl is-active --quiet code-server@"$USER"; then + error_exit "code-server service failed to start." 
+fi msg_ok "Installed Code-Server v${VERSION} on $hostname" echo -e "${APP} should be reachable by going to the following URL. diff --git a/tools/addon/coolify.sh b/tools/addon/coolify.sh index 258260ee6..31b4ebe44 100644 --- a/tools/addon/coolify.sh +++ b/tools/addon/coolify.sh @@ -18,6 +18,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) 2>/dev/null || true +declare -f init_tool_telemetry &>/dev/null && init_tool_telemetry "coolify" "addon" # Enable error handling set -Eeuo pipefail diff --git a/tools/addon/copyparty.sh b/tools/addon/copyparty.sh index b0ba19b77..a56e862e0 100644 --- a/tools/addon/copyparty.sh +++ b/tools/addon/copyparty.sh @@ -14,12 +14,12 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) 2>/dev/null || true +declare -f init_tool_telemetry &>/dev/null && init_tool_telemetry "copyparty" "addon" # Enable error handling set -Eeuo pipefail trap 'error_handler' ERR load_functions -init_tool_telemetry "" "addon" # ============================================================================== # CONFIGURATION @@ -165,9 +165,9 @@ function install() { else read -rp "${TAB}Set admin username [admin]: " admin_user admin_user=${admin_user:-admin} - read -rsp "${TAB}Set admin password [helper-scripts.com]: " admin_pass + read -rsp "${TAB}Set admin password 
[community-scripts.org]: " admin_pass echo "" - admin_pass=${admin_pass:-helper-scripts.com} + admin_pass=${admin_pass:-community-scripts.org} msg_ok "Configured with admin user: ${admin_user}" fi diff --git a/tools/addon/cronmaster.sh b/tools/addon/cronmaster.sh index 8973188d4..450572a68 100644 --- a/tools/addon/cronmaster.sh +++ b/tools/addon/cronmaster.sh @@ -14,12 +14,12 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) 2>/dev/null || true +declare -f init_tool_telemetry &>/dev/null && init_tool_telemetry "cronmaster" "addon" # Enable error handling set -Eeuo pipefail trap 'error_handler' ERR load_functions -init_tool_telemetry "" "addon" # ============================================================================== # CONFIGURATION @@ -147,7 +147,7 @@ EOF # Create update script msg_info "Creating update script" ensure_usr_local_bin_persist - cat </usr/local/bin/update_cronmaster + cat <<'EOF' >/usr/local/bin/update_cronmaster #!/usr/bin/env bash # CronMaster Update Script type=update bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/tools/addon/cronmaster.sh)" diff --git a/tools/addon/daemonsync_2.2.0.0059_amd64.deb b/tools/addon/daemonsync_2.2.0.0059_amd64.deb deleted file mode 100644 index 0444976c7..000000000 Binary files a/tools/addon/daemonsync_2.2.0.0059_amd64.deb and /dev/null differ diff --git a/tools/addon/dockge.sh b/tools/addon/dockge.sh index 0341273c0..ef8e2d27a 100644 --- a/tools/addon/dockge.sh +++ b/tools/addon/dockge.sh @@ -18,6 +18,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV source <(curl -fsSL 
https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) 2>/dev/null || true +declare -f init_tool_telemetry &>/dev/null && init_tool_telemetry "dockge" "addon" # Enable error handling set -Eeuo pipefail diff --git a/tools/addon/dokploy.sh b/tools/addon/dokploy.sh index 6538ad1f7..85dd7d53b 100644 --- a/tools/addon/dokploy.sh +++ b/tools/addon/dokploy.sh @@ -18,6 +18,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) 2>/dev/null || true +declare -f init_tool_telemetry &>/dev/null && init_tool_telemetry "dokploy" "addon" # Enable error handling set -Eeuo pipefail diff --git a/tools/addon/filebrowser-quantum.sh b/tools/addon/filebrowser-quantum.sh index 7d470ecac..66fdb3455 100644 --- a/tools/addon/filebrowser-quantum.sh +++ b/tools/addon/filebrowser-quantum.sh @@ -43,6 +43,21 @@ IP=$(ip -4 addr show "$IFACE" | awk '/inet / {print $2}' | cut -d/ -f1 | head -n [[ -z "$IP" ]] && IP=$(hostname -I | awk '{print $1}') [[ -z "$IP" ]] && IP="127.0.0.1" +# Proxmox Host Warning +if [[ -d "/etc/pve" ]]; then + echo -e "${RD}⚠️ Warning: Running this addon directly on the Proxmox host is not recommended!${CL}" + echo -e "${YW} Only the boot disk will be visible — passthrough drives will not be indexed.${CL}" + echo -e "${YW} This causes incorrect disk usage stats and incomplete file browsing.${CL}" + echo -e "${YW} Run this addon inside an LXC or VM instead and mount your drives there.${CL}" + 
echo "" + echo -n "Continue anyway on the Proxmox host? (y/N): " + read -r host_confirm + if [[ ! "${host_confirm,,}" =~ ^(y|yes)$ ]]; then + echo -e "${YW}Aborted.${CL}" + exit 0 + fi +fi + # OS Detection if [[ -f "/etc/alpine-release" ]]; then OS="Alpine" @@ -201,9 +216,9 @@ server: - neverWatchPath: "/lost+found" auth: adminUsername: admin - adminPassword: helper-scripts.com + adminPassword: community-scripts.org EOF - msg_ok "Configured with default admin (admin / helper-scripts.com)" + msg_ok "Configured with default admin (admin / community-scripts.org)" fi msg_info "Creating service" diff --git a/tools/addon/filebrowser.sh b/tools/addon/filebrowser.sh index d8f0c1e98..a7db5f122 100644 --- a/tools/addon/filebrowser.sh +++ b/tools/addon/filebrowser.sh @@ -41,6 +41,21 @@ IP=$(ip -4 addr show "$IFACE" | awk '/inet / {print $2}' | cut -d/ -f1 | head -n [[ -z "$IP" ]] && IP=$(hostname -I | awk '{print $1}') [[ -z "$IP" ]] && IP="127.0.0.1" +# Proxmox Host Warning +if [[ -d "/etc/pve" ]]; then + echo -e "${RD}⚠️ Warning: Running this addon directly on the Proxmox host is not recommended!${CL}" + echo -e "${YW} Only the boot disk will be visible — passthrough drives will not be indexed.${CL}" + echo -e "${YW} This causes incorrect disk usage stats and incomplete file browsing.${CL}" + echo -e "${YW} Run this addon inside an LXC or VM instead and mount your drives there.${CL}" + echo "" + echo -n "Continue anyway on the Proxmox host? (y/N): " + read -r host_confirm + if [[ ! 
"${host_confirm,,}" =~ ^(y|yes)$ ]]; then + echo -e "${YW}Aborted.${CL}" + exit 0 + fi +fi + # Detect OS if [[ -f "/etc/alpine-release" ]]; then OS="Alpine" @@ -131,17 +146,18 @@ if [[ "${install_prompt,,}" =~ ^(y|yes)$ ]]; then cd /usr/local/community-scripts filebrowser config init -a '0.0.0.0' -p "$PORT" -d "$DB_PATH" &>/dev/null filebrowser config set -a '0.0.0.0' -p "$PORT" -d "$DB_PATH" &>/dev/null - filebrowser config init --auth.method=noauth &>/dev/null - filebrowser config set --auth.method=noauth &>/dev/null - filebrowser users add ID 1 --perm.admin &>/dev/null + filebrowser config set --auth.method=noauth --database "$DB_PATH" &>/dev/null + if ! filebrowser users update 1 --perm.admin --database "$DB_PATH" &>/dev/null; then + filebrowser users add admin community-scripts.org --perm.admin --database "$DB_PATH" &>/dev/null + fi msg_ok "No Authentication configured" else msg_info "Setting up default authentication" cd /usr/local/community-scripts filebrowser config init -a '0.0.0.0' -p "$PORT" -d "$DB_PATH" &>/dev/null filebrowser config set -a '0.0.0.0' -p "$PORT" -d "$DB_PATH" &>/dev/null - filebrowser users add admin helper-scripts.com --perm.admin --database "$DB_PATH" &>/dev/null - msg_ok "Default authentication configured (admin:helper-scripts.com)" + filebrowser users add admin community-scripts.org --perm.admin --database "$DB_PATH" &>/dev/null + msg_ok "Default authentication configured (admin:community-scripts.org)" fi msg_info "Creating service" diff --git a/tools/addon/homebrew.sh b/tools/addon/homebrew.sh new file mode 100644 index 000000000..16b114b7a --- /dev/null +++ b/tools/addon/homebrew.sh @@ -0,0 +1,173 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MorganCSIT | MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://brew.sh | Github: https://github.com/Homebrew/brew + +if ! 
command -v curl &>/dev/null; then + printf "\r\e[2K%b" '\033[93m Setup Source \033[m' >&2 + apt-get update >/dev/null 2>&1 + apt-get install -y curl >/dev/null 2>&1 +fi +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/core.func) +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func) +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) 2>/dev/null || true + +# Enable error handling +set -Eeuo pipefail +trap 'error_handler' ERR +load_functions +init_tool_telemetry "" "addon" + +# ============================================================================== +# CONFIGURATION +# ============================================================================== +VERBOSE=${var_verbose:-no} +APP="homebrew" +APP_TYPE="tools" +INSTALL_PATH="/home/linuxbrew/.linuxbrew" + +# ============================================================================== +# OS DETECTION +# ============================================================================== +if [[ -f "/etc/alpine-release" ]]; then + echo -e "${CROSS} Alpine is not supported by Homebrew. Exiting." + exit 1 +elif grep -qE 'ID=debian|ID=ubuntu' /etc/os-release; then + OS="Debian" +else + echo -e "${CROSS} Unsupported OS detected. Exiting." 
+ exit 1 +fi + +# ============================================================================== +# UNINSTALL +# ============================================================================== +function uninstall() { + msg_info "Uninstalling Homebrew" + + BREW_USER=$(awk -F: '$3 >= 1000 && $3 < 65534 { print $1; exit }' /etc/passwd) + if [[ -n "$BREW_USER" ]]; then + BREW_USER_HOME=$(getent passwd "$BREW_USER" | cut -d: -f6) + for rc_file in "$BREW_USER_HOME/.bashrc" "$BREW_USER_HOME/.profile"; do + if [[ -f "$rc_file" ]]; then + sed -i '/# Homebrew (Linuxbrew)/,/^fi$/d' "$rc_file" + fi + done + fi + + rm -rf /home/linuxbrew + rm -f /etc/profile.d/homebrew.sh + groupdel linuxbrew &>/dev/null || true + + msg_ok "Homebrew has been uninstalled" +} + +# ============================================================================== +# INSTALL +# ============================================================================== +function install() { + msg_info "Detecting Non-Root User" + BREW_USER=$(awk -F: '$3 >= 1000 && $3 < 65534 { print $1; exit }' /etc/passwd) + if [[ -z "$BREW_USER" ]]; then + msg_warn "No non-root user found (uid >= 1000). Homebrew cannot run as root." + read -r -p "${TAB}Create a 'brew' user automatically? (y/N): " create_user_prompt + if [[ "${create_user_prompt,,}" =~ ^(y|yes)$ ]]; then + msg_info "Creating user 'brew'" + useradd -m -s /bin/bash brew + BREW_USER="brew" + msg_ok "Created user 'brew'" + else + msg_error "Cannot install Homebrew without a non-root user. Exiting." 
+ exit 1 + fi + fi + msg_ok "Detected User: $BREW_USER" + + msg_info "Installing Dependencies" + $STD apt update + $STD apt install -y build-essential git file procps + msg_ok "Installed Dependencies" + + msg_info "Setting Up Homebrew Prefix" + export PATH="/usr/sbin:$PATH" + groupadd -f linuxbrew + mkdir -p /home/linuxbrew/.linuxbrew + chown -R "$BREW_USER":linuxbrew /home/linuxbrew + chmod 2775 /home/linuxbrew + chmod 2775 /home/linuxbrew/.linuxbrew + usermod -aG linuxbrew "$BREW_USER" + msg_ok "Set Up Homebrew Prefix" + + msg_info "Installing Homebrew" + $STD su - "$BREW_USER" -c 'NONINTERACTIVE=1 /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"' + msg_ok "Installed Homebrew" + + msg_info "Configuring Shell Integration" + cat <<'EOF' >/etc/profile.d/homebrew.sh +#!/bin/bash +if [ -d "/home/linuxbrew/.linuxbrew" ]; then + eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)" +fi +EOF + chmod +x /etc/profile.d/homebrew.sh + + BREW_USER_HOME=$(getent passwd "$BREW_USER" | cut -d: -f6) + BREW_SHELL_BLOCK='\n# Homebrew (Linuxbrew)\nif [ -d "/home/linuxbrew/.linuxbrew" ]; then\n eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"\nfi' + for rc_file in "$BREW_USER_HOME/.bashrc" "$BREW_USER_HOME/.profile"; do + if ! 
grep -q 'linuxbrew' "$rc_file" 2>/dev/null; then + echo -e "$BREW_SHELL_BLOCK" >>"$rc_file" + fi + done + msg_ok "Configured Shell Integration" + + msg_info "Verifying Installation" + $STD su - "$BREW_USER" -c 'eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)" && brew --version' + msg_ok "Homebrew Verified" + + echo "" + msg_ok "Homebrew installed successfully" + msg_ok "Ready for user: ${BL}${BREW_USER}${CL}" + echo "" + echo -e "${TAB}${INFO} Usage: Switch to the brew user with a login shell:" + echo -e "${TAB} ${BL}su - ${BREW_USER}${CL}" + echo -e "${TAB} Then run: ${BL}brew install ${CL}" + echo -e "${TAB} Update with: ${BL}brew update${CL}" +} + +# ============================================================================== +# MAIN +# ============================================================================== +header_info + +if [[ -d "$INSTALL_PATH" ]]; then + msg_warn "Homebrew is already installed." + echo "" + + read -r -p "${TAB}Uninstall Homebrew? (y/N): " uninstall_prompt + if [[ "${uninstall_prompt,,}" =~ ^(y|yes)$ ]]; then + uninstall + exit 0 + fi + + msg_warn "No action selected. Exiting." + exit 0 +fi + +# Fresh installation +msg_warn "Homebrew is not installed." +echo "" +echo -e "${TAB}${INFO} This will install:" +echo -e "${TAB} - Homebrew (Linuxbrew) package manager" +echo -e "${TAB} - Shell integration for the detected non-root user" +echo "" + +read -r -p "${TAB}Install Homebrew? (y/N): " install_prompt +if [[ "${install_prompt,,}" =~ ^(y|yes)$ ]]; then + install +else + msg_warn "Installation cancelled. Exiting." 
+ exit 0 +fi diff --git a/tools/addon/immich-public-proxy.sh b/tools/addon/immich-public-proxy.sh index 002225b1a..e464382d5 100644 --- a/tools/addon/immich-public-proxy.sh +++ b/tools/addon/immich-public-proxy.sh @@ -14,6 +14,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) 2>/dev/null || true +declare -f init_tool_telemetry &>/dev/null && init_tool_telemetry "immich-public-proxy" "addon" # Enable error handling set -Eeuo pipefail @@ -30,7 +31,6 @@ DEFAULT_PORT=3000 # Initialize all core functions (colors, formatting, icons, $STD mode) load_functions -init_tool_telemetry "" "addon" # ============================================================================== # HEADER diff --git a/tools/addon/jellystat.sh b/tools/addon/jellystat.sh index bd53e5189..e9950b156 100644 --- a/tools/addon/jellystat.sh +++ b/tools/addon/jellystat.sh @@ -14,6 +14,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) 2>/dev/null || true +declare -f init_tool_telemetry &>/dev/null && init_tool_telemetry "jellystat" "addon" # Enable error handling set -Eeuo pipefail @@ -30,7 +31,6 @@ DEFAULT_PORT=3000 # Initialize all core functions (colors, formatting, icons, STD mode) load_functions -init_tool_telemetry "" "addon" # ============================================================================== # 
HEADER diff --git a/tools/addon/komodo.sh b/tools/addon/komodo.sh index 71921aa47..9815cac52 100644 --- a/tools/addon/komodo.sh +++ b/tools/addon/komodo.sh @@ -3,7 +3,7 @@ # Copyright (c) 2021-2026 community-scripts ORG # Author: MickLesk (CanbiZ) # License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -# Source: https://komo.do/ | Github: https://github.com/mbecker20/komodo +# Source: https://komo.do/ | Github: https://github.com/moghtech/komodo if ! command -v curl &>/dev/null; then printf "\r\e[2K%b" '\033[93m Setup Source \033[m' >&2 apt-get update >/dev/null 2>&1 || apk update >/dev/null 2>&1 @@ -13,6 +13,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) 2>/dev/null || true +declare -f init_tool_telemetry &>/dev/null && init_tool_telemetry "komodo" "addon" # Enable error handling set -Eeuo pipefail @@ -81,6 +82,7 @@ function update() { msg_error "Failed to create backup of ${COMPOSE_BASENAME}!" exit 235 } + cp "$COMPOSE_ENV" "${COMPOSE_ENV}.bak_$(date +%Y%m%d_%H%M%S)" 2>/dev/null || true GITHUB_URL="https://raw.githubusercontent.com/moghtech/komodo/main/compose/${COMPOSE_BASENAME}" if ! curl -fsSL "$GITHUB_URL" -o "$COMPOSE_FILE"; then @@ -89,8 +91,35 @@ function update() { exit 115 fi - if ! 
grep -qxF 'COMPOSE_KOMODO_BACKUPS_PATH=/etc/komodo/backups' "$COMPOSE_ENV"; then - sed -i '/^COMPOSE_KOMODO_IMAGE_TAG=latest$/a COMPOSE_KOMODO_BACKUPS_PATH=/etc/komodo/backups' "$COMPOSE_ENV" + # === v2 migration: image tag (latest is deprecated) === + if grep -q '^COMPOSE_KOMODO_IMAGE_TAG=latest' "$COMPOSE_ENV"; then + msg_info "Migrating to Komodo v2 image tag" + sed -i 's/^COMPOSE_KOMODO_IMAGE_TAG=latest/COMPOSE_KOMODO_IMAGE_TAG=2/' "$COMPOSE_ENV" + msg_ok "Migrated image tag to :2" + fi + + # === v2 migration: DB credential variable names === + if grep -q '^KOMODO_DB_USERNAME=' "$COMPOSE_ENV"; then + msg_info "Migrating database credential variables" + sed -i 's/^KOMODO_DB_USERNAME=/KOMODO_DATABASE_USERNAME=/' "$COMPOSE_ENV" + sed -i 's/^KOMODO_DB_PASSWORD=/KOMODO_DATABASE_PASSWORD=/' "$COMPOSE_ENV" + msg_ok "Migrated DB credential variables" + fi + + # === v2 migration: remove deprecated passkey (replaced by PKI) === + if grep -q '^KOMODO_PASSKEY=' "$COMPOSE_ENV"; then + sed -i '/^KOMODO_PASSKEY=/d' "$COMPOSE_ENV" + fi + + # === v2 migration: ensure PERIPHERY_CORE_PUBLIC_KEYS is set === + if ! grep -q 'PERIPHERY_CORE_PUBLIC_KEYS' "$COMPOSE_ENV"; then + echo '## Use the public key generated by Core.' >> "$COMPOSE_ENV" + echo 'PERIPHERY_CORE_PUBLIC_KEYS=file:/config/keys/core.pub' >> "$COMPOSE_ENV" + fi + + # === ensure backups path is set === + if ! 
grep -q 'COMPOSE_KOMODO_BACKUPS_PATH=' "$COMPOSE_ENV"; then + echo 'COMPOSE_KOMODO_BACKUPS_PATH=/etc/komodo/backups' >>"$COMPOSE_ENV" fi $STD docker compose -p komodo -f "$COMPOSE_FILE" --env-file "$COMPOSE_ENV" pull @@ -122,6 +151,23 @@ function check_proxmox_host() { # ============================================================================== # CHECK / INSTALL DOCKER # ============================================================================== +function ensure_openssl() { + if command -v openssl &>/dev/null; then + return + fi + msg_info "Installing openssl" + if [[ -f /etc/alpine-release ]]; then + $STD apk add openssl + elif command -v apt-get &>/dev/null; then + $STD apt-get update + $STD apt-get install -y openssl + else + msg_error "openssl is required but could not be installed automatically." + exit 10 + fi + msg_ok "Installed openssl" +} + function check_or_install_docker() { if command -v docker &>/dev/null; then msg_ok "Docker $(docker --version | cut -d' ' -f3 | tr -d ',') is available" @@ -131,6 +177,7 @@ function check_or_install_docker() { msg_error "Docker Compose plugin is not available. Please install it." 
exit 10 fi + ensure_openssl return fi @@ -154,6 +201,8 @@ function check_or_install_docker() { $STD sh <(curl -fsSL https://get.docker.com) fi msg_ok "Installed Docker" + + ensure_openssl } # ============================================================================== @@ -191,14 +240,12 @@ function install() { DB_PASSWORD=$(openssl rand -base64 16 | tr -d '/+=') ADMIN_PASSWORD=$(openssl rand -base64 8 | tr -d '/+=') - PASSKEY=$(openssl rand -base64 24 | tr -d '/+=') WEBHOOK_SECRET=$(openssl rand -base64 24 | tr -d '/+=') JWT_SECRET=$(openssl rand -base64 24 | tr -d '/+=') - sed -i "s/^KOMODO_DB_USERNAME=.*/KOMODO_DB_USERNAME=komodo_admin/" "$COMPOSE_ENV" - sed -i "s/^KOMODO_DB_PASSWORD=.*/KOMODO_DB_PASSWORD=${DB_PASSWORD}/" "$COMPOSE_ENV" + sed -i "s/^KOMODO_DATABASE_USERNAME=.*/KOMODO_DATABASE_USERNAME=komodo_admin/" "$COMPOSE_ENV" + sed -i "s/^KOMODO_DATABASE_PASSWORD=.*/KOMODO_DATABASE_PASSWORD=${DB_PASSWORD}/" "$COMPOSE_ENV" sed -i "s/^KOMODO_INIT_ADMIN_PASSWORD=changeme/KOMODO_INIT_ADMIN_PASSWORD=${ADMIN_PASSWORD}/" "$COMPOSE_ENV" - sed -i "s/^KOMODO_PASSKEY=.*/KOMODO_PASSKEY=${PASSKEY}/" "$COMPOSE_ENV" sed -i "s/^KOMODO_WEBHOOK_SECRET=.*/KOMODO_WEBHOOK_SECRET=${WEBHOOK_SECRET}/" "$COMPOSE_ENV" sed -i "s/^KOMODO_JWT_SECRET=.*/KOMODO_JWT_SECRET=${JWT_SECRET}/" "$COMPOSE_ENV" msg_ok "Configured environment" diff --git a/tools/addon/nextcloud-exporter.sh b/tools/addon/nextcloud-exporter.sh index 130ba979f..d76dc0099 100644 --- a/tools/addon/nextcloud-exporter.sh +++ b/tools/addon/nextcloud-exporter.sh @@ -14,12 +14,12 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) 2>/dev/null || true +declare -f 
init_tool_telemetry &>/dev/null && init_tool_telemetry "nextcloud-exporter" "addon" # Enable error handling set -Eeuo pipefail trap 'error_handler' ERR load_functions -init_tool_telemetry "" "addon" # ============================================================================== # CONFIGURATION diff --git a/tools/addon/pihole-exporter.sh b/tools/addon/pihole-exporter.sh index 7e9fc0fca..0cba93a30 100644 --- a/tools/addon/pihole-exporter.sh +++ b/tools/addon/pihole-exporter.sh @@ -14,12 +14,12 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) 2>/dev/null || true +declare -f init_tool_telemetry &>/dev/null && init_tool_telemetry "pihole-exporter" "addon" # Enable error handling set -Eeuo pipefail trap 'error_handler' ERR load_functions -init_tool_telemetry "" "addon" # ============================================================================== # CONFIGURATION diff --git a/tools/addon/prometheus-paperless-ngx-exporter.sh b/tools/addon/prometheus-paperless-ngx-exporter.sh index 08796fc59..7b6e44735 100644 --- a/tools/addon/prometheus-paperless-ngx-exporter.sh +++ b/tools/addon/prometheus-paperless-ngx-exporter.sh @@ -9,12 +9,12 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) 2>/dev/null || true +declare -f init_tool_telemetry &>/dev/null && init_tool_telemetry 
"prometheus-paperless-ngx-exporter" "addon" # Enable error handling set -Eeuo pipefail trap 'error_handler' ERR load_functions -init_tool_telemetry "" "addon" # ============================================================================== # CONFIGURATION diff --git a/tools/addon/qbittorrent-exporter.sh b/tools/addon/qbittorrent-exporter.sh index 7f1b4f322..e7a7b1f80 100644 --- a/tools/addon/qbittorrent-exporter.sh +++ b/tools/addon/qbittorrent-exporter.sh @@ -14,12 +14,12 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) 2>/dev/null || true +declare -f init_tool_telemetry &>/dev/null && init_tool_telemetry "qbittorrent-exporter" "addon" # Enable error handling set -Eeuo pipefail trap 'error_handler' ERR load_functions -init_tool_telemetry "" "addon" # ============================================================================== # CONFIGURATION @@ -68,6 +68,24 @@ function uninstall() { # ============================================================================== function update() { if check_for_gh_release "qbittorrent-exporter" "martabal/qbittorrent-exporter"; then + if [[ "$(printf '%s\n' "2.0.0" "$CHECK_UPDATE_RELEASE" | sort -V | tail -n1)" == "$CHECK_UPDATE_RELEASE" ]] && \ + ! grep -q "QBITTORRENT_API_KEY" "$CONFIG_PATH" 2>/dev/null; then + echo "" + msg_warn "Version 2.0.0 introduces a breaking change: username/password login has been replaced by an API key." 
+ echo -e "${TAB3}${INFO} You must create an API key in qBittorrent under Tools > Options > Web UI > API key" + echo "" + echo -n "${TAB3}Enter your qBittorrent API key (or press Enter to abort): " + read -r QBITTORRENT_API_KEY + if [[ -z "$QBITTORRENT_API_KEY" ]]; then + msg_warn "No API key provided. Update aborted." + exit 0 + fi + sed -i '/^QBITTORRENT_USERNAME=/d' "$CONFIG_PATH" + sed -i '/^QBITTORRENT_PASSWORD=/d' "$CONFIG_PATH" + echo "QBITTORRENT_API_KEY=\"${QBITTORRENT_API_KEY}\"" >>"$CONFIG_PATH" + msg_ok "API key saved to configuration" + fi + msg_info "Stopping service" if [[ "$OS" == "Alpine" ]]; then rc-service qbittorrent-exporter stop &>/dev/null @@ -100,10 +118,9 @@ function update() { # INSTALL # ============================================================================== function install() { - read -erp "Enter URL of qBittorrent, example: (http://127.0.0.1:8080): " QBITTORRENT_BASE_URL - read -erp "Enter qBittorrent username: " QBITTORRENT_USERNAME - read -rsp "Enter qBittorrent password: " QBITTORRENT_PASSWORD - printf "\n" + read -erp "${TAB3}Enter URL of qBittorrent, example: (http://127.0.0.1:8080): " QBITTORRENT_BASE_URL + echo -e "${TAB3}${INFO} Create an API key in qBittorrent under Tools > Options > Web UI > API key" + read -erp "${TAB3}Enter qBittorrent API key: " QBITTORRENT_API_KEY fetch_and_deploy_gh_release "qbittorrent-exporter" "martabal/qbittorrent-exporter" "tarball" "latest" setup_go @@ -116,8 +133,7 @@ function install() { cat <"$CONFIG_PATH" # https://github.com/martabal/qbittorrent-exporter?tab=readme-ov-file#parameters QBITTORRENT_BASE_URL="${QBITTORRENT_BASE_URL}" -QBITTORRENT_USERNAME="${QBITTORRENT_USERNAME}" -QBITTORRENT_PASSWORD="${QBITTORRENT_PASSWORD}" +QBITTORRENT_API_KEY="${QBITTORRENT_API_KEY}" EOF msg_ok "Created configuration" diff --git a/tools/addon/runtipi.sh b/tools/addon/runtipi.sh index 1b231e028..d4ba9bddb 100644 --- a/tools/addon/runtipi.sh +++ b/tools/addon/runtipi.sh @@ -18,6 +18,7 @@ source <(curl 
-fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func) source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) 2>/dev/null || true +declare -f init_tool_telemetry &>/dev/null && init_tool_telemetry "runtipi" "addon" # Enable error handling set -Eeuo pipefail @@ -149,7 +150,7 @@ function install() { curl -fsSL "https://raw.githubusercontent.com/runtipi/runtipi/master/scripts/install.sh" -o "install.sh" chmod +x install.sh $STD ./install.sh - chmod 666 /opt/runtipi/state/settings.json 2>/dev/null || true + chmod 660 /opt/runtipi/state/settings.json 2>/dev/null || true rm -f /opt/install.sh msg_ok "Installed ${APP}" diff --git a/tools/addon/sparkyfitness-garmin.sh b/tools/addon/sparkyfitness-garmin.sh new file mode 100644 index 000000000..d62f4b4a4 --- /dev/null +++ b/tools/addon/sparkyfitness-garmin.sh @@ -0,0 +1,176 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2026 community-scripts ORG +# Author: Tom Frenzel (tomfrenzel) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://github.com/CodeWithCJ/SparkyFitness + +if ! 
command -v curl &>/dev/null; then + printf "\r\e[2K%b" '\033[93m Setup Source \033[m' >&2 + apt-get update >/dev/null 2>&1 + apt-get install -y curl >/dev/null 2>&1 +fi +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/core.func) +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func) +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func) +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) 2>/dev/null || true +declare -f init_tool_telemetry &>/dev/null && init_tool_telemetry "sparkyfitness-garmin" "addon" + +# Enable error handling +set -Eeuo pipefail +trap 'error_handler' ERR +load_functions + +# ============================================================================== +# CONFIGURATION +# ============================================================================== +APP="SparkyFitness-Garmin" +APP_TYPE="addon" +INSTALL_PATH="/opt/sparkyfitness-garmin" +CONFIG_PATH="/etc/sparkyfitness-garmin/.env" +SERVICE_PATH="/etc/systemd/system/sparkyfitness-garmin.service" +DEFAULT_PORT=8000 + +# ============================================================================== +# OS DETECTION +# ============================================================================== +if ! grep -qE 'ID=debian|ID=ubuntu' /etc/os-release 2>/dev/null; then + echo -e "${CROSS} Unsupported OS detected. This script only supports Debian and Ubuntu." + exit 238 +fi + +# ============================================================================== +# SparkyFitness LXC DETECTION +# ============================================================================== +if [[ ! -d /opt/sparkyfitness ]]; then + echo -e "${CROSS} No SparkyFitness installation detected. This addon must be installed within a container that already has SparkyFitness installed." 
+ exit 238 +fi + +# ============================================================================== +# UNINSTALL +# ============================================================================== +function uninstall() { + msg_info "Uninstalling ${APP}" + systemctl disable --now sparkyfitness-garmin.service &>/dev/null || true + rm -rf "$SERVICE_PATH" "$CONFIG_PATH" "$INSTALL_PATH" ~/.sparkyfitness-garmin + msg_ok "${APP} has been uninstalled" +} + +# ============================================================================== +# UPDATE +# ============================================================================== +function update() { + if check_for_gh_release "sparkyfitness-garmin" "CodeWithCJ/SparkyFitness"; then + PYTHON_VERSION="3.13" setup_uv + + msg_info "Stopping service" + systemctl stop sparkyfitness-garmin.service &>/dev/null || true + msg_ok "Stopped service" + + CLEAN_INSTALL=1 fetch_and_deploy_gh_release "sparkyfitness-garmin" "CodeWithCJ/SparkyFitness" "tarball" "latest" $INSTALL_PATH + cd $INSTALL_PATH/SparkyFitnessGarmin + $STD uv venv --clear .venv + $STD uv pip install -r requirements.txt + + msg_info "Starting service" + systemctl start sparkyfitness-garmin + msg_ok "Started service" + msg_ok "Updated successfully" + exit + fi +} + +# ============================================================================== +# INSTALL +# ============================================================================== +function install() { + PYTHON_VERSION="3.13" setup_uv + fetch_and_deploy_gh_release "sparkyfitness-garmin" "CodeWithCJ/SparkyFitness" "tarball" "latest" $INSTALL_PATH + + msg_info "Setting up ${APP}" + mkdir -p "/etc/sparkyfitness-garmin" + cp "/opt/sparkyfitness-garmin/docker/.env.example" $CONFIG_PATH + cd $INSTALL_PATH/SparkyFitnessGarmin + $STD uv venv --clear .venv + $STD uv pip install -r requirements.txt + sed -i -e "s|^#\?GARMIN_MICROSERVICE_URL=.*|GARMIN_MICROSERVICE_URL=http://${LOCAL_IP}:${DEFAULT_PORT}|" $CONFIG_PATH + cat 
<<EOF >/etc/systemd/system/sparkyfitness-garmin.service +[Unit] +Description=${APP} +After=network.target sparkyfitness-server.service +Requires=sparkyfitness-server.service + +[Service] +Type=simple +WorkingDirectory=$INSTALL_PATH/SparkyFitnessGarmin +EnvironmentFile=$CONFIG_PATH +ExecStart=$INSTALL_PATH/SparkyFitnessGarmin/.venv/bin/python3 -m uvicorn main:app --host 0.0.0.0 --port ${DEFAULT_PORT} +Restart=always +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF + systemctl enable -q --now sparkyfitness-garmin + msg_ok "Set up ${APP} - reachable at http://${LOCAL_IP}:${DEFAULT_PORT}" + msg_ok "You might need to update the GARMIN_MICROSERVICE_URL in your SparkyFitness .env file to http://${LOCAL_IP}:${DEFAULT_PORT}" +} + +# ============================================================================== +# MAIN +# ============================================================================== +header_info +ensure_usr_local_bin_persist +get_lxc_ip + +# Handle type=update (called from update script) +if [[ "${type:-}" == "update" ]]; then + if [[ -d "$INSTALL_PATH" ]]; then + update + else + msg_error "${APP} is not installed. Nothing to update." + exit 233 + fi + exit 0 +fi + +# Check if already installed +if [[ -d "$INSTALL_PATH" && -n "$(ls -A "$INSTALL_PATH" 2>/dev/null)" ]]; then + msg_warn "${APP} is already installed." + echo "" + + echo -n "${TAB}Uninstall ${APP}? (y/N): " + read -r uninstall_prompt + if [[ "${uninstall_prompt,,}" =~ ^(y|yes)$ ]]; then + uninstall + exit 0 + fi + + echo -n "${TAB}Update ${APP}? (y/N): " + read -r update_prompt + if [[ "${update_prompt,,}" =~ ^(y|yes)$ ]]; then + update + exit 0 + fi + + msg_warn "No action selected. Exiting." + exit 0 +fi + +# Fresh installation +msg_warn "${APP} is not installed." +echo "" +echo -e "${TAB}${INFO} This will install:" +echo -e "${TAB} - UV (Python Version Manager)" +echo -e "${TAB} - SparkyFitness Garmin Microservice" +echo "" + +echo -n "${TAB}Install ${APP}? 
(y/N): " +read -r install_prompt +if [[ "${install_prompt,,}" =~ ^(y|yes)$ ]]; then + install +else + msg_warn "Installation cancelled. Exiting." + exit 0 +fi diff --git a/tools/headers/homebrew b/tools/headers/homebrew new file mode 100644 index 000000000..529ede91c --- /dev/null +++ b/tools/headers/homebrew @@ -0,0 +1,6 @@ + __ __ + / /_ ____ ____ ___ ___ / /_ ________ _ __ + / __ \/ __ \/ __ `__ \/ _ \/ __ \/ ___/ _ \ | /| / / + / / / / /_/ / / / / / / __/ /_/ / / / __/ |/ |/ / +/_/ /_/\____/_/ /_/ /_/\___/_.___/_/ \___/|__/|__/ + diff --git a/tools/headers/sparkyfitness-garmin b/tools/headers/sparkyfitness-garmin new file mode 100644 index 000000000..3fb871853 --- /dev/null +++ b/tools/headers/sparkyfitness-garmin @@ -0,0 +1,6 @@ + _____ __ _______ __ ______ _ + / ___/____ ____ ______/ /____ __/ ____(_) /_____ ___ __________ / ____/___ __________ ___ (_)___ + \__ \/ __ \/ __ `/ ___/ //_/ / / / /_ / / __/ __ \/ _ \/ ___/ ___/_____/ / __/ __ `/ ___/ __ `__ \/ / __ \ + ___/ / /_/ / /_/ / / / ,< / /_/ / __/ / / /_/ / / / __(__ |__ )_____/ /_/ / /_/ / / / / / / / / / / / / +/____/ .___/\__,_/_/ /_/|_|\__, /_/ /_/\__/_/ /_/\___/____/____/ \____/\__,_/_/ /_/ /_/ /_/_/_/ /_/ + /_/ /____/ diff --git a/tools/pve/clean-orphaned-lvm.sh b/tools/pve/clean-orphaned-lvm.sh index 319497cd7..8d1df7a12 100644 --- a/tools/pve/clean-orphaned-lvm.sh +++ b/tools/pve/clean-orphaned-lvm.sh @@ -37,8 +37,9 @@ function find_orphaned_lvm { fi container_id=$(echo "$lv" | grep -oE "[0-9]+" | head -1) - # Check if the ID exists as a VM or LXC container - if [ -f "/etc/pve/lxc/${container_id}.conf" ] || [ -f "/etc/pve/qemu-server/${container_id}.conf" ]; then + # Check if the ID exists as a VM or LXC container on any cluster node + if compgen -G "/etc/pve/nodes/*/lxc/${container_id}.conf" >/dev/null 2>&1 || + compgen -G "/etc/pve/nodes/*/qemu-server/${container_id}.conf" >/dev/null 2>&1; then continue fi diff --git a/tools/pve/cron-update-lxcs.sh b/tools/pve/cron-update-lxcs.sh index 
b2a1b5852..c97975c44 100644 --- a/tools/pve/cron-update-lxcs.sh +++ b/tools/pve/cron-update-lxcs.sh @@ -1,11 +1,24 @@ #!/usr/bin/env bash -# Copyright (c) 2021-2026 tteck -# Author: tteck (tteckster) -# License: MIT -# https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Copyright (c) 2021-2026 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# +# This script manages a local cron job for automatic LXC container OS updates. +# The update script is downloaded once, displayed for review, and installed +# locally. Cron runs the local copy — no remote code execution at runtime. +# # bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/tools/pve/cron-update-lxcs.sh)" +set -euo pipefail + +REPO_URL="https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main" +SCRIPT_URL="${REPO_URL}/tools/pve/update-lxcs-cron.sh" +LOCAL_SCRIPT="/usr/local/bin/update-lxcs.sh" +CONF_FILE="/etc/update-lxcs.conf" +LOG_FILE="/var/log/update-lxcs-cron.log" +CRON_ENTRY="0 0 * * 0 ${LOCAL_SCRIPT} >>${LOG_FILE} 2>&1" + clear cat <<"EOF" ______ __ __ __ __ __ _ ________ @@ -16,41 +29,322 @@ cat <<"EOF" /_/ EOF -add() { +info() { echo -e "\n \e[36m[Info]\e[0m $1"; } +ok() { echo -e " \e[32m[OK]\e[0m $1"; } +err() { echo -e " \e[31m[Error]\e[0m $1" >&2; } + +confirm() { + local prompt="${1:-Proceed?}" while true; do - read -p "This script will add a crontab schedule that updates all LXCs every Sunday at midnight. Proceed(y/n)?" yn + read -rp " ${prompt} (y/n): " yn case $yn in - [Yy]*) break ;; - [Nn]*) exit ;; - *) echo "Please answer yes or no." ;; + [Yy]*) return 0 ;; + [Nn]*) return 1 ;; + *) echo " Please answer yes or no." 
;; esac done - sh -c '(crontab -l -u root 2>/dev/null; echo "0 0 * * 0 PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin /bin/bash -c \"\$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/tools/pve/update-lxcs-cron.sh)\" >>/var/log/update-lxcs-cron.log 2>/dev/null") | crontab -u root -' - clear - echo -e "\n To view Cron Update LXCs logs: cat /var/log/update-lxcs-cron.log" +} + +download_script() { + local tmp + tmp=$(mktemp) + if ! curl -fsSL -o "$tmp" "$SCRIPT_URL"; then + err "Failed to download script from:\n ${SCRIPT_URL}" + rm -f "$tmp" + return 1 + fi + echo "$tmp" +} + +review_script() { + local file="$1" + local hash + hash=$(sha256sum "$file" | awk '{print $1}') + echo "" + echo -e " \e[1;33m─── Script Content ───────────────────────────────────────────\e[0m" + cat "$file" + echo -e " \e[1;33m──────────────────────────────────────────────────────────────\e[0m" + echo -e " \e[36mSHA256:\e[0m ${hash}" + echo -e " \e[36mSource:\e[0m ${SCRIPT_URL}" + echo "" +} + +remove_legacy_cron() { + if crontab -l -u root 2>/dev/null | grep -q "update-lxcs-cron.sh"; then + (crontab -l -u root 2>/dev/null | grep -v "update-lxcs-cron.sh") | crontab -u root - + ok "Removed legacy curl-based cron entry" + fi +} + +add() { + info "Downloading update script..." 
+ local tmp + tmp=$(download_script) || exit 1 + + local hash + hash=$(sha256sum "$tmp" | awk '{print $1}') + echo "" + echo -e " \e[1;33m─── Installation Summary ─────────────────────────────────────\e[0m" + echo -e " \e[36mSource:\e[0m ${SCRIPT_URL}" + echo -e " \e[36mSHA256:\e[0m ${hash}" + echo -e " \e[36mInstall to:\e[0m ${LOCAL_SCRIPT}" + echo -e " \e[36mConfig:\e[0m ${CONF_FILE}" + echo -e " \e[36mLog file:\e[0m ${LOG_FILE}" + echo -e " \e[36mCron schedule:\e[0m Every Sunday at midnight (0 0 * * 0)" + echo -e " \e[1;33m──────────────────────────────────────────────────────────────\e[0m" + echo "" + + if confirm "Review script content before installing?"; then + review_script "$tmp" + fi + + if ! confirm "Install this script and activate cron schedule?"; then + rm -f "$tmp" + echo " Aborted." + exit 0 + fi + + remove_legacy_cron + + install -m 0755 "$tmp" "$LOCAL_SCRIPT" + rm -f "$tmp" + ok "Installed script to ${LOCAL_SCRIPT}" + + if [[ ! -f "$CONF_FILE" ]]; then + cat >"$CONF_FILE" <<'CONF' +# Configuration for automatic LXC container OS updates. 
+# Add container IDs to exclude from updates (comma-separated): +# EXCLUDE=100,101,102 +EXCLUDE= +CONF + ok "Created config ${CONF_FILE}" + fi + + ( + crontab -l -u root 2>/dev/null | grep -v "${LOCAL_SCRIPT}" || true + echo "${CRON_ENTRY}" + ) | crontab -u root - + ok "Added cron schedule: Every Sunday at midnight" + echo "" + echo -e " \e[36mLocal script:\e[0m ${LOCAL_SCRIPT}" + echo -e " \e[36mConfig:\e[0m ${CONF_FILE}" + echo -e " \e[36mLog file:\e[0m ${LOG_FILE}" + echo "" } remove() { - (crontab -l | grep -v "update-lxcs-cron.sh") | crontab - - rm -rf /var/log/update-lxcs-cron.log - echo "Removed Crontab Schedule from Proxmox VE" + if crontab -l -u root 2>/dev/null | grep -q "${LOCAL_SCRIPT}"; then + (crontab -l -u root 2>/dev/null | grep -v "${LOCAL_SCRIPT}") | crontab -u root - + ok "Removed cron schedule" + fi + remove_legacy_cron + [[ -f "$LOCAL_SCRIPT" ]] && rm -f "$LOCAL_SCRIPT" && ok "Removed ${LOCAL_SCRIPT}" + [[ -f "$LOG_FILE" ]] && rm -f "$LOG_FILE" && ok "Removed ${LOG_FILE}" + echo -e "\n Cron Update LXCs has been fully removed." + echo -e " \e[90mNote: ${CONF_FILE} was kept (remove manually if desired).\e[0m" } -OPTIONS=(Add "Add Crontab Schedule" - Remove "Remove Crontab Schedule") +update_script() { + if [[ ! -f "$LOCAL_SCRIPT" ]]; then + err "No local script found at ${LOCAL_SCRIPT}. Use 'Add' first." + exit 1 + fi -CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Cron Update LXCs" --menu "Select an option:" 10 58 2 \ - "${OPTIONS[@]}" 3>&1 1>&2 2>&3) + info "Downloading latest version..." + local tmp + tmp=$(download_script) || exit 1 + + if command -v diff &>/dev/null; then + local changes + changes=$(diff --color=auto "$LOCAL_SCRIPT" "$tmp" 2>/dev/null || true) + if [[ -z "$changes" ]]; then + ok "Script is already up-to-date (no changes)." 
+ rm -f "$tmp" + return + fi + echo "" + echo -e " \e[1;33m─── Changes ──────────────────────────────────────────────────\e[0m" + echo "$changes" + echo -e " \e[1;33m──────────────────────────────────────────────────────────────\e[0m" + else + review_script "$tmp" + fi + + local new_hash old_hash + new_hash=$(sha256sum "$tmp" | awk '{print $1}') + old_hash=$(sha256sum "$LOCAL_SCRIPT" | awk '{print $1}') + echo -e " \e[36mCurrent SHA256:\e[0m ${old_hash}" + echo -e " \e[36mNew SHA256:\e[0m ${new_hash}" + echo "" + + if ! confirm "Apply update?"; then + rm -f "$tmp" + echo " Aborted." + return + fi + + install -m 0755 "$tmp" "$LOCAL_SCRIPT" + rm -f "$tmp" + ok "Updated ${LOCAL_SCRIPT}" +} + +view_script() { + if [[ ! -f "$LOCAL_SCRIPT" ]]; then + err "No local script found at ${LOCAL_SCRIPT}. Use 'Add' first." + exit 1 + fi + + local view_choice + view_choice=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "View Script" --menu "What do you want to view?" 12 60 3 \ + "Worker" "Installed update script (${LOCAL_SCRIPT##*/})" \ + "Cron" "Cron schedule & configuration" \ + "Both" "Show everything" \ + 3>&1 1>&2 2>&3) || return 0 + + case "$view_choice" in + "Worker") view_worker_script ;; + "Cron") view_cron_config ;; + "Both") view_cron_config && echo "" && view_worker_script ;; + esac +} + +view_worker_script() { + local hash + hash=$(sha256sum "$LOCAL_SCRIPT" | awk '{print $1}') + echo "" + echo -e " \e[1;33m─── ${LOCAL_SCRIPT} ───\e[0m" + cat "$LOCAL_SCRIPT" + echo -e " \e[1;33m──────────────────────────────────────────────────────────────\e[0m" + echo -e " \e[36mSHA256:\e[0m ${hash}" + echo -e " \e[36mInstalled:\e[0m $(stat -c '%y' "$LOCAL_SCRIPT" 2>/dev/null | cut -d. 
-f1)" + echo "" +} + +view_cron_config() { + echo "" + echo -e " \e[1;33m─── Cron Configuration ───────────────────────────────────────\e[0m" + if crontab -l -u root 2>/dev/null | grep -q "${LOCAL_SCRIPT}"; then + local entry + entry=$(crontab -l -u root 2>/dev/null | grep "${LOCAL_SCRIPT}") + echo -e " \e[36mCron entry:\e[0m ${entry}" + local schedule + schedule=$(echo "$entry" | awk '{print $1,$2,$3,$4,$5}') + echo -e " \e[36mSchedule:\e[0m ${schedule} ($(cron_to_human "$schedule"))" + else + echo -e " \e[31mCron:\e[0m Not configured" + fi + if [[ -f "$CONF_FILE" ]]; then + echo -e " \e[36mConfig file:\e[0m ${CONF_FILE}" + local excludes + excludes=$(grep -oP '^\s*EXCLUDE\s*=\s*\K.*' "$CONF_FILE" 2>/dev/null || true) + echo -e " \e[36mExcluded:\e[0m ${excludes:-(none)}" + echo "" + echo -e " \e[90m--- ${CONF_FILE} ---\e[0m" + cat "$CONF_FILE" + else + echo -e " \e[36mConfig file:\e[0m (not created yet)" + fi + if [[ -f "$LOG_FILE" ]]; then + local log_size + log_size=$(du -h "$LOG_FILE" | awk '{print $1}') + echo -e " \e[36mLog file:\e[0m ${LOG_FILE} (${log_size})" + fi + echo -e " \e[1;33m──────────────────────────────────────────────────────────────\e[0m" + echo "" +} + +cron_to_human() { + local schedule="$1" + case "$schedule" in + "0 0 * * 0") echo "Every Sunday at midnight" ;; + "0 0 * * *") echo "Daily at midnight" ;; + "0 * * * *") echo "Every hour" ;; + *) echo "Custom schedule" ;; + esac +} + +show_status() { + echo "" + if [[ -f "$LOCAL_SCRIPT" ]]; then + local hash + hash=$(sha256sum "$LOCAL_SCRIPT" | awk '{print $1}') + ok "Script installed: ${LOCAL_SCRIPT}" + echo -e " \e[36mSHA256:\e[0m ${hash}" + echo -e " \e[36mInstalled:\e[0m $(stat -c '%y' "$LOCAL_SCRIPT" 2>/dev/null | cut -d. 
-f1)" + else + err "Script not installed" + fi + + if crontab -l -u root 2>/dev/null | grep -q "${LOCAL_SCRIPT}"; then + local schedule + schedule=$(crontab -l -u root 2>/dev/null | grep "${LOCAL_SCRIPT}" | awk '{print $1,$2,$3,$4,$5}') + ok "Cron active: ${schedule}" + else + err "Cron not configured" + fi + + if [[ -f "$CONF_FILE" ]]; then + local excludes + excludes=$(grep -oP '^\s*EXCLUDE\s*=\s*\K.*' "$CONF_FILE" 2>/dev/null || echo "(none)") + echo -e " \e[36mExcluded:\e[0m ${excludes:-"(none)"}" + fi + + if [[ -f "$LOG_FILE" ]]; then + local log_size last_run + log_size=$(du -h "$LOG_FILE" | awk '{print $1}') + last_run=$(grep -oP '^\s+\K\w.*' "$LOG_FILE" | tail -1) + echo -e " \e[36mLog file:\e[0m ${LOG_FILE} (${log_size})" + [[ -n "${last_run:-}" ]] && echo -e " \e[36mLast run:\e[0m ${last_run}" + else + echo -e " \e[36mLog file:\e[0m (no runs yet)" + fi + echo "" +} + +run_now() { + if [[ ! -f "$LOCAL_SCRIPT" ]]; then + err "No local script found at ${LOCAL_SCRIPT}. Use 'Add' first." + exit 1 + fi + info "Running update script now..." + bash "$LOCAL_SCRIPT" | tee -a "$LOG_FILE" + ok "Run completed. Log appended to ${LOG_FILE}" +} + +rotate_log() { + if [[ ! -f "$LOG_FILE" ]]; then + info "No log file to rotate." + return + fi + local log_size + log_size=$(stat -c '%s' "$LOG_FILE" 2>/dev/null || echo 0) + local log_size_h + log_size_h=$(du -h "$LOG_FILE" | awk '{print $1}') + if confirm "Rotate log file? 
(current size: ${log_size_h})"; then + mv "$LOG_FILE" "${LOG_FILE}.old" + ok "Rotated: ${LOG_FILE} → ${LOG_FILE}.old" + fi +} + +OPTIONS=( + Add "Download, review & install cron schedule" + Remove "Remove cron schedule & local script" + Update "Update local script from repository" + Status "Show installation status & last run" + Run "Run update script now (manual trigger)" + View "View cron config & installed script" + Rotate "Rotate log file" +) + +CHOICE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Cron Update LXCs" --menu "Select an option:" 16 68 7 \ + "${OPTIONS[@]}" 3>&1 1>&2 2>&3) || exit 0 case $CHOICE in -"Add") - add - ;; -"Remove") - remove - ;; -*) - echo "Exiting..." - exit 0 - ;; +"Add") add ;; +"Remove") remove ;; +"Update") update_script ;; +"Status") show_status ;; +"Run") run_now ;; +"View") view_script ;; +"Rotate") rotate_log ;; esac diff --git a/tools/pve/pbs_microcode.sh b/tools/pve/pbs-microcode.sh similarity index 100% rename from tools/pve/pbs_microcode.sh rename to tools/pve/pbs-microcode.sh diff --git a/tools/pve/pbs4-upgrade.sh b/tools/pve/pbs4-upgrade.sh index 2a2fa59fd..a05ba8bb9 100644 --- a/tools/pve/pbs4-upgrade.sh +++ b/tools/pve/pbs4-upgrade.sh @@ -57,7 +57,9 @@ start_routines() { yes) msg_info "Switching to Debian 13 (Trixie) Sources" rm -f /etc/apt/sources.list.d/*.list - sed -i '/proxmox/d;/bookworm/d' /etc/apt/sources.list || true + if [ -f /etc/apt/sources.list ]; then + sed -i '/proxmox/d;/bookworm/d' /etc/apt/sources.list + fi cat >/etc/apt/sources.list.d/debian.sources <.log +# +# AVAILABLE ENV VARIABLES +# ----------------------- +# APP - Pretty name (e.g. "Vaultwarden") +# NSAPP - Slug / lowercase (e.g. "vaultwarden") +# CTID - Numeric container ID (e.g. "103") +# IP - IPv4 address of the LXC (e.g. "192.168.1.50") +# HN - Hostname (e.g. "vaultwarden") +# STORAGE - Storage where the rootfs lives (e.g. "local-lvm") +# BRG - Bridge (e.g. 
"vmbr0") +# +# GENERAL TIPS +# ------------ +# - Use `set -euo pipefail` so failures actually surface. +# - Use `|| true` on best-effort steps you do not want to abort the hook. +# - The file just needs to be a valid script. `+x` is optional — it is +# invoked via `bash `. Shebang is honored only if you call it +# yourself; otherwise the shebang line is purely cosmetic. +# - If the hook exits non-zero, the user gets a whiptail popup with the +# last 15 log lines. The LXC creation itself is NOT rolled back. +# - Keep hooks idempotent — they may be re-run if you recreate a CT. +# +# HOW TO USE THIS FILE +# -------------------- +# 1. Copy ONE example block (between the BEGIN/END markers) into a new +# file on the Proxmox host, e.g. /opt/community-scripts/hooks/notify.sh +# 2. chmod +x /opt/community-scripts/hooks/notify.sh (optional) +# 3. Set var_post_install in default.vars / app.vars or pick the path +# in Advanced Settings. +# ============================================================================ + +# ============================================================================ +# ▼▼▼ EXAMPLE 1 — BEGIN ▼▼▼ +# ---------------------------------------------------------------------------- +# Name : minimal-logger.sh +# Purpose : Append every newly created LXC to a single CSV-ish log. +# Difficulty : ⭐ Beginner +# Side effects: Writes to /var/log/community-scripts/created-lxcs.log +# Use case : You just want a paper trail of "what got created when". +# ============================================================================ +#!/usr/bin/env bash +set -euo pipefail + +LOG_DIR="/var/log/community-scripts" +LOG_FILE="${LOG_DIR}/created-lxcs.log" + +mkdir -p "$LOG_DIR" + +# Header on first use +if [[ ! 
-s "$LOG_FILE" ]]; then + echo "timestamp;ctid;app;hostname;ip;bridge;storage" >"$LOG_FILE" +fi + +printf '%s;%s;%s;%s;%s;%s;%s\n' \ + "$(date -Iseconds)" \ + "${CTID}" \ + "${APP}" \ + "${HN}" \ + "${IP}" \ + "${BRG}" \ + "${STORAGE}" \ + >>"$LOG_FILE" + +echo "Logged ${APP} (CTID=${CTID}) to ${LOG_FILE}" +# ▲▲▲ EXAMPLE 1 — END ▲▲▲ + +# ============================================================================ +# ▼▼▼ EXAMPLE 2 — BEGIN ▼▼▼ +# ---------------------------------------------------------------------------- +# Name : discord-gotify-notify.sh +# Purpose : Send a rich Discord embed AND a Gotify push notification +# whenever a new LXC is provisioned. +# Difficulty : ⭐⭐ Intermediate +# Requires : curl on the host (default), reachable webhook URLs. +# Side effects: Outbound HTTPS to Discord + your Gotify server. +# ============================================================================ +#!/usr/bin/env bash +set -euo pipefail + +# --- CONFIG (edit me) ------------------------------------------------------- +DISCORD_WEBHOOK="https://discord.com/api/webhooks/XXXXXXXX/YYYYYYYY" +GOTIFY_URL="https://gotify.example.com" +GOTIFY_TOKEN="AbCdEfGhIjKlMnO" +GOTIFY_PRIORITY=5 +# ---------------------------------------------------------------------------- + +# Resolve the Proxmox node's hostname for context +NODE="$(hostname -s)" +TS="$(date -Iseconds)" + +# --- Discord embed ---------------------------------------------------------- +read -r -d '' DISCORD_PAYLOAD </dev/null || + echo "WARN: Discord webhook failed (non-fatal)" + +# --- Gotify push ------------------------------------------------------------ +curl -fsS --max-time 10 \ + -H "X-Gotify-Key: ${GOTIFY_TOKEN}" \ + -F "title=Proxmox: ${APP} LXC created" \ + -F "message=CTID=${CTID} IP=${IP} HN=${HN} on ${NODE}" \ + -F "priority=${GOTIFY_PRIORITY}" \ + "${GOTIFY_URL}/message" \ + >/dev/null || + echo "WARN: Gotify push failed (non-fatal)" + +echo "Notifications dispatched for CTID=${CTID}" +# ▲▲▲ EXAMPLE 2 
— END ▲▲▲ + +# ============================================================================ +# ▼▼▼ EXAMPLE 3 — BEGIN ▼▼▼ +# ---------------------------------------------------------------------------- +# Name : auto-pool-tags-backup.sh +# Purpose : Add the new LXC to a Proxmox pool, append cluster-wide tags, +# register a DNS record in pi-hole, and trigger an immediate +# snapshot backup to a configured storage. +# Difficulty : ⭐⭐⭐ Advanced +# Requires : pvesh, pct, vzdump (host-side; available by default on PVE), +# a reachable pi-hole admin API. +# ============================================================================ +#!/usr/bin/env bash +set -euo pipefail + +# --- CONFIG (edit me) ------------------------------------------------------- +TARGET_POOL="auto-lxc" +EXTRA_TAGS=("auto-provisioned" "${NSAPP}") # community-script tag is set by build.func +BACKUP_STORAGE="pbs-main" # set to "" to skip initial backup +PIHOLE_HOST="192.168.1.5" +PIHOLE_PASSWORD="changeme" # web-UI password +DNS_DOMAIN="lan" # FQDN will be ${HN}.${DNS_DOMAIN} +# ---------------------------------------------------------------------------- + +# 1) Ensure the pool exists, then attach the CT +if ! pvesh get "/pools/${TARGET_POOL}" >/dev/null 2>&1; then + echo "Creating pool: ${TARGET_POOL}" + pvesh create /pools --poolid "${TARGET_POOL}" --comment "Auto-created by post-install hook" || true +fi +echo "Adding CTID=${CTID} to pool=${TARGET_POOL}" +pvesh set "/pools/${TARGET_POOL}" --vms "${CTID}" || echo "WARN: pool attach failed (non-fatal)" + +# 2) Merge new tags with existing ones (preserve community-script etc.) 
+CURRENT_TAGS="$(pct config "${CTID}" | awk -F': ' '/^tags:/{print $2}')" +declare -A TAG_SET +IFS=';' read -r -a CUR_ARR <<<"${CURRENT_TAGS:-}" +for t in "${CUR_ARR[@]}"; do [[ -n "$t" ]] && TAG_SET["$t"]=1; done +for t in "${EXTRA_TAGS[@]}"; do [[ -n "$t" ]] && TAG_SET["$t"]=1; done +NEW_TAGS="$( + IFS=';' + echo "${!TAG_SET[*]}" +)" +echo "Setting tags: ${NEW_TAGS}" +pct set "${CTID}" --tags "${NEW_TAGS}" || echo "WARN: tag update failed (non-fatal)" + +# 3) Register DNS in pi-hole (custom DNS record) +FQDN="${HN}.${DNS_DOMAIN}" +echo "Registering DNS: ${FQDN} → ${IP} on pi-hole ${PIHOLE_HOST}" +SID="$(curl -fsS --max-time 5 \ + -d "pw=${PIHOLE_PASSWORD}" \ + "http://${PIHOLE_HOST}/api/auth" 2>/dev/null | + sed -nE 's/.*"sid":"([^"]+)".*/\1/p' || true)" + +if [[ -n "${SID}" ]]; then + curl -fsS --max-time 5 -X PUT \ + -H "Content-Type: application/json" \ + -H "sid: ${SID}" \ + -d "{\"hosts\":[\"${IP} ${FQDN}\"]}" \ + "http://${PIHOLE_HOST}/api/config/dns/hosts" >/dev/null || + echo "WARN: pi-hole DNS update failed (non-fatal)" + curl -fsS --max-time 5 -X DELETE -H "sid: ${SID}" "http://${PIHOLE_HOST}/api/auth" >/dev/null || true +else + echo "WARN: could not obtain pi-hole session (skipping DNS)" +fi + +# 4) Initial backup (best-effort, can take a few minutes) +if [[ -n "${BACKUP_STORAGE}" ]]; then + if pvesh get "/storage/${BACKUP_STORAGE}" >/dev/null 2>&1; then + echo "Triggering initial backup of CTID=${CTID} to ${BACKUP_STORAGE}" + vzdump "${CTID}" \ + --storage "${BACKUP_STORAGE}" \ + --mode snapshot \ + --compress zstd \ + --notes-template "Initial backup of ${APP} (CTID=${CTID})" \ + --notification-mode auto || + echo "WARN: initial backup failed (non-fatal)" + else + echo "Backup storage '${BACKUP_STORAGE}' not found — skipping." 
+ fi +fi + +echo "Post-provision routine complete for ${APP} (CTID=${CTID})" +# ▲▲▲ EXAMPLE 3 — END ▲▲▲ + +# ============================================================================ +# ▼▼▼ EXAMPLE 4 — BEGIN ▼▼▼ +# ---------------------------------------------------------------------------- +# Name : inject-ssh-and-monitoring.sh +# Purpose : Push the host's admin SSH key into the new LXC, install the +# Beszel monitoring agent inside the container, and register +# an Uptime-Kuma HTTP push monitor for the LXC's IP. +# Difficulty : ⭐⭐⭐ Advanced +# Requires : pct (host), curl (inside LXC), reachable Beszel hub + +# Uptime-Kuma push URL. +# ============================================================================ +#!/usr/bin/env bash +set -euo pipefail + +# --- CONFIG (edit me) ------------------------------------------------------- +ADMIN_KEY="/root/.ssh/admin_ed25519.pub" +BESZEL_HUB_URL="http://192.168.1.10:8090" +BESZEL_AGENT_KEY="ssh-ed25519 AAAA... beszel@hub" # public key of the hub +UPTIME_KUMA_PUSH_BASE="http://uptime.lan/api/push/abc123" +# ---------------------------------------------------------------------------- + +# 1) Inject the admin SSH key +if [[ -f "${ADMIN_KEY}" ]]; then + echo "Pushing admin SSH key into CTID=${CTID}" + pct exec "${CTID}" -- mkdir -p /root/.ssh + pct exec "${CTID}" -- chmod 700 /root/.ssh + pct push "${CTID}" "${ADMIN_KEY}" /root/.ssh/authorized_keys + pct exec "${CTID}" -- chmod 600 /root/.ssh/authorized_keys +else + echo "WARN: ${ADMIN_KEY} not found on host — skipping SSH key injection" +fi + +# 2) Wait for outbound networking inside the CT (max 30 s) +echo "Waiting for network inside CTID=${CTID}…" +for _ in $(seq 1 30); do + if pct exec "${CTID}" -- bash -c 'getent hosts deb.debian.org >/dev/null 2>&1'; then + break + fi + sleep 1 +done + +# 3) Install Beszel agent inside the LXC +echo "Installing Beszel agent inside CTID=${CTID}" +pct exec "${CTID}" -- bash -s <<'AGENT_INSTALL' || echo "WARN: Beszel install failed" 
+set -euo pipefail +ARCH="$(uname -m)" +case "$ARCH" in + x86_64) ARCH_TAG=amd64 ;; + aarch64) ARCH_TAG=arm64 ;; + *) echo "Unsupported arch: $ARCH"; exit 1 ;; +esac +TMP=$(mktemp -d) +cd "$TMP" +curl -fsSL "https://github.com/henrygd/beszel/releases/latest/download/beszel-agent_linux_${ARCH_TAG}.tar.gz" \ + | tar -xz +install -m 0755 beszel-agent /usr/local/bin/beszel-agent + +cat >/etc/systemd/system/beszel-agent.service </dev/null || + echo "WARN: Uptime-Kuma push failed (non-fatal)" + +echo "Provisioned monitoring for ${APP} (CTID=${CTID}, IP=${IP})" +# ▲▲▲ EXAMPLE 4 — END ▲▲▲ + +# ============================================================================ +# ▼▼▼ EXAMPLE 5 — BEGIN ▼▼▼ +# ---------------------------------------------------------------------------- +# Name : per-app-router.sh +# Purpose : Single dispatcher hook that runs different actions +# depending on the app being installed (NSAPP). Useful when +# you want ONE hook for the whole cluster but distinct +# behavior for, e.g., databases vs media services. +# Difficulty : ⭐⭐⭐ Advanced +# ============================================================================ +#!/usr/bin/env bash +set -euo pipefail + +# --- CONFIG (edit me) ------------------------------------------------------- +DEFAULT_DNS_SUFFIX="lan" +PROM_FILE_SD_DIR="/etc/prometheus/file_sd" # on the host that runs Prometheus +# ---------------------------------------------------------------------------- + +log() { printf '[%s] %s\n' "$(date +%H:%M:%S)" "$*"; } + +# ---------- shared helpers -------------------------------------------------- +register_prometheus_target() { + local job="$1" port="$2" + local file="${PROM_FILE_SD_DIR}/${job}.json" + mkdir -p "${PROM_FILE_SD_DIR}" + if [[ ! 
-f "$file" ]]; then echo "[]" >"$file"; fi + python3 - "$file" "${IP}:${port}" "${HN}" "${NSAPP}" <<'PY' +import json, sys +path, target, hn, app = sys.argv[1:5] +data = json.load(open(path)) +# Avoid duplicates +data = [b for b in data if target not in b.get("targets", [])] +data.append({"targets": [target], "labels": {"hostname": hn, "app": app}}) +json.dump(data, open(path, "w"), indent=2) +PY + log "Registered Prometheus target ${IP}:${port} in ${file}" +} + +set_ct_options() { + local cores="$1" mem="$2" desc="$3" + pct set "${CTID}" --cores "${cores}" --memory "${mem}" || true + pct set "${CTID}" --description "${desc}" || true +} + +# ---------- per-app dispatch ------------------------------------------------ +log "Dispatching post-install for NSAPP=${NSAPP} CTID=${CTID}" + +case "${NSAPP}" in + +# ------ Databases --------------------------------------------------------- +postgresql | mariadb | mongodb | redis | valkey) + log "Database role: bumping resources & adding to backup-critical pool" + set_ct_options 4 4096 "DB: ${APP}" + pvesh set /pools/db-critical --vms "${CTID}" 2>/dev/null || true + register_prometheus_target "${NSAPP}-exporter" 9187 + ;; + +# ------ *arr media stack -------------------------------------------------- +sonarr | radarr | prowlarr | lidarr | readarr | bazarr) + log "Media-arr role: tagging + Sonarr/Radarr API webhook" + pct set "${CTID}" --tags "community-script;media;arr-stack" || true + curl -fsS --max-time 5 -X POST \ + "http://media-hub.${DEFAULT_DNS_SUFFIX}/hooks/arr-added" \ + -H "Content-Type: application/json" \ + -d "{\"app\":\"${NSAPP}\",\"ctid\":${CTID},\"ip\":\"${IP}\"}" \ + >/dev/null || log "WARN: media-hub webhook failed" + ;; + +# ------ Web apps that should sit behind NPM/Traefik ---------------------- +vaultwarden | paperless-ngx | nextcloud | immich | bookstack) + log "Web app role: registering reverse-proxy entry" + curl -fsS --max-time 5 -X POST \ + "http://traefik.${DEFAULT_DNS_SUFFIX}/api/dynamic-add" \ + 
-H "Content-Type: application/json" \ + -d "$( + cat </dev/null || log "WARN: traefik registration failed" + register_prometheus_target "blackbox-http" 80 + ;; + +# ------ Default fallback -------------------------------------------------- +*) + log "No special handling for ${NSAPP} — applying generic defaults" + register_prometheus_target "node-exporter" 9100 + ;; +esac + +log "Finished dispatcher for ${APP} (CTID=${CTID})" +# ▲▲▲ EXAMPLE 5 — END ▲▲▲ + +# ============================================================================ +# END OF EXAMPLES +# ============================================================================ diff --git a/tools/pve/post-pbs-install.sh b/tools/pve/post-pbs-install.sh index 47100948e..315fa723e 100644 --- a/tools/pve/post-pbs-install.sh +++ b/tools/pve/post-pbs-install.sh @@ -65,6 +65,14 @@ component_exists_in_sources() { grep -h -E "^[^#]*Components:[^#]*\b${component}\b" /etc/apt/sources.list.d/*.sources 2>/dev/null | grep -q . } +require_whiptail() { + if ! command -v whiptail >/dev/null 2>&1; then + msg_error "Missing dependency: whiptail" + echo -e "Install it first (e.g. apt update && apt install -y whiptail), then re-run this script." 
+ exit 127 + fi +} + # ---- main ---- main() { header_info @@ -90,8 +98,14 @@ main() { CODENAME="$(get_pbs_codename)" case "$CODENAME" in - bookworm) start_routines_3 ;; - trixie) start_routines_4 ;; + bookworm) + require_whiptail + start_routines_3 + ;; + trixie) + require_whiptail + start_routines_4 + ;; *) msg_error "Unsupported Debian codename: $CODENAME" echo -e "Supported: bookworm (PBS 3.x) and trixie (PBS 4.x)" @@ -174,7 +188,9 @@ start_routines_4() { yes) msg_info "Correcting Debian Sources (deb822)" rm -f /etc/apt/sources.list.d/*.list - sed -i '/proxmox/d;/bookworm/d' /etc/apt/sources.list || true + if [ -f /etc/apt/sources.list ]; then + sed -i '/proxmox/d;/bookworm/d' /etc/apt/sources.list + fi cat >/etc/apt/sources.list.d/debian.sources </dev/null || true declare -f init_tool_telemetry &>/dev/null && init_tool_telemetry "post-pmg-install" "pve" -if ! grep -q "Proxmox Mail Gateway" /etc/issue 2>/dev/null; then +if ! dpkg -s proxmox-mailgateway-container >/dev/null 2>&1 && + ! 
dpkg -s proxmox-mailgateway >/dev/null 2>&1; then msg_error "This script is only intended for Proxmox Mail Gateway" exit 232 fi @@ -57,14 +58,24 @@ repo_state() { local repo="$1" local file="" local state="missing" - for f in /etc/apt/sources.list /etc/apt/sources.list.d/*.list; do + for f in /etc/apt/sources.list /etc/apt/sources.list.d/*.list /etc/apt/sources.list.d/*.sources; do [[ -f "$f" ]] || continue if grep -q "$repo" "$f"; then file="$f" - if grep -qE "^[^#].*${repo}" "$f"; then - state="active" - elif grep -qE "^#.*${repo}" "$f"; then - state="disabled" + if [[ "$f" == *.sources ]]; then + # deb822 format: check Enabled field + if grep -qiE '^Enabled:\s*no' "$f"; then + state="disabled" + else + state="active" + fi + else + # legacy format + if grep -qE "^[^#].*${repo}" "$f"; then + state="active" + elif grep -qE "^#.*${repo}" "$f"; then + state="disabled" + fi fi break fi @@ -72,6 +83,28 @@ repo_state() { echo "$state $file" } +toggle_repo() { + # $1 = file, $2 = action (enable|disable) + local file="$1" action="$2" + if [[ "$file" == *.sources ]]; then + if [[ "$action" == "disable" ]]; then + if grep -qiE '^Enabled:' "$file"; then + sed -i 's/^Enabled:.*/Enabled: no/' "$file" + else + echo "Enabled: no" >>"$file" + fi + else + sed -i 's/^Enabled:.*/Enabled: yes/' "$file" + fi + else + if [[ "$action" == "disable" ]]; then + sed -i '/^[^#]/s/^/# /' "$file" + else + sed -i 's/^# *//' "$file" + fi + fi +} + start_routines() { header_info VERSION="$(awk -F'=' '/^VERSION_CODENAME=/{ print $NF }' /etc/os-release)" @@ -84,11 +117,20 @@ start_routines() { case $CHOICE in yes) msg_info "Correcting Debian Sources" - cat </etc/apt/sources.list -deb http://deb.debian.org/debian ${VERSION} main contrib -deb http://deb.debian.org/debian ${VERSION}-updates main contrib -deb http://security.debian.org/debian-security ${VERSION}-security main contrib + cat </etc/apt/sources.list.d/debian.sources +Types: deb +URIs: http://deb.debian.org/debian +Suites: ${VERSION} 
${VERSION}-updates +Components: main contrib non-free non-free-firmware +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg + +Types: deb +URIs: http://security.debian.org/debian-security +Suites: ${VERSION}-security +Components: main contrib non-free non-free-firmware +Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg EOF + rm -f /etc/apt/sources.list msg_ok "Corrected Debian Sources" ;; no) msg_error "Selected no to Correcting Debian Sources" ;; @@ -108,7 +150,7 @@ EOF keep) msg_ok "Kept 'pmg-enterprise' repository" ;; disable) msg_info "Disabling 'pmg-enterprise' repository" - sed -i "s/^[^#].*pmg-enterprise/# &/" "$file" + toggle_repo "$file" disable msg_ok "Disabled 'pmg-enterprise' repository" ;; delete) @@ -128,7 +170,7 @@ EOF case $CHOICE in enable) msg_info "Enabling 'pmg-enterprise' repository" - sed -i "s/^#.*pmg-enterprise/deb/" "$file" + toggle_repo "$file" enable msg_ok "Enabled 'pmg-enterprise' repository" ;; keep) msg_ok "Kept 'pmg-enterprise' repository disabled" ;; @@ -149,8 +191,12 @@ EOF case $CHOICE in yes) msg_info "Adding 'pmg-enterprise' repository" - cat >/etc/apt/sources.list.d/pmg-enterprise.list </etc/apt/sources.list.d/pmg-enterprise.sources </etc/apt/sources.list.d/pmg-install-repo.list </etc/apt/sources.list.d/pmg-no-subscription.sources </etc/apt/sources.list.d/pmgtest-for-beta.list </etc/apt/sources.list.d/pmgtest.sources </etc/apt/sources.list.d/debian.sources </dev/null } +function dry_run_container() { + local container="$1" + local service="$2" + + # Extract app name and source repo directly from check_for_gh_release call in the ct script + # Pattern: check_for_gh_release "appname" "owner/repo" + local check_line app_name app_lc source_repo + check_line=$(echo "$script" | grep -m1 'check_for_gh_release') + + if [[ -z "$check_line" ]]; then + echo -e "${YW}[DRY-RUN]${CL} Container $container ($service): no check_for_gh_release found — skipping" + DRY_RUN_RESULT="no check_for_gh_release found — skipping" + return + 
fi + + app_name=$(echo "$check_line" | cut -d'"' -f2) + source_repo=$(echo "$check_line" | cut -d'"' -f4) + app_lc=$(echo "${app_name,,}" | tr -d ' ') + + if [[ -z "$source_repo" || "$source_repo" != *"/"* ]]; then + echo -e "${YW}[DRY-RUN]${CL} Container $container ($service): cannot parse source repo — skipping" + DRY_RUN_RESULT="cannot parse source repo — skipping" + return + fi + + # Read installed version from container (stored by check_for_gh_release as ~/.) + local current_version + current_version=$(pct exec "$container" -- bash -c "cat \$HOME/.${app_lc} 2>/dev/null" 2>/dev/null || true) + current_version="${current_version#v}" + + # Query latest release from GitHub API + local latest_version + latest_version=$(curl -sSL --max-time 10 \ + -H 'Accept: application/vnd.github+json' \ + -H 'X-GitHub-Api-Version: 2022-11-28' \ + "https://api.github.com/repos/${source_repo}/releases/latest" 2>/dev/null | + grep '"tag_name"' | head -1 | cut -d'"' -f4 | sed 's/^v//') + + if [[ -z "$latest_version" ]]; then + echo -e "${YW}[DRY-RUN]${CL} Container $container ($service): cannot fetch latest version from $source_repo" + DRY_RUN_RESULT="cannot fetch latest version from $source_repo" + return + fi + + if [[ -z "$current_version" ]]; then + echo -e "${BL}[DRY-RUN]${CL} Container $container ($service): installed version unknown, latest: ${latest_version} (${source_repo})" + DRY_RUN_RESULT="version unknown — latest: ${latest_version}" + elif [[ "$current_version" == "$latest_version" ]]; then + echo -e "${GN}[DRY-RUN]${CL} Container $container ($service): up to date (${current_version})" + DRY_RUN_RESULT="up to date (${current_version})" + else + echo -e "${YW}[DRY-RUN]${CL} Container $container ($service): update available ${current_version} → ${latest_version}" + DRY_RUN_RESULT="update available ${current_version} → ${latest_version}" + fi +} + function backup_container() { msg_info "Creating backup for container $1" vzdump $1 --compress zstd --storage $STORAGE_CHOICE 
-notes-template "{{guestname}} - community-scripts backup updater" >/dev/null 2>&1 @@ -169,8 +246,32 @@ END { ' /etc/pve/storage.cfg) } +# Structured result tracking for the final summary report +# Each entry: "CTID|service|STATUS|details" +declare -a UPDATE_RESULTS=() +function log_result() { + # log_result
+ UPDATE_RESULTS+=("${1}|${2}|${3}|${4}") +} + header_info +# ============================================================================= +# LOGGING SETUP +# Key events are written directly to a timestamped log file under +# /usr/local/community-scripts/update_apps/ — this avoids any stdout +# redirection that would break interactive spinners or whiptail dialogs. +# The full summary table is appended at the end of the run. +# ============================================================================= +LOG_DIR="/usr/local/community-scripts/update_apps" +mkdir -p "$LOG_DIR" +LOG_FILE="${LOG_DIR}/$(date '+%Y%m%d_%H%M%S').log" +echo "Update started: $(date '+%Y-%m-%d %H:%M:%S')" >"$LOG_FILE" + +function log_write() { + echo "[$(date '+%H:%M:%S')] $*" >>"$LOG_FILE" +} + # Skip confirmation if var_skip_confirm is set to yes if [[ "$var_skip_confirm" != "yes" ]]; then whiptail --backtitle "Proxmox VE Helper Scripts" --title "LXC App Update" --yesno "This will update apps in LXCs installed by Helper-Scripts. Proceed?" 
10 58 || exit @@ -199,7 +300,7 @@ while read -r container; do menu_items+=("$container_id" "$formatted_line" "OFF") fi done <<<"$containers" -msg_ok "Loaded ${#menu_items[@]} containers" +msg_ok "Loaded $((${#menu_items[@]} / 3)) containers" # Determine container selection based on var_container if [[ -n "$var_container" ]]; then @@ -260,7 +361,10 @@ fi header_info # Determine backup choice based on var_backup -if [[ -n "$var_backup" ]]; then +# Dry-run never needs a backup — skip the prompt entirely +if [[ "$var_dry_run" == "yes" ]]; then + BACKUP_CHOICE="no" +elif [[ -n "$var_backup" ]]; then BACKUP_CHOICE="$var_backup" else BACKUP_CHOICE="no" @@ -270,7 +374,10 @@ else fi # Determine unattended update based on var_unattended -if [[ -n "$var_unattended" ]]; then +# Dry-run never executes updates — skip the prompt entirely +if [[ "$var_dry_run" == "yes" ]]; then + UNATTENDED_UPDATE="no" +elif [[ -n "$var_unattended" ]]; then UNATTENDED_UPDATE="$var_unattended" else UNATTENDED_UPDATE="no" @@ -321,6 +428,7 @@ fi containers_needing_reboot=() for container in $CHOICE; do echo -e "${BL}[INFO]${CL} Updating container $container" + log_write "Container $container: starting" if [ "$BACKUP_CHOICE" == "yes" ]; then backup_container $container @@ -342,9 +450,12 @@ for container in $CHOICE; do #1.1) If update script not detected, return if [ -z "${service}" ]; then echo -e "${YW}[WARN]${CL} Update script not found. 
Skipping to next container" + log_result "$container" "(unknown)" "SKIPPED" "No update script found in container" + log_write "Container $container: SKIPPED — no update script found" continue else echo -e "${BL}[INFO]${CL} Detected service: ${GN}${service}${CL}" + log_write "Container $container: detected service '$service'" fi #2) Extract service build/update resource requirements from config/installation file @@ -391,25 +502,32 @@ for container in $CHOICE; do fi #3) if build resources are different than run resources, then: - if [ "$UPDATE_BUILD_RESOURCES" -eq "1" ]; then + if [ "$UPDATE_BUILD_RESOURCES" -eq "1" ] && [[ "$var_dry_run" != "yes" ]]; then pct set "$container" --cores "$build_cpu" --memory "$build_ram" fi + #3.5) Dry-run: report update availability without applying + if [[ "$var_dry_run" == "yes" ]]; then + DRY_RUN_RESULT="" + dry_run_container "$container" "$service" + log_result "$container" "$service" "DRY-RUN" "${DRY_RUN_RESULT:-version check only}" + log_write "Container $container ($service): DRY-RUN — ${DRY_RUN_RESULT:-version check only}" + continue + fi + #4) Update service, using the update command + # Prepend a no-op 'clear' wrapper to PATH so update scripts calling clear + # don't fail without a TTY — works for all shells incl. 
ash (no export -f) + SETUP_CMD="mkdir -p /tmp/.nc; printf '#!/bin/sh\n:\n' > /tmp/.nc/clear; chmod +x /tmp/.nc/clear; export PATH=/tmp/.nc:\$PATH; export TERM=dumb; " case "$os" in - alpine) pct exec "$container" -- ash -c "$UPDATE_CMD" ;; - archlinux) pct exec "$container" -- bash -c "$UPDATE_CMD" ;; - fedora | rocky | centos | alma) pct exec "$container" -- bash -c "$UPDATE_CMD" ;; - ubuntu | debian | devuan) pct exec "$container" -- bash -c "$UPDATE_CMD" ;; - opensuse) pct exec "$container" -- bash -c "$UPDATE_CMD" ;; + alpine) pct exec "$container" -- ash -c "${SETUP_CMD}${UPDATE_CMD}" ;; + archlinux) pct exec "$container" -- bash -c "${SETUP_CMD}${UPDATE_CMD}" ;; + fedora | rocky | centos | alma) pct exec "$container" -- bash -c "${SETUP_CMD}${UPDATE_CMD}" ;; + ubuntu | debian | devuan) pct exec "$container" -- bash -c "${SETUP_CMD}${UPDATE_CMD}" ;; + opensuse) pct exec "$container" -- bash -c "${SETUP_CMD}${UPDATE_CMD}" ;; esac exit_code=$? - if [ "$template" == "false" ] && [ "$status" == "status: stopped" ]; then - echo -e "${BL}[Info]${GN} Shutting down${BL} $container ${CL} \n" - pct shutdown $container & - fi - #5) if build resources are different than run resources, then: if [ "$UPDATE_BUILD_RESOURCES" -eq "1" ]; then pct set "$container" --cores "$run_cpu" --memory "$run_ram" @@ -421,32 +539,116 @@ for container in $CHOICE; do containers_needing_reboot+=("$container ($container_hostname)") fi + if [ "$template" == "false" ] && [ "$status" == "status: stopped" ]; then + echo -e "${BL}[Info]${GN} Shutting down${BL} $container ${CL} \n" + pct shutdown $container &>/dev/null & + fi + if [ $exit_code -eq 0 ]; then msg_ok "Updated container $container" + log_result "$container" "$service" "OK" "Updated successfully" + log_write "Container $container ($service): OK" elif [ $exit_code -eq 75 ]; then echo -e "${YW}[WARN]${CL} Container $container skipped (requires interactive mode)" + log_result "$container" "$service" "SKIPPED" "Requires interactive mode (exit 
75)" + log_write "Container $container ($service): SKIPPED — requires interactive mode" + elif [ $exit_code -eq 113 ]; then + echo -e "${YW}[WARN]${CL} Container $container skipped (under-provisioned: increase CPU/RAM to match template)" + log_result "$container" "$service" "SKIPPED" "Under-provisioned — increase CPU/RAM to match template" + log_write "Container $container ($service): SKIPPED — under-provisioned" + elif [ $exit_code -eq 114 ]; then + echo -e "${YW}[WARN]${CL} Container $container skipped (storage critically low on /boot)" + log_result "$container" "$service" "SKIPPED" "Storage critically low on /boot (>80%)" + log_write "Container $container ($service): SKIPPED — storage critically low on /boot" elif [ "$BACKUP_CHOICE" == "yes" ]; then - msg_info "Restoring LXC from backup" + msg_error "Update failed for container $container (exit code: $exit_code) — attempting restore" + log_write "Container $container ($service): FAILED (exit $exit_code) — attempting restore" + msg_info "Restoring LXC $container from backup ($STORAGE_CHOICE)" pct stop $container LXC_STORAGE=$(pct config $container | awk -F '[:,]' '/rootfs/ {print $2}') - pct restore $container /var/lib/vz/dump/vzdump-lxc-${container}-*.tar.zst --storage $LXC_STORAGE --force >/dev/null 2>&1 - pct start $container + BACKUP_ENTRY=$(pvesm list "$STORAGE_CHOICE" 2>/dev/null | awk -v ctid="$container" '$1 ~ "vzdump-lxc-"ctid"-" || $1 ~ "/ct/"ctid"/" {print $1}' | sort -r | head -n1) + if [ -z "$BACKUP_ENTRY" ]; then + msg_error "No backup found in storage $STORAGE_CHOICE for container $container" + log_result "$container" "$service" "FAILED" "Update failed (exit $exit_code) — no backup found for restore" + log_write "Container $container ($service): FAILED — no backup found for restore" + exit 235 + fi + msg_info "Restoring from: $BACKUP_ENTRY" + pct restore $container "$BACKUP_ENTRY" --storage $LXC_STORAGE --force >/dev/null 2>&1 restorestatus=$? 
if [ $restorestatus -eq 0 ]; then - msg_ok "Restored LXC from backup" + pct start $container + msg_ok "Container $container successfully restored from backup" + log_result "$container" "$service" "RESTORED" "Update failed (exit $exit_code) — restored from backup" + log_write "Container $container ($service): RESTORED from $BACKUP_ENTRY" else - msg_error "Restored LXC from backup failed" + msg_error "Restore failed for container $container" + log_result "$container" "$service" "FAILED" "Update failed (exit $exit_code) — restore also failed" + log_write "Container $container ($service): FAILED — restore also failed" exit 235 fi else - msg_error "Update failed for container $container. Exiting" - exit "$exit_code" + msg_error "Update failed for container $container (exit code: $exit_code)" + log_result "$container" "$service" "FAILED" "Exit code $exit_code" + log_write "Container $container ($service): FAILED (exit $exit_code)" + if [[ "$var_continue_on_error" == "yes" ]]; then + echo -e "${YW}[WARN]${CL} Continuing to next container (var_continue_on_error=yes)" + continue + else + exit "$exit_code" + fi fi done wait header_info -echo -e "${GN}The process is complete, and the containers have been successfully updated.${CL}\n" +if [[ "$var_dry_run" == "yes" ]]; then + echo -e "${GN}Dry-run complete. 
No containers were modified.${CL}\n" +else + echo -e "${GN}The process is complete, and the containers have been successfully updated.${CL}\n" +fi + +# ============================================================================= +# SUMMARY REPORT +# ============================================================================= +if [ "${#UPDATE_RESULTS[@]}" -gt 0 ]; then + SEPARATOR="━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + HEADER=$(printf " %-8s %-22s %-10s %s" "CTID" "Service" "Status" "Details") + + # terminal output (with colours) + echo "" + echo "$SEPARATOR" + echo "$HEADER" + echo "$SEPARATOR" + for entry in "${UPDATE_RESULTS[@]}"; do + IFS='|' read -r _ctid _svc _status _details <<<"$entry" + case "$_status" in + OK) _color="${GN}" ;; + FAILED) _color="${RD}" ;; + RESTORED) _color="${YW}" ;; + *) _color="${YW}" ;; + esac + printf " %-8s %-22s ${_color}%-10s${CL} %s\n" "$_ctid" "$_svc" "$_status" "$_details" + done + echo "$SEPARATOR" + echo "" + echo "Full log: $LOG_FILE" + echo "" + + # append plain-text summary to log file + { + echo "" + echo "Update finished: $(date '+%Y-%m-%d %H:%M:%S')" + echo "$SEPARATOR" + echo "$HEADER" + echo "$SEPARATOR" + for entry in "${UPDATE_RESULTS[@]}"; do + IFS='|' read -r _ctid _svc _status _details <<<"$entry" + printf " %-8s %-22s %-10s %s\n" "$_ctid" "$_svc" "$_status" "$_details" + done + echo "$SEPARATOR" + } >>"$LOG_FILE" +fi if [ "${#containers_needing_reboot[@]}" -gt 0 ]; then echo -e "${RD}The following containers require a reboot:${CL}" for container_name in "${containers_needing_reboot[@]}"; do diff --git a/tools/pve/update-lxcs-cron.sh b/tools/pve/update-lxcs-cron.sh index a6d6f0987..d7abc4cae 100644 --- a/tools/pve/update-lxcs-cron.sh +++ b/tools/pve/update-lxcs-cron.sh @@ -1,23 +1,46 @@ #!/usr/bin/env bash -# Copyright (c) 2021-2026 tteck -# Author: tteck (tteckster) -# License: MIT -# https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Copyright (c) 2021-2026 
community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# +# This script is installed locally by cron-update-lxcs.sh and executed +# by cron. It updates all LXC containers using their native package manager. + +# Ensure full PATH when running via cron (pct lives in /usr/sbin) +export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + +CONF_FILE="/etc/update-lxcs.conf" echo -e "\n $(date)" + +# Collect excluded containers from arguments excluded_containers=("$@") + +# Merge exclusions from config file if it exists +if [[ -f "$CONF_FILE" ]]; then + conf_exclude=$(grep -oP '^\s*EXCLUDE\s*=\s*\K[0-9,]+' "$CONF_FILE" 2>/dev/null || true) + IFS=',' read -ra conf_ids <<<"$conf_exclude" + for id in "${conf_ids[@]}"; do + id="${id// /}" + [[ -n "$id" ]] && excluded_containers+=("$id") + done +fi + function update_container() { - container=$1 - name=$(pct exec "$container" hostname) - echo -e "\n [Info] Updating $container : $name \n" + local container=$1 + local name + name=$(pct exec "$container" hostname 2>/dev/null || echo "unknown") + local os os=$(pct config "$container" | awk '/^ostype/ {print $2}') + echo -e "\n [Info] Updating $container : $name (os: $os)" case "$os" in alpine) pct exec "$container" -- ash -c "apk -U upgrade" ;; archlinux) pct exec "$container" -- bash -c "pacman -Syyu --noconfirm" ;; fedora | rocky | centos | alma) pct exec "$container" -- bash -c "dnf -y update && dnf -y upgrade" ;; - ubuntu | debian | devuan) pct exec "$container" -- bash -c "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confold" dist-upgrade -y; rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED" ;; + ubuntu | debian | devuan) pct exec "$container" -- bash -c "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::='--force-confold' dist-upgrade -y; rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED" ;; opensuse) pct exec "$container" -- 
bash -c "zypper ref && zypper --non-interactive dup" ;; + *) echo " [Warn] Unknown OS type '$os' for container $container, skipping" ;; esac } @@ -34,16 +57,29 @@ for container in $(pct list | awk '{if(NR>1) print $1}'); do sleep 1 else status=$(pct status "$container") - template=$(pct config "$container" | grep -q "template:" && echo "true" || echo "false") - if [ "$template" == "false" ] && [ "$status" == "status: stopped" ]; then + if pct config "$container" 2>/dev/null | grep -q "^template:"; then + echo -e "[Info] Skipping template $container" + continue + fi + if [ "$status" == "status: stopped" ]; then echo -e "[Info] Starting $container" pct start "$container" sleep 5 - update_container "$container" + update_container "$container" || echo " [Error] Update failed for $container" + # check if patchmon agent is present in container and run a report if found + if pct exec "$container" -- [ -e "/usr/local/bin/patchmon-agent" ]; then + echo -e "${BL}[Info]${GN} patchmon-agent found in ${BL} $container ${CL}, triggering report. \n" + pct exec "$container" -- "/usr/local/bin/patchmon-agent" "report" + fi echo -e "[Info] Shutting down $container" - pct shutdown "$container" & + pct shutdown "$container" --timeout 60 & elif [ "$status" == "status: running" ]; then - update_container "$container" + update_container "$container" || echo " [Error] Update failed for $container" + # check if patchmon agent is present in container and run a report if found + if pct exec "$container" -- [ -e "/usr/local/bin/patchmon-agent" ]; then + echo -e "${BL}[Info]${GN} patchmon-agent found in ${BL} $container ${CL}, triggering report. 
\n" + pct exec "$container" -- "/usr/local/bin/patchmon-agent" "report" + fi fi fi done diff --git a/tools/pve/update-lxcs.sh b/tools/pve/update-lxcs.sh index 5b0dfc2a5..52e9d2528 100644 --- a/tools/pve/update-lxcs.sh +++ b/tools/pve/update-lxcs.sh @@ -78,7 +78,7 @@ function update_container() { alpine) pct exec "$container" -- ash -c "apk -U upgrade" ;; archlinux) pct exec "$container" -- bash -c "pacman -Syyu --noconfirm" ;; fedora | rocky | centos | alma) pct exec "$container" -- bash -c "dnf -y update && dnf -y upgrade" ;; - ubuntu | debian | devuan) pct exec "$container" -- bash -c "apt-get update 2>/dev/null | grep 'packages.*upgraded'; apt list --upgradable && apt-get -yq dist-upgrade 2>&1; rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED || true" ;; + ubuntu | debian | devuan) pct exec "$container" -- bash -c "apt-get update 2>/dev/null | grep 'packages.*upgraded'; apt list --upgradable 2>/dev/null | cat && apt-get -yq dist-upgrade 2>&1; rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED || true" ;; opensuse) pct exec "$container" -- bash -c "zypper ref && zypper --non-interactive dup" ;; esac } @@ -110,15 +110,17 @@ for container in $(pct list | awk '{if(NR>1) print $1}'); do elif [ "$status" == "status: running" ]; then update_container $container fi - if pct exec "$container" -- [ -e "/var/run/reboot-required" ]; then - # Get the container's hostname and add it to the list - container_hostname=$(pct exec "$container" hostname) - containers_needing_reboot+=("$container ($container_hostname)") - fi - # check if patchmon agent is present in container and run a report if found - if pct exec "$container" -- [ -e "/usr/local/bin/patchmon-agent" ]; then - echo -e "${BL}[Info]${GN} patchmon-agent found in ${BL} $container ${CL}, triggering report. 
\n" - pct exec "$container" -- "/usr/local/bin/patchmon-agent" "report" + if [ "$status" == "status: running" ]; then + if pct exec "$container" -- [ -e "/var/run/reboot-required" ]; then + # Get the container's hostname and add it to the list + container_hostname=$(pct exec "$container" hostname) + containers_needing_reboot+=("$container ($container_hostname)") + fi + # check if patchmon agent is present in container and run a report if found + if pct exec "$container" -- [ -e "/usr/local/bin/patchmon-agent" ]; then + echo -e "${BL}[Info]${GN} patchmon-agent found in ${BL} $container ${CL}, triggering report. \n" + pct exec "$container" -- "/usr/local/bin/patchmon-agent" "report" + fi fi fi done diff --git a/turnkey/turnkey.sh b/turnkey/turnkey.sh index 6b53cf747..063b90432 100644 --- a/turnkey/turnkey.sh +++ b/turnkey/turnkey.sh @@ -1,10 +1,23 @@ #!/usr/bin/env bash -# Copyright (c) 2021-2026 tteck +# Copyright (c) 2021-2026 community-scripts ORG # Author: tteck (tteckster) -# License: MIT -# https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE -function header_info { +# Source shared libraries +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/api.func) +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/core.func) +source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/error_handler.func) +load_functions +catch_errors + +APP="TurnKey LXC" +NSAPP="turnkey" +DIAGNOSTICS="no" +METHOD="default" +RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" +EXECUTION_ID="${RANDOM_UUID}" + +header_info() { clear cat <<"EOF" ______ __ __ __ _ _______ @@ -15,281 +28,345 @@ function header_info { EOF } -set -euo pipefail -shopt -s expand_aliases -alias die='EXIT=$? 
LINE=$LINENO error_exit' -trap die ERR -function error_exit() { - trap - ERR - local DEFAULT='Unknown failure occured.' - local REASON="\e[97m${1:-$DEFAULT}\e[39m" - local FLAG="\e[91m[ERROR] \e[93m$EXIT@$LINE" - msg "$FLAG $REASON" 1>&2 - [ ! -z ${CTID-} ] && cleanup_ctid - exit $EXIT -} -function warn() { - local REASON="\e[97m$1\e[39m" - local FLAG="\e[93m[WARNING]\e[39m" - msg "$FLAG $REASON" -} -function info() { - local REASON="$1" - local FLAG="\e[36m[INFO]\e[39m" - msg "$FLAG $REASON" -} -function msg() { - local TEXT="$1" - echo -e "$TEXT" -} -function validate_container_id() { +# Validate if a container ID is available (cluster-aware) +validate_container_id() { local ctid="$1" - # Check if ID is numeric - if ! [[ "$ctid" =~ ^[0-9]+$ ]]; then - return 1 + [[ "$ctid" =~ ^[0-9]+$ ]] || return 1 + + # Cluster-wide check via pvesh + if command -v pvesh &>/dev/null; then + local cluster_ids + cluster_ids=$(pvesh get /cluster/resources --type vm --output-format json 2>/dev/null | + grep -oP '"vmid":\s*\K[0-9]+' 2>/dev/null || true) + if [[ -n "$cluster_ids" ]] && echo "$cluster_ids" | grep -qw "$ctid"; then + return 1 + fi fi - # Check if config file exists for VM or LXC + + # Local fallback if [[ -f "/etc/pve/qemu-server/${ctid}.conf" ]] || [[ -f "/etc/pve/lxc/${ctid}.conf" ]]; then return 1 fi - # Check if ID is used in LVM logical volumes + + # Check all cluster nodes + if [[ -d "/etc/pve/nodes" ]]; then + for node_dir in /etc/pve/nodes/*/; do + if [[ -f "${node_dir}qemu-server/${ctid}.conf" ]] || [[ -f "${node_dir}lxc/${ctid}.conf" ]]; then + return 1 + fi + done + fi + + # Check LVM volumes if lvs --noheadings -o lv_name 2>/dev/null | grep -qE "(^|[-_])${ctid}($|[-_])"; then return 1 fi return 0 } -function get_valid_container_id() { - local suggested_id="${1:-$(pvesh get /cluster/nextid)}" + +get_valid_container_id() { + local suggested_id="${1:-$(pvesh get /cluster/nextid 2>/dev/null || echo 100)}" while ! 
validate_container_id "$suggested_id"; do suggested_id=$((suggested_id + 1)) done echo "$suggested_id" } -function cleanup_ctid() { - if pct status $CTID &>/dev/null; then - if [ "$(pct status $CTID | awk '{print $2}')" == "running" ]; then - pct stop $CTID + +cleanup_ctid() { + if pct status "$CTID" &>/dev/null; then + if [[ "$(pct status "$CTID" | awk '{print $2}')" == "running" ]]; then + pct stop "$CTID" fi - pct destroy $CTID + pct destroy "$CTID" fi } +select_storage() { + local class="$1" content content_label + case "$class" in + container) + content='rootdir' + content_label='Container' + ;; + template) + content='vztmpl' + content_label='Container template' + ;; + *) + msg_error "Invalid storage class '$class'" + return 1 + ;; + esac + + local -a MENU=() + local MSG_MAX_LENGTH=0 + + while read -r line; do + local TAG TYPE FREE ITEM OFFSET=2 + TAG=$(echo "$line" | awk '{print $1}') + TYPE=$(echo "$line" | awk '{printf "%-10s", $2}') + FREE=$(echo "$line" | numfmt --field 4-6 --from-unit=K --to=iec --format %.2f | awk '{printf( "%9sB", $6)}') + ITEM=" Type: $TYPE Free: $FREE " + ((${#ITEM} + OFFSET > MSG_MAX_LENGTH)) && MSG_MAX_LENGTH=$((${#ITEM} + OFFSET)) + MENU+=("$TAG" "$ITEM" "OFF") + done < <(pvesm status -content "$content" | awk 'NR>1') + + if [[ $((${#MENU[@]} / 3)) -eq 0 ]]; then + msg_error "'$content_label' needs to be selected for at least one storage location." 
+ return 1 + elif [[ $((${#MENU[@]} / 3)) -eq 1 ]]; then + printf '%s' "${MENU[0]}" + else + local STORAGE + while [[ -z "${STORAGE:+x}" ]]; do + STORAGE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Storage Pools" --radiolist \ + "Which storage pool for the ${content_label,,}?\n\n" \ + 16 $((MSG_MAX_LENGTH + 23)) 6 \ + "${MENU[@]}" 3>&1 1>&2 2>&3) || exit_script + done + printf '%s' "$STORAGE" + fi +} + +# ============================================================================== +# MAIN +# ============================================================================== + +# Cleanup on error: destroy container, report telemetry, and restart monitor +turnkey_cleanup() { + local exit_code=$? + if [[ $exit_code -ne 0 ]]; then + # Report failure to telemetry + if [[ "${POST_TO_API_DONE:-}" == "true" && "${POST_UPDATE_DONE:-}" != "true" ]]; then + post_update_to_api "failed" "$exit_code" 2>/dev/null || true + fi + # Destroy failed container + if [[ -n "${CTID:-}" ]]; then + cleanup_ctid 2>/dev/null || true + fi + fi + if [[ -f /etc/systemd/system/ping-instances.service ]]; then + systemctl start ping-instances.service 2>/dev/null || true + fi +} +trap turnkey_cleanup EXIT + # Stop Proxmox VE Monitor-All if running if systemctl is-active -q ping-instances.service; then systemctl stop ping-instances.service fi + +pve_check +shell_check +root_check + +# Read diagnostics preference (same logic as build.func diagnostics_check) +DIAG_CONFIG="/usr/local/community-scripts/diagnostics" +if [[ -f "$DIAG_CONFIG" ]]; then + DIAGNOSTICS=$(awk -F '=' '/^DIAGNOSTICS/ {print $2}' "$DIAG_CONFIG") || true + DIAGNOSTICS="${DIAGNOSTICS:-no}" +fi + header_info -whiptail --backtitle "Proxmox VE Helper Scripts" --title "TurnKey LXCs" --yesno "This will allow for the creation of one of the many TurnKey LXC Containers. Proceed?" 
10 68 +whiptail --backtitle "Proxmox VE Helper Scripts" --title "TurnKey LXCs" --yesno \ + "This will allow for the creation of one of the many TurnKey LXC Containers. Proceed?" 10 68 || exit_script + +# Update template catalog early so the menu reflects the latest available templates +msg_info "Updating LXC template list" +pveam update >/dev/null +msg_ok "Updated LXC template list" + +# Build TurnKey selection menu dynamically from available templates +# Requires gawk for regex capture groups in match() +command -v gawk &>/dev/null || apt-get install -y gawk &>/dev/null +declare -A TURNKEY_TEMPLATES TURNKEY_MENU=() MSG_MAX_LENGTH=0 -while read -r TAG ITEM; do +while IFS=$'\t' read -r TEMPLATE_FILE TAG ITEM; do + TURNKEY_TEMPLATES["$TAG"]="$TEMPLATE_FILE" OFFSET=2 - ((${#ITEM} + OFFSET > MSG_MAX_LENGTH)) && MSG_MAX_LENGTH=${#ITEM}+OFFSET + ((${#ITEM} + OFFSET > MSG_MAX_LENGTH)) && MSG_MAX_LENGTH=$((${#ITEM} + OFFSET)) TURNKEY_MENU+=("$TAG" "$ITEM " "OFF") -done < <( - cat <&1 1>&2 2>&3 | tr -d '"') -[ -z "$turnkey" ] && { - whiptail --backtitle "Proxmox VE Helper Scripts" --title "No TurnKey LXC Selected" --msgbox "It appears that no TurnKey LXC container was selected" 10 68 - msg "Done" - exit -} +done < <(pveam available -section turnkeylinux | gawk '{ + tpl = $2 + if (match(tpl, /debian-([0-9]+)-turnkey-([^_]+)_([^_]+)_/, m)) { + app = m[2]; deb = m[1]; ver = m[3] + display = app + gsub(/-/, " ", display) + n = split(display, words, " ") + display = "" + for (i = 1; i <= n; i++) { + words[i] = toupper(substr(words[i], 1, 1)) substr(words[i], 2) + display = display (i > 1 ? " " : "") words[i] + } + tag = app "-" deb + printf "%s\t%s\t%s | Debian %s | %s\n", tpl, tag, display, deb, ver + } +}' | sort -t$'\t' -k2,2) -# Setup script environment +if [[ ${#TURNKEY_MENU[@]} -eq 0 ]]; then + msg_error "No TurnKey templates found. Check your internet connection or template repository." 
+ exit 1 +fi + +selected=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "TurnKey LXCs" --radiolist \ + "\nSelect a TurnKey LXC to create:\n" 20 $((MSG_MAX_LENGTH + 58)) 12 \ + "${TURNKEY_MENU[@]}" 3>&1 1>&2 2>&3 | tr -d '"') || exit_script + +if [[ -z "$selected" ]]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --title "No TurnKey LXC Selected" \ + --msgbox "It appears that no TurnKey LXC container was selected" 10 68 + exit_script +fi + +# Extract template filename and app name from selection +TEMPLATE="${TURNKEY_TEMPLATES[$selected]}" +turnkey="${selected%-*}" + +# Generate random password PASS="$(openssl rand -base64 8)" -# Prompt user to confirm container ID + +# Prompt for Container ID +NEXT_ID=$(pvesh get /cluster/nextid 2>/dev/null || echo 100) while true; do - CTID=$(whiptail --backtitle "Container ID" --title "Choose the Container ID" --inputbox "Enter the container ID..." 8 40 $(pvesh get /cluster/nextid) 3>&1 1>&2 2>&3) + CTID=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Container ID" \ + --inputbox "Enter the container ID..." 8 40 "$NEXT_ID" 3>&1 1>&2 2>&3) || exit_script - # Check if user cancelled - [ -z "$CTID" ] && die "No Container ID selected" + if [[ -z "$CTID" ]]; then + msg_error "No Container ID selected" + exit_script + fi - # Validate Container ID if ! validate_container_id "$CTID"; then SUGGESTED_ID=$(get_valid_container_id "$CTID") - if whiptail --backtitle "Container ID" --title "ID Already In Use" --yesno "Container/VM ID $CTID is already in use.\n\nWould you like to use the next available ID ($SUGGESTED_ID)?" 10 58; then + if whiptail --backtitle "Proxmox VE Helper Scripts" --title "ID Already In Use" --yesno \ + "Container/VM ID $CTID is already in use.\n\nWould you like to use the next available ID ($SUGGESTED_ID)?" 
10 58; then CTID="$SUGGESTED_ID" break fi - # User declined, loop back to input else break fi done -# Prompt user to confirm Hostname -HOST_NAME=$(whiptail --backtitle "Hostname" --title "Choose the Hostname" --inputbox "Enter the containers Hostname..." 8 40 "turnkey-${turnkey}" 3>&1 1>&2 2>&3) -PCT_OPTIONS=" - -features keyctl=1,nesting=1 - -hostname $HOST_NAME - -tags community-script - -onboot 1 - -cores 2 - -memory 2048 - -password $PASS - -net0 name=eth0,bridge=vmbr0,ip=dhcp - -unprivileged 1 - " -DEFAULT_PCT_OPTIONS=( - -arch $(dpkg --print-architecture) + +# Prompt for Hostname +HOST_NAME=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Hostname" \ + --inputbox "Enter the container hostname..." 8 40 "turnkey-${turnkey}" 3>&1 1>&2 2>&3) || exit_script + +# Container options +PCT_OPTIONS=( + -features keyctl=1,nesting=1 + -hostname "$HOST_NAME" + -tags community-script + -onboot 1 + -cores 2 + -memory 2048 + -password "$PASS" + -net0 name=eth0,bridge=vmbr0,ip=dhcp + -unprivileged 1 + -arch "$(dpkg --print-architecture)" ) -# Set the CONTENT and CONTENT_LABEL variables -function select_storage() { - local CLASS=$1 - local CONTENT - local CONTENT_LABEL - case $CLASS in - container) - CONTENT='rootdir' - CONTENT_LABEL='Container' - ;; - template) - CONTENT='vztmpl' - CONTENT_LABEL='Container template' - ;; - *) false || die "Invalid storage class." 
;; - esac - - # Query all storage locations - local -a MENU - while read -r line; do - local TAG=$(echo $line | awk '{print $1}') - local TYPE=$(echo $line | awk '{printf "%-10s", $2}') - local FREE=$(echo $line | numfmt --field 4-6 --from-unit=K --to=iec --format %.2f | awk '{printf( "%9sB", $6)}') - local ITEM=" Type: $TYPE Free: $FREE " - local OFFSET=2 - if [[ $((${#ITEM} + $OFFSET)) -gt ${MSG_MAX_LENGTH:-} ]]; then - local MSG_MAX_LENGTH=$((${#ITEM} + $OFFSET)) - fi - MENU+=("$TAG" "$ITEM" "OFF") - done < <(pvesm status -content $CONTENT | awk 'NR>1') - - # Select storage location - if [ $((${#MENU[@]} / 3)) -eq 0 ]; then - warn "'$CONTENT_LABEL' needs to be selected for at least one storage location." - die "Unable to detect valid storage location." - elif [ $((${#MENU[@]} / 3)) -eq 1 ]; then - printf ${MENU[0]} - else - local STORAGE - while [ -z "${STORAGE:+x}" ]; do - STORAGE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "Storage Pools" --radiolist \ - "Which storage pool would you like to use for the ${CONTENT_LABEL,,}?\n\n" \ - 16 $(($MSG_MAX_LENGTH + 23)) 6 \ - "${MENU[@]}" 3>&1 1>&2 2>&3) || die "Menu aborted." - done - printf $STORAGE - fi +# Storage selection +TEMPLATE_STORAGE=$(select_storage template) || { + msg_error "Failed to select template storage" + exit 1 } +msg_ok "Using '${BL}${TEMPLATE_STORAGE}${CL}' for template storage" -# Get template storage -TEMPLATE_STORAGE=$(select_storage template) -info "Using '$TEMPLATE_STORAGE' for template storage." +CONTAINER_STORAGE=$(select_storage container) || { + msg_error "Failed to select container storage" + exit 1 +} +msg_ok "Using '${BL}${CONTAINER_STORAGE}${CL}' for container storage" -# Get container storage -CONTAINER_STORAGE=$(select_storage container) -info "Using '$CONTAINER_STORAGE' for container storage." - -# Update LXC template list -msg "Updating LXC template list..." 
-pveam update >/dev/null - -# Get LXC template string -mapfile -t TEMPLATES < <(pveam available -section turnkeylinux | awk -v turnkey="${turnkey}" '$0 ~ turnkey {print $2}' | sort -t - -k 2 -V) -[ ${#TEMPLATES[@]} -gt 0 ] || die "Unable to find a template when searching for '${turnkey}'." -TEMPLATE="${TEMPLATES[-1]}" - -# Download LXC template -if ! pveam list $TEMPLATE_STORAGE | grep -q $TEMPLATE; then - msg "Downloading LXC template (Patience)..." - pveam download $TEMPLATE_STORAGE $TEMPLATE >/dev/null || - die "A problem occured while downloading the LXC template." +# Download template if not already cached +if ! pveam list "$TEMPLATE_STORAGE" | grep -q "$TEMPLATE"; then + msg_info "Downloading LXC template" + pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null || { + msg_error "Failed to download LXC template '${TEMPLATE}'" + exit 1 + } + msg_ok "Downloaded LXC template" fi -# Create variable for 'pct' options -PCT_OPTIONS=(${PCT_OPTIONS[@]:-${DEFAULT_PCT_OPTIONS[@]}}) -[[ " ${PCT_OPTIONS[@]} " =~ " -rootfs " ]] || PCT_OPTIONS+=(-rootfs $CONTAINER_STORAGE:${PCT_DISK_SIZE:-8}) +# Add rootfs if not specified +[[ " ${PCT_OPTIONS[*]} " =~ " -rootfs " ]] || PCT_OPTIONS+=(-rootfs "${CONTAINER_STORAGE}:${PCT_DISK_SIZE:-8}") -# Create LXC -msg "Creating LXC container..." -pct create $CTID ${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE} ${PCT_OPTIONS[@]} >/dev/null || - die "A problem occured while trying to create container." 
+# Set telemetry variables for the selected turnkey +TELEMETRY_TYPE="turnkey" +NSAPP="turnkey-${turnkey}" +CT_TYPE=1 +DISK_SIZE="${PCT_DISK_SIZE:-8}" +CORE_COUNT=2 +RAM_SIZE=2048 +var_os="turnkey" +var_version="${turnkey}" -# Save password -echo "TurnKey ${turnkey} password: ${PASS}" >>~/turnkey-${turnkey}.creds # file is located in the Proxmox root directory +# Report installation start to telemetry +post_to_api -# If turnkey is "OpenVPN", add access to the tun device -TUN_DEVICE_REQUIRED=("openvpn") # Setup this way in case future turnkeys also need tun access +# Create LXC container +msg_info "Creating LXC container" +pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" >/dev/null || { + msg_error "Failed to create container" + exit 1 +} +msg_ok "Created LXC container (ID: ${BL}${CTID}${CL})" + +# Save credentials securely +CREDS_FILE=~/turnkey-${turnkey}.creds +echo "TurnKey ${turnkey} password: ${PASS}" >>"$CREDS_FILE" +chmod 600 "$CREDS_FILE" + +# Configure TUN device access for VPN-based turnkeys +TUN_DEVICE_REQUIRED=("openvpn") if printf '%s\n' "${TUN_DEVICE_REQUIRED[@]}" | grep -qw "${turnkey}"; then - info "${turnkey} requires access to /dev/net/tun on the host. Modifying the container configuration to allow this." - echo "lxc.cgroup2.devices.allow: c 10:200 rwm" >>/etc/pve/lxc/${CTID}.conf - echo "lxc.mount.entry: /dev/net/tun dev/net/tun none bind,create=file 0 0" >>/etc/pve/lxc/${CTID}.conf + msg_info "Configuring TUN device access for ${turnkey}" + { + echo "lxc.cgroup2.devices.allow: c 10:200 rwm" + echo "lxc.mount.entry: /dev/net/tun dev/net/tun none bind,create=file 0 0" + } >>"/etc/pve/lxc/${CTID}.conf" + msg_ok "TUN device access configured" sleep 5 fi # Start container -msg "Starting LXC Container..." 
+msg_info "Starting LXC container" pct start "$CTID" +msg_ok "Started LXC container" sleep 10 -# Get container IP -set +euo pipefail # Turn off error checking -max_attempts=5 -attempt=1 +# Detect container IP +msg_info "Detecting IP address" IP="" -while [[ $attempt -le $max_attempts ]]; do - IP=$(pct exec $CTID ip a show dev eth0 | grep -oP 'inet \K[^/]+') - if [[ -n $IP ]]; then +for attempt in $(seq 1 5); do + IP=$(pct exec "$CTID" -- ip -4 a show dev eth0 2>/dev/null | grep -oP 'inet \K[^/]+' || true) + if [[ -n "$IP" ]]; then break - else - warn "Attempt $attempt: IP address not found. Pausing for 5 seconds..." - sleep 5 - ((attempt++)) fi + [[ $attempt -lt 5 ]] && sleep 5 done -if [[ -z $IP ]]; then - warn "Maximum number of attempts reached. IP address not found." +if [[ -z "$IP" ]]; then + msg_warn "IP address not found after 5 attempts" IP="NOT FOUND" +else + msg_ok "IP address: ${BL}${IP}${CL}" fi -# Start Proxmox VE Monitor-All if available -if [[ -f /etc/systemd/system/ping-instances.service ]]; then - systemctl start ping-instances.service -fi +# Report success to telemetry +post_update_to_api "done" "none" -# Success message +# Success summary header_info echo -info "LXC container '$CTID' was successfully created, and its IP address is ${IP}." +msg_ok "TurnKey ${BL}${turnkey}${CL} LXC container '${BL}${CTID}${CL}' was successfully created." echo -info "Proceed to the LXC console to complete the setup." +echo -e " ${TAB}${YW}IP Address:${CL} ${BL}${IP}${CL}" +echo -e " ${TAB}${YW}Login:${CL} ${GN}root${CL}" +echo -e " ${TAB}${YW}Password:${CL} ${GN}${PASS}${CL}" echo -info "login: root" -info "password: $PASS" -info "(credentials also stored in the root user's root directory in the 'turnkey-${turnkey}.creds' file.)" +echo -e " ${TAB}Proceed to the LXC console to complete the TurnKey setup." 
+echo -e " ${TAB}Credentials stored in: ${BL}~/turnkey-${turnkey}.creds${CL}" echo diff --git a/vm/archlinux-vm.sh b/vm/archlinux-vm.sh index dc681a694..0a5974832 100644 --- a/vm/archlinux-vm.sh +++ b/vm/archlinux-vm.sh @@ -22,7 +22,7 @@ echo -e "\n Loading..." GEN_MAC=02:$(openssl rand -hex 5 | awk '{print toupper($0)}' | sed 's/\(..\)/\1:/g; s/.$//') RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" METHOD="" -NSAPP="arch-linux-vm" +NSAPP="archlinux-vm" var_os="arch-linux" var_version="n.d." @@ -310,7 +310,10 @@ function advanced_settings() { HN="arch-linux" echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" else - HN=$(echo "${VM_NAME,,}" | tr -d ' ') + HN=$(echo "${VM_NAME,,}" | tr -cs 'a-z0-9-' '-' | sed 's/^-//;s/-$//') + if [ "$HN" != "${VM_NAME,,}" ]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --title "HOSTNAME ADJUSTED" --msgbox "Invalid characters detected. Hostname has been adjusted to:\n\n $HN" 10 58 + fi echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" fi else @@ -332,27 +335,31 @@ function advanced_settings() { exit-script fi - if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z "$CORE_COUNT" ]; then - CORE_COUNT="2" - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + while true; do + if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$CORE_COUNT" ]; then CORE_COUNT="2"; fi + if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "CPU Cores must be a positive integer (e.g., 2)." 
8 58 else - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + exit-script fi - else - exit-script - fi + done - if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z "$RAM_SIZE" ]; then - RAM_SIZE="2048" - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + while true; do + if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$RAM_SIZE" ]; then RAM_SIZE="2048"; fi + if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "RAM Size must be a positive integer in MiB (e.g., 2048)." 8 58 else - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + exit-script fi - else - exit-script - fi + done if BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Bridge" 8 58 vmbr0 --title "BRIDGE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then if [ -z "$BRG" ]; then @@ -365,43 +372,61 @@ function advanced_settings() { exit-script fi - if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 "$GEN_MAC" --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z "$MAC1" ]; then - MAC="$GEN_MAC" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + while true; do + if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 "$GEN_MAC" --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MAC1" ]; then + MAC="$GEN_MAC" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + break + fi + if [[ "$MAC1" =~ ^([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}$ ]]; then + MAC="$MAC1" + echo -e 
"${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "Invalid MAC address format. Use XX:XX:XX:XX:XX:XX (e.g., AA:BB:CC:DD:EE:FF)." 8 58 else - MAC="$MAC1" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + exit-script fi - else - exit-script - fi + done - if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z "$VLAN1" ]; then - VLAN1="Default" - VLAN="" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + while true; do + if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$VLAN1" ]; then + VLAN1="Default" + VLAN="" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + if [[ "$VLAN1" =~ ^[0-9]+$ ]] && [ "$VLAN1" -ge 1 ] && [ "$VLAN1" -le 4094 ]; then + VLAN=",tag=$VLAN1" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "VLAN must be a number between 1 and 4094, or leave blank for default." 
8 58 else - VLAN=",tag=$VLAN1" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + exit-script fi - else - exit-script - fi + done - if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z "$MTU1" ]; then - MTU1="Default" - MTU="" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + while true; do + if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MTU1" ]; then + MTU1="Default" + MTU="" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + if [[ "$MTU1" =~ ^[0-9]+$ ]] && [ "$MTU1" -ge 576 ] && [ "$MTU1" -le 65520 ]; then + MTU=",mtu=$MTU1" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "MTU Size must be a number between 576 and 65520, or leave blank for default." 8 58 else - MTU=",mtu=$MTU1" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + exit-script fi - else - exit-script - fi + done if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "START VIRTUAL MACHINE" --yesno "Start VM when completed?" 10 58); then echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}" @@ -526,7 +551,7 @@ qm set $VMID \ DESCRIPTION=$( cat < - + Logo diff --git a/vm/debian-13-vm.sh b/vm/debian-13-vm.sh index 18cb1a681..26d4fdfc3 100644 --- a/vm/debian-13-vm.sh +++ b/vm/debian-13-vm.sh @@ -22,7 +22,7 @@ echo -e "\n Loading..." 
GEN_MAC=02:$(openssl rand -hex 5 | awk '{print toupper($0)}' | sed 's/\(..\)/\1:/g; s/.$//') RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" METHOD="" -NSAPP="debian13vm" +NSAPP="debian-13-vm" var_os="debian" var_version="13" @@ -325,7 +325,10 @@ function advanced_settings() { HN="debian" echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" else - HN=$(echo "${VM_NAME,,}" | tr -d ' ') + HN=$(echo "${VM_NAME,,}" | tr -cs 'a-z0-9-' '-' | sed 's/^-//;s/-$//') + if [ "$HN" != "${VM_NAME,,}" ]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --title "HOSTNAME ADJUSTED" --msgbox "Invalid characters detected. Hostname has been adjusted to:\n\n $HN" 10 58 + fi echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" fi else @@ -347,27 +350,31 @@ function advanced_settings() { exit-script fi - if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z "$CORE_COUNT" ]; then - CORE_COUNT="2" - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + while true; do + if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$CORE_COUNT" ]; then CORE_COUNT="2"; fi + if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "CPU Cores must be a positive integer (e.g., 2)." 
8 58 else - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + exit-script fi - else - exit-script - fi + done - if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z "$RAM_SIZE" ]; then - RAM_SIZE="2048" - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + while true; do + if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$RAM_SIZE" ]; then RAM_SIZE="2048"; fi + if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "RAM Size must be a positive integer in MiB (e.g., 2048)." 8 58 else - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + exit-script fi - else - exit-script - fi + done if BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Bridge" 8 58 vmbr0 --title "BRIDGE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then if [ -z "$BRG" ]; then @@ -380,43 +387,61 @@ function advanced_settings() { exit-script fi - if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 "$GEN_MAC" --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z "$MAC1" ]; then - MAC="$GEN_MAC" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + while true; do + if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 "$GEN_MAC" --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MAC1" ]; then + MAC="$GEN_MAC" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + break + fi + if [[ "$MAC1" =~ ^([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}$ ]]; then + MAC="$MAC1" + echo -e 
"${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "Invalid MAC address format. Use XX:XX:XX:XX:XX:XX (e.g., AA:BB:CC:DD:EE:FF)." 8 58 else - MAC="$MAC1" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + exit-script fi - else - exit-script - fi + done - if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z "$VLAN1" ]; then - VLAN1="Default" - VLAN="" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + while true; do + if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$VLAN1" ]; then + VLAN1="Default" + VLAN="" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + if [[ "$VLAN1" =~ ^[0-9]+$ ]] && [ "$VLAN1" -ge 1 ] && [ "$VLAN1" -le 4094 ]; then + VLAN=",tag=$VLAN1" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "VLAN must be a number between 1 and 4094, or leave blank for default." 
8 58 else - VLAN=",tag=$VLAN1" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + exit-script fi - else - exit-script - fi + done - if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z "$MTU1" ]; then - MTU1="Default" - MTU="" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + while true; do + if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MTU1" ]; then + MTU1="Default" + MTU="" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + if [[ "$MTU1" =~ ^[0-9]+$ ]] && [ "$MTU1" -ge 576 ] && [ "$MTU1" -le 65520 ]; then + MTU=",mtu=$MTU1" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "MTU Size must be a number between 576 and 65520, or leave blank for default." 8 58 else - MTU=",mtu=$MTU1" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + exit-script fi - else - exit-script - fi + done select_cloud_init @@ -606,7 +631,7 @@ rm -f "$WORK_FILE" DESCRIPTION=$( cat < - + Logo diff --git a/vm/debian-vm.sh b/vm/debian-vm.sh index 063057a78..2bcedc717 100644 --- a/vm/debian-vm.sh +++ b/vm/debian-vm.sh @@ -22,7 +22,7 @@ echo -e "\n Loading..." 
GEN_MAC=02:$(openssl rand -hex 5 | awk '{print toupper($0)}' | sed 's/\(..\)/\1:/g; s/.$//') RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" METHOD="" -NSAPP="debian12vm" +NSAPP="debian-vm" var_os="debian" var_version="12" @@ -315,7 +315,10 @@ function advanced_settings() { HN="debian" echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" else - HN=$(echo ${VM_NAME,,} | tr -d ' ') + HN=$(echo "${VM_NAME,,}" | tr -cs 'a-z0-9-' '-' | sed 's/^-//;s/-$//') + if [ "$HN" != "${VM_NAME,,}" ]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --title "HOSTNAME ADJUSTED" --msgbox "Invalid characters detected. Hostname has been adjusted to:\n\n $HN" 10 58 + fi echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" fi else @@ -337,27 +340,31 @@ function advanced_settings() { exit-script fi - if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $CORE_COUNT ]; then - CORE_COUNT="2" - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + while true; do + if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$CORE_COUNT" ]; then CORE_COUNT="2"; fi + if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "CPU Cores must be a positive integer (e.g., 2)." 
8 58 else - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + exit-script fi - else - exit-script - fi + done - if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $RAM_SIZE ]; then - RAM_SIZE="2048" - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + while true; do + if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$RAM_SIZE" ]; then RAM_SIZE="2048"; fi + if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "RAM Size must be a positive integer in MiB (e.g., 2048)." 8 58 else - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + exit-script fi - else - exit-script - fi + done if BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Bridge" 8 58 vmbr0 --title "BRIDGE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then if [ -z $BRG ]; then @@ -370,43 +377,61 @@ function advanced_settings() { exit-script fi - if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MAC1 ]; then - MAC="$GEN_MAC" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + while true; do + if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MAC1" ]; then + MAC="$GEN_MAC" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + break + fi + if [[ "$MAC1" =~ ^([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}$ ]]; then + MAC="$MAC1" + echo -e 
"${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "Invalid MAC address format. Use XX:XX:XX:XX:XX:XX (e.g., AA:BB:CC:DD:EE:FF)." 8 58 else - MAC="$MAC1" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + exit-script fi - else - exit-script - fi + done - if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $VLAN1 ]; then - VLAN1="Default" - VLAN="" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + while true; do + if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$VLAN1" ]; then + VLAN1="Default" + VLAN="" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + if [[ "$VLAN1" =~ ^[0-9]+$ ]] && [ "$VLAN1" -ge 1 ] && [ "$VLAN1" -le 4094 ]; then + VLAN=",tag=$VLAN1" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "VLAN must be a number between 1 and 4094, or leave blank for default." 
8 58 else - VLAN=",tag=$VLAN1" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + exit-script fi - else - exit-script - fi + done - if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MTU1 ]; then - MTU1="Default" - MTU="" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + while true; do + if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MTU1" ]; then + MTU1="Default" + MTU="" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + if [[ "$MTU1" =~ ^[0-9]+$ ]] && [ "$MTU1" -ge 576 ] && [ "$MTU1" -le 65520 ]; then + MTU=",mtu=$MTU1" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "MTU Size must be a number between 576 and 65520, or leave blank for default." 8 58 else - MTU=",mtu=$MTU1" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + exit-script fi - else - exit-script - fi + done if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "CLOUD-INIT" --yesno "Configure the VM with Cloud-init?" 
--defaultno 10 58); then echo -e "${CLOUD}${BOLD}${DGN}Configure Cloud-init: ${BGN}yes${CL}" @@ -543,7 +568,7 @@ fi DESCRIPTION=$( cat < - + Logo diff --git a/vm/docker-vm.sh b/vm/docker-vm.sh index 42fb22091..194470a40 100644 --- a/vm/docker-vm.sh +++ b/vm/docker-vm.sh @@ -251,7 +251,10 @@ function advanced_settings() { if [ -z $VM_NAME ]; then HN="docker" else - HN=$(echo ${VM_NAME,,} | tr -d ' ') + HN=$(echo "${VM_NAME,,}" | tr -cs 'a-z0-9-' '-' | sed 's/^-//;s/-$//') + if [ "$HN" != "${VM_NAME,,}" ]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --title "HOSTNAME ADJUSTED" --msgbox "Invalid characters detected. Hostname has been adjusted to:\n\n $HN" 10 58 + fi fi echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" else @@ -275,24 +278,32 @@ function advanced_settings() { fi # CPU Cores - if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $CORE_COUNT ]; then - CORE_COUNT="2" + while true; do + if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$CORE_COUNT" ]; then CORE_COUNT="2"; fi + if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "CPU Cores must be a positive integer (e.g., 2)." 
8 58 + else + exit_script fi - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" - else - exit_script - fi + done # RAM Size - if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 4096 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $RAM_SIZE ]; then - RAM_SIZE="4096" + while true; do + if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 4096 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$RAM_SIZE" ]; then RAM_SIZE="4096"; fi + if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "RAM Size must be a positive integer in MiB (e.g., 4096)." 8 58 + else + exit_script fi - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" - else - exit_script - fi + done # Bridge if BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Bridge" 8 58 vmbr0 --title "BRIDGE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then @@ -305,42 +316,63 @@ function advanced_settings() { fi # MAC Address - if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MAC1 ]; then - MAC="$GEN_MAC" + while true; do + if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MAC1" ]; then + MAC="$GEN_MAC" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + break + fi + if [[ "$MAC1" =~ ^([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}$ ]]; then + MAC="$MAC1" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID 
INPUT" --msgbox "Invalid MAC address format. Use XX:XX:XX:XX:XX:XX (e.g., AA:BB:CC:DD:EE:FF)." 8 58 else - MAC="$MAC1" + exit_script fi - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" - else - exit_script - fi + done # VLAN - if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan (leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $VLAN1 ]; then - VLAN1="Default" - VLAN="" + while true; do + if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan (leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$VLAN1" ]; then + VLAN1="Default" + VLAN="" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + if [[ "$VLAN1" =~ ^[0-9]+$ ]] && [ "$VLAN1" -ge 1 ] && [ "$VLAN1" -le 4094 ]; then + VLAN=",tag=$VLAN1" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "VLAN must be a number between 1 and 4094, or leave blank for default." 
8 58 else - VLAN=",tag=$VLAN1" + exit_script fi - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" - else - exit_script - fi + done # MTU - if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MTU1 ]; then - MTU1="Default" - MTU="" + while true; do + if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MTU1" ]; then + MTU1="Default" + MTU="" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + if [[ "$MTU1" =~ ^[0-9]+$ ]] && [ "$MTU1" -ge 576 ] && [ "$MTU1" -le 65520 ]; then + MTU=",mtu=$MTU1" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "MTU Size must be a number between 576 and 65520, or leave blank for default." 8 58 else - MTU=",mtu=$MTU1" + exit_script fi - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" - else - exit_script - fi + done # Start VM if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "START VIRTUAL MACHINE" --yesno "Start VM when completed?" 
10 58); then diff --git a/vm/haos-vm.sh b/vm/haos-vm.sh index f0d58afe8..24f9a9d9b 100644 --- a/vm/haos-vm.sh +++ b/vm/haos-vm.sh @@ -23,7 +23,7 @@ GEN_MAC=02:$(openssl rand -hex 5 | awk '{print toupper($0)}' | sed 's/\(..\)/\1: RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" VERSIONS=(stable beta dev) METHOD="" -NSAPP="homeassistant-os" +NSAPP="haos-vm" var_os="homeassistant" DISK_SIZE="32G" @@ -391,7 +391,10 @@ function advanced_settings() { HN="haos${BRANCH}" echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" else - HN=$(echo ${VM_NAME,,} | tr -d ' ') + HN=$(echo "${VM_NAME,,}" | tr -cs 'a-z0-9-' '-' | sed 's/^-//;s/-$//') + if [ "$HN" != "${VM_NAME,,}" ]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --title "HOSTNAME ADJUSTED" --msgbox "Invalid characters detected. Hostname has been adjusted to:\n\n $HN" 10 58 + fi echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" fi else @@ -416,27 +419,31 @@ function advanced_settings() { exit-script fi - if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $CORE_COUNT ]; then - CORE_COUNT="2" - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + while true; do + if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$CORE_COUNT" ]; then CORE_COUNT="2"; fi + if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "CPU Cores must be a positive integer (e.g., 2)." 
8 58 else - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + exit-script fi - else - exit-script - fi + done - if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $RAM_SIZE ]; then - RAM_SIZE="4096" - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + while true; do + if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$RAM_SIZE" ]; then RAM_SIZE="4096"; fi + if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "RAM Size must be a positive integer in MiB (e.g., 4096)." 8 58 else - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + exit-script fi - else - exit-script - fi + done if BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Bridge" 8 58 vmbr0 --title "BRIDGE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then if [ -z $BRG ]; then @@ -449,43 +456,61 @@ function advanced_settings() { exit-script fi - if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MAC1 ]; then - MAC="$GEN_MAC" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + while true; do + if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MAC1" ]; then + MAC="$GEN_MAC" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + break + fi + if [[ "$MAC1" =~ ^([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}$ ]]; then + MAC="$MAC1" + echo -e 
"${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "Invalid MAC address format. Use XX:XX:XX:XX:XX:XX (e.g., AA:BB:CC:DD:EE:FF)." 8 58 else - MAC="$MAC1" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + exit-script fi - else - exit-script - fi + done - if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $VLAN1 ]; then - VLAN1="Default" - VLAN="" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + while true; do + if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$VLAN1" ]; then + VLAN1="Default" + VLAN="" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + if [[ "$VLAN1" =~ ^[0-9]+$ ]] && [ "$VLAN1" -ge 1 ] && [ "$VLAN1" -le 4094 ]; then + VLAN=",tag=$VLAN1" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "VLAN must be a number between 1 and 4094, or leave blank for default." 
8 58 else - VLAN=",tag=$VLAN1" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + exit-script fi - else - exit-script - fi + done - if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MTU1 ]; then - MTU1="Default" - MTU="" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + while true; do + if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MTU1" ]; then + MTU1="Default" + MTU="" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + if [[ "$MTU1" =~ ^[0-9]+$ ]] && [ "$MTU1" -ge 576 ] && [ "$MTU1" -le 65520 ]; then + MTU=",mtu=$MTU1" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "MTU Size must be a number between 576 and 65520, or leave blank for default." 8 58 else - MTU=",mtu=$MTU1" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + exit-script fi - else - exit-script - fi + done if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "START VIRTUAL MACHINE" --yesno "Start VM when completed?" 10 58); then echo -e "${DGN}Start VM when completed: ${BGN}yes${CL}" @@ -614,7 +639,7 @@ msg_ok "Resized disk" DESCRIPTION=$( cat < - + Logo diff --git a/vm/mikrotik-routeros.sh b/vm/mikrotik-routeros.sh index eb0532d6a..8b62ee30b 100644 --- a/vm/mikrotik-routeros.sh +++ b/vm/mikrotik-routeros.sh @@ -23,7 +23,7 @@ echo -e "Loading..." 
GEN_MAC=$(echo '00 60 2f'$(od -An -N3 -t xC /dev/urandom) | sed -e 's/ /:/g' | tr '[:lower:]' '[:upper:]') RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" METHOD="" -NSAPP="mikrotik-router-os" +NSAPP="mikrotik-routeros" var_os="mikrotik" var_version=" " DISK_SIZE="1G" @@ -377,7 +377,10 @@ function advanced_settings() { HN="mikrotik-routeros-chr" echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" else - HN=$(echo ${VM_NAME,,} | tr -d ' ') + HN=$(echo "${VM_NAME,,}" | tr -cs 'a-z0-9-' '-' | sed 's/^-//;s/-$//') + if [ "$HN" != "${VM_NAME,,}" ]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --title "HOSTNAME ADJUSTED" --msgbox "Invalid characters detected. Hostname has been adjusted to:\n\n $HN" 10 58 + fi echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" fi else @@ -399,27 +402,31 @@ function advanced_settings() { exit-script fi - if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $CORE_COUNT ]; then - CORE_COUNT="2" - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + while true; do + if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$CORE_COUNT" ]; then CORE_COUNT="2"; fi + if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "CPU Cores must be a positive integer (e.g., 2)." 
8 58 else - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + exit-script fi - else - exit-script - fi + done - if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $RAM_SIZE ]; then - RAM_SIZE="2048" - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + while true; do + if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$RAM_SIZE" ]; then RAM_SIZE="2048"; fi + if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "RAM Size must be a positive integer in MiB (e.g., 2048)." 8 58 else - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + exit-script fi - else - exit-script - fi + done if BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Bridge" 8 58 vmbr0 --title "BRIDGE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then if [ -z $BRG ]; then @@ -432,43 +439,61 @@ function advanced_settings() { exit-script fi - if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MAC1 ]; then - MAC="$GEN_MAC" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + while true; do + if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MAC1" ]; then + MAC="$GEN_MAC" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + break + fi + if [[ "$MAC1" =~ ^([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}$ ]]; then + MAC="$MAC1" + echo -e 
"${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "Invalid MAC address format. Use XX:XX:XX:XX:XX:XX (e.g., AA:BB:CC:DD:EE:FF)." 8 58 else - MAC="$MAC1" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + exit-script fi - else - exit-script - fi + done - if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $VLAN1 ]; then - VLAN1="Default" - VLAN="" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + while true; do + if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$VLAN1" ]; then + VLAN1="Default" + VLAN="" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + if [[ "$VLAN1" =~ ^[0-9]+$ ]] && [ "$VLAN1" -ge 1 ] && [ "$VLAN1" -le 4094 ]; then + VLAN=",tag=$VLAN1" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "VLAN must be a number between 1 and 4094, or leave blank for default." 
8 58 else - VLAN=",tag=$VLAN1" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + exit-script fi - else - exit-script - fi + done - if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MTU1 ]; then - MTU1="Default" - MTU="" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + while true; do + if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MTU1" ]; then + MTU1="Default" + MTU="" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + if [[ "$MTU1" =~ ^[0-9]+$ ]] && [ "$MTU1" -ge 576 ] && [ "$MTU1" -le 65520 ]; then + MTU=",mtu=$MTU1" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "MTU Size must be a number between 576 and 65520, or leave blank for default." 8 58 else - MTU=",mtu=$MTU1" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + exit-script fi - else - exit-script - fi + done if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "START VIRTUAL MACHINE" --yesno "Start VM when completed?" 10 58); then echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}" @@ -597,7 +622,7 @@ qm set $VMID \ DESCRIPTION=$( cat < - + Logo diff --git a/vm/nextcloud-vm.sh b/vm/nextcloud-vm.sh index c17b299be..064a613e4 100644 --- a/vm/nextcloud-vm.sh +++ b/vm/nextcloud-vm.sh @@ -22,7 +22,7 @@ echo -e "\n Loading..." 
GEN_MAC=02:$(openssl rand -hex 5 | awk '{print toupper($0)}' | sed 's/\(..\)/\1:/g; s/.$//') RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" METHOD="" -NSAPP="turnkey-nextcloud" +NSAPP="nextcloud-vm" var_os="turnkey-nextcloud" var_version="n.d." @@ -313,7 +313,10 @@ function advanced_settings() { HN="nextcloud-vm" echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" else - HN=$(echo ${VM_NAME,,} | tr -d ' ') + HN=$(echo "${VM_NAME,,}" | tr -cs 'a-z0-9-' '-' | sed 's/^-//;s/-$//') + if [ "$HN" != "${VM_NAME,,}" ]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --title "HOSTNAME ADJUSTED" --msgbox "Invalid characters detected. Hostname has been adjusted to:\n\n $HN" 10 58 + fi echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" fi else @@ -335,27 +338,31 @@ function advanced_settings() { exit-script fi - if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $CORE_COUNT ]; then - CORE_COUNT="2" - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + while true; do + if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$CORE_COUNT" ]; then CORE_COUNT="2"; fi + if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "CPU Cores must be a positive integer (e.g., 2)." 
8 58 else - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + exit-script fi - else - exit-script - fi + done - if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $RAM_SIZE ]; then - RAM_SIZE="2048" - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + while true; do + if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$RAM_SIZE" ]; then RAM_SIZE="2048"; fi + if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "RAM Size must be a positive integer in MiB (e.g., 2048)." 8 58 else - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + exit-script fi - else - exit-script - fi + done if BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Bridge" 8 58 vmbr0 --title "BRIDGE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then if [ -z $BRG ]; then @@ -368,43 +375,61 @@ function advanced_settings() { exit-script fi - if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MAC1 ]; then - MAC="$GEN_MAC" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + while true; do + if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MAC1" ]; then + MAC="$GEN_MAC" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + break + fi + if [[ "$MAC1" =~ ^([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}$ ]]; then + MAC="$MAC1" + echo -e 
"${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "Invalid MAC address format. Use XX:XX:XX:XX:XX:XX (e.g., AA:BB:CC:DD:EE:FF)." 8 58 else - MAC="$MAC1" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + exit-script fi - else - exit-script - fi + done - if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $VLAN1 ]; then - VLAN1="Default" - VLAN="" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + while true; do + if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$VLAN1" ]; then + VLAN1="Default" + VLAN="" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + if [[ "$VLAN1" =~ ^[0-9]+$ ]] && [ "$VLAN1" -ge 1 ] && [ "$VLAN1" -le 4094 ]; then + VLAN=",tag=$VLAN1" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "VLAN must be a number between 1 and 4094, or leave blank for default." 
8 58 else - VLAN=",tag=$VLAN1" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + exit-script fi - else - exit-script - fi + done - if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MTU1 ]; then - MTU1="Default" - MTU="" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + while true; do + if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MTU1" ]; then + MTU1="Default" + MTU="" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + if [[ "$MTU1" =~ ^[0-9]+$ ]] && [ "$MTU1" -ge 576 ] && [ "$MTU1" -le 65520 ]; then + MTU=",mtu=$MTU1" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "MTU Size must be a number between 576 and 65520, or leave blank for default." 8 58 else - MTU=",mtu=$MTU1" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + exit-script fi - else - exit-script - fi + done if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "START VIRTUAL MACHINE" --yesno "Start VM when completed?" 
10 58); then echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}" @@ -521,7 +546,7 @@ qm set $VMID \ DESCRIPTION=$( cat < - + Logo diff --git a/vm/openwrt-vm.sh b/vm/openwrt-vm.sh index c7e030245..8a8e39cb0 100644 --- a/vm/openwrt-vm.sh +++ b/vm/openwrt-vm.sh @@ -328,30 +328,41 @@ function advanced_settings() { if [ -z $VM_NAME ]; then HN="openwrt" else - HN=$(echo ${VM_NAME,,} | tr -d ' ') + HN=$(echo "${VM_NAME,,}" | tr -cs 'a-z0-9-' '-' | sed 's/^-//;s/-$//') + if [ "$HN" != "${VM_NAME,,}" ]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --title "HOSTNAME ADJUSTED" --msgbox "Invalid characters detected. Hostname has been adjusted to:\n\n $HN" 10 58 + fi fi echo -e "${DGN}Using Hostname: ${BGN}$HN${CL}" else exit-script fi - if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 1 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $CORE_COUNT ]; then - CORE_COUNT="1" + while true; do + if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 1 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$CORE_COUNT" ]; then CORE_COUNT="1"; fi + if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${DGN}Allocated Cores: ${BGN}$CORE_COUNT${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "CPU Cores must be a positive integer (e.g., 1)." 
8 58 + else + exit-script fi - echo -e "${DGN}Allocated Cores: ${BGN}$CORE_COUNT${CL}" - else - exit-script - fi + done - if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 256 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $RAM_SIZE ]; then - RAM_SIZE="256" + while true; do + if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 256 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$RAM_SIZE" ]; then RAM_SIZE="256"; fi + if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${DGN}Allocated RAM: ${BGN}$RAM_SIZE${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "RAM Size must be a positive integer in MiB (e.g., 256)." 8 58 + else + exit-script fi - echo -e "${DGN}Allocated RAM: ${BGN}$RAM_SIZE${CL}" - else - exit-script - fi + done if DISK_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" \ --inputbox "Set Disk Size in GiB (e.g., 1, 2, 4)" 8 58 "1" \ @@ -422,41 +433,62 @@ function advanced_settings() { exit-script fi - if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a WAN Vlan (leave blank for default)" 8 58 --title "WAN VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $VLAN1 ]; then - VLAN1="Default" - VLAN="" + while true; do + if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a WAN Vlan (leave blank for default)" 8 58 --title "WAN VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$VLAN1" ]; then + VLAN1="Default" + VLAN="" + echo -e "${DGN}Using WAN Vlan: ${BGN}$VLAN1${CL}" + break + fi + if [[ "$VLAN1" =~ ^[0-9]+$ ]] && [ "$VLAN1" -ge 1 ] && [ "$VLAN1" -le 4094 ]; then + VLAN=",tag=$VLAN1" + echo -e "${DGN}Using WAN Vlan: ${BGN}$VLAN1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "VLAN must be a number between 1 
and 4094, or leave blank for default." 8 58 else - VLAN=",tag=$VLAN1" + exit-script fi - echo -e "${DGN}Using WAN Vlan: ${BGN}$VLAN1${CL}" - else - exit-script - fi + done - if VLAN2=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a LAN Vlan" 8 58 999 --title "LAN VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $VLAN2 ]; then - VLAN2="Default" - LAN_VLAN="" + while true; do + if VLAN2=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a LAN Vlan" 8 58 999 --title "LAN VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$VLAN2" ]; then + VLAN2="Default" + LAN_VLAN="" + echo -e "${DGN}Using LAN Vlan: ${BGN}$VLAN2${CL}" + break + fi + if [[ "$VLAN2" =~ ^[0-9]+$ ]] && [ "$VLAN2" -ge 1 ] && [ "$VLAN2" -le 4094 ]; then + LAN_VLAN=",tag=$VLAN2" + echo -e "${DGN}Using LAN Vlan: ${BGN}$VLAN2${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "VLAN must be a number between 1 and 4094, or leave blank for default." 
8 58 else - LAN_VLAN=",tag=$VLAN2" + exit-script fi - echo -e "${DGN}Using LAN Vlan: ${BGN}$VLAN2${CL}" - else - exit-script - fi + done - if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MTU1 ]; then - MTU1="Default" - MTU="" + while true; do + if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MTU1" ]; then + MTU1="Default" + MTU="" + echo -e "${DGN}Using Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + if [[ "$MTU1" =~ ^[0-9]+$ ]] && [ "$MTU1" -ge 576 ] && [ "$MTU1" -le 65520 ]; then + MTU=",mtu=$MTU1" + echo -e "${DGN}Using Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "MTU Size must be a number between 576 and 65520, or leave blank for default." 8 58 else - MTU=",mtu=$MTU1" + exit-script fi - echo -e "${DGN}Using Interface MTU Size: ${BGN}$MTU1${CL}" - else - exit-script - fi + done if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "START VIRTUAL MACHINE" --yesno "Start VM when completed?" 
10 58); then START_VM="yes" @@ -573,7 +605,7 @@ msg_ok "Resized disk to ${DISK_SIZE}" DESCRIPTION=$( cat < - + Logo @@ -631,7 +663,7 @@ if qm status "$VMID" | grep -q "running"; then send_line_to_vm "uci set network.lan.ipaddr=${LAN_IP_ADDR}" send_line_to_vm "uci set network.lan.netmask=${LAN_NETMASK}" send_line_to_vm "uci commit" - send_line_to_vm "halt" + send_line_to_vm "poweroff" msg_ok "Network interfaces configured in OpenWrt" else msg_error "VM is not running" diff --git a/vm/opnsense-vm.sh b/vm/opnsense-vm.sh index 71ec65f41..b57c82cfd 100644 --- a/vm/opnsense-vm.sh +++ b/vm/opnsense-vm.sh @@ -24,7 +24,7 @@ RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" METHOD="" NSAPP="opnsense-vm" var_os="opnsense" -var_version="25.7" +var_version="26.1" # GEN_MAC=02:$(openssl rand -hex 5 | awk '{print toupper($0)}' | sed 's/\(..\)/\1:/g; s/.$//') GEN_MAC_LAN=02:$(openssl rand -hex 5 | awk '{print toupper($0)}' | sed 's/\(..\)/\1:/g; s/.$//') @@ -317,7 +317,7 @@ function default_settings() { # Determine available network modes based on bridge count local DEFAULT_WAN_BRG - DEFAULT_WAN_BRG=$(echo "$AVAILABLE_BRIDGES" | grep -v "^${BRG}$" | head -n1) + DEFAULT_WAN_BRG=$(echo "$AVAILABLE_BRIDGES" | grep -v "^${BRG}$" | head -n1 || true) if [ "$BRIDGE_COUNT" -ge 2 ]; then # Multiple bridges available - offer dual or single mode @@ -421,30 +421,41 @@ function advanced_settings() { if [ -z "$VM_NAME" ]; then HN="OPNsense" else - HN=$(echo ${VM_NAME,,} | tr -d ' ') + HN=$(echo "${VM_NAME,,}" | tr -cs 'a-z0-9-' '-' | sed 's/^-//;s/-$//') + if [ "$HN" != "${VM_NAME,,}" ]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --title "HOSTNAME ADJUSTED" --msgbox "Invalid characters detected. 
Hostname has been adjusted to:\n\n $HN" 10 58 + fi fi echo -e "${DGN}Using Hostname: ${BGN}$HN${CL}" else exit-script fi - if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 4 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z "$CORE_COUNT" ]; then - CORE_COUNT="2" + while true; do + if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 4 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$CORE_COUNT" ]; then CORE_COUNT="4"; fi + if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${DGN}Allocated Cores: ${BGN}$CORE_COUNT${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "CPU Cores must be a positive integer (e.g., 4)." 8 58 + else + exit-script fi - echo -e "${DGN}Allocated Cores: ${BGN}$CORE_COUNT${CL}" - else - exit-script - fi + done - if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 8192 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $RAM_SIZE ]; then - RAM_SIZE="8192" + while true; do + if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 8192 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$RAM_SIZE" ]; then RAM_SIZE="8192"; fi + if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${DGN}Allocated RAM: ${BGN}$RAM_SIZE${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "RAM Size must be a positive integer in MiB (e.g., 8192)." 
8 58 + else + exit-script fi - echo -e "${DGN}Allocated RAM: ${BGN}$RAM_SIZE${CL}" - else - exit-script - fi + done if BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a LAN Bridge" 8 58 vmbr0 --title "LAN BRIDGE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then if [ -z $BRG ]; then @@ -498,7 +509,7 @@ function advanced_settings() { # Build WAN bridge selection from available bridges (excluding LAN bridge) local WAN_BRIDGES - WAN_BRIDGES=$(get_available_bridges | grep -v "^${BRG}$") + WAN_BRIDGES=$(get_available_bridges | grep -v "^${BRG}$" || true) if [ -z "$WAN_BRIDGES" ]; then msg_error "No additional bridge available for WAN. Only '${BRG}' exists." msg_error "Create a second bridge (e.g. vmbr1) in Proxmox network config first." @@ -726,9 +737,26 @@ done msg_info "Creating a OPNsense VM" qm create $VMID -agent 1${MACHINE} -tablet 0 -localtime 1 -bios ovmf${CPU_TYPE} -cores $CORE_COUNT -memory $RAM_SIZE \ - -name $HN -tags proxmox-helper-scripts -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci -pvesm alloc $STORAGE $VMID $DISK0 4M 1>&/dev/null -qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} 1>&/dev/null + -name $HN -tags community-script -net0 virtio,bridge=$BRG,macaddr=$MAC$VLAN$MTU -onboot 1 -ostype l26 -scsihw virtio-scsi-pci + +# Retry pvesm alloc on transient zfs_request "got timeout" errors (#14127) +alloc_attempt=1 +alloc_max=4 +alloc_delay=5 +while :; do + alloc_err=$(pvesm alloc $STORAGE $VMID $DISK0 4M 2>&1 >/dev/null) && break + if [[ "$alloc_err" == *"got timeout"* && $alloc_attempt -lt $alloc_max ]]; then + msg_warn "pvesm alloc hit zfs timeout (attempt $alloc_attempt/$alloc_max), retrying in ${alloc_delay}s..." 
+ pvesm free "${DISK0_REF}" &>/dev/null || true + sleep "$alloc_delay" + alloc_attempt=$((alloc_attempt + 1)) + alloc_delay=$((alloc_delay * 2)) + continue + fi + echo -e "$alloc_err" >&2 + exit 220 +done +qm importdisk $VMID ${FILE} $STORAGE ${DISK_IMPORT:-} &>/dev/null qm set $VMID \ -efidisk0 ${DISK0_REF}${FORMAT} \ -scsi0 ${DISK1_REF},${DISK_CACHE}${THIN}size=2G \ @@ -739,7 +767,7 @@ qm resize $VMID scsi0 20G >/dev/null DESCRIPTION=$( cat < - + Logo @@ -786,7 +814,7 @@ if [ -n "$WAN_BRG" ]; then msg_ok "WAN interface added" sleep 5 # Brief pause after adding network interface fi -send_line_to_vm "sh ./opnsense-bootstrap.sh.in -y -f -r 25.7" +send_line_to_vm "sh ./opnsense-bootstrap.sh.in -y -f -r 26.1" msg_ok "OPNsense VM is being installed, do not close the terminal, or the installation will fail." #We need to wait for the OPNsense build proccess to finish, this takes a few minutes sleep 1000 diff --git a/vm/owncloud-vm.sh b/vm/owncloud-vm.sh index 5efe13ca4..30252c83c 100644 --- a/vm/owncloud-vm.sh +++ b/vm/owncloud-vm.sh @@ -22,7 +22,7 @@ echo -e "\n Loading..." GEN_MAC=02:$(openssl rand -hex 5 | awk '{print toupper($0)}' | sed 's/\(..\)/\1:/g; s/.$//') RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" METHOD="" -NSAPP="turnkey-owncloud-vm" +NSAPP="owncloud-vm" var_os="owncloud" var_version="18.0" APP="TurnKey ownCloud VM" @@ -314,7 +314,10 @@ function advanced_settings() { HN="owncloud-vm" echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" else - HN=$(echo ${VM_NAME,,} | tr -d ' ') + HN=$(echo "${VM_NAME,,}" | tr -cs 'a-z0-9-' '-' | sed 's/^-//;s/-$//') + if [ "$HN" != "${VM_NAME,,}" ]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --title "HOSTNAME ADJUSTED" --msgbox "Invalid characters detected. 
Hostname has been adjusted to:\n\n $HN" 10 58 + fi echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" fi else @@ -336,27 +339,31 @@ function advanced_settings() { exit-script fi - if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $CORE_COUNT ]; then - CORE_COUNT="2" - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + while true; do + if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$CORE_COUNT" ]; then CORE_COUNT="2"; fi + if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "CPU Cores must be a positive integer (e.g., 2)." 8 58 else - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + exit-script fi - else - exit-script - fi + done - if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $RAM_SIZE ]; then - RAM_SIZE="2048" - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + while true; do + if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$RAM_SIZE" ]; then RAM_SIZE="2048"; fi + if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "RAM Size must be a positive integer in MiB (e.g., 2048)." 
8 58 else - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + exit-script fi - else - exit-script - fi + done if BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Bridge" 8 58 vmbr0 --title "BRIDGE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then if [ -z $BRG ]; then @@ -369,43 +376,61 @@ function advanced_settings() { exit-script fi - if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MAC1 ]; then - MAC="$GEN_MAC" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + while true; do + if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MAC1" ]; then + MAC="$GEN_MAC" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + break + fi + if [[ "$MAC1" =~ ^([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}$ ]]; then + MAC="$MAC1" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "Invalid MAC address format. Use XX:XX:XX:XX:XX:XX (e.g., AA:BB:CC:DD:EE:FF)." 
8 58 else - MAC="$MAC1" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + exit-script fi - else - exit-script - fi + done - if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $VLAN1 ]; then - VLAN1="Default" - VLAN="" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + while true; do + if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$VLAN1" ]; then + VLAN1="Default" + VLAN="" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + if [[ "$VLAN1" =~ ^[0-9]+$ ]] && [ "$VLAN1" -ge 1 ] && [ "$VLAN1" -le 4094 ]; then + VLAN=",tag=$VLAN1" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "VLAN must be a number between 1 and 4094, or leave blank for default." 
8 58 else - VLAN=",tag=$VLAN1" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + exit-script fi - else - exit-script - fi + done - if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MTU1 ]; then - MTU1="Default" - MTU="" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + while true; do + if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MTU1" ]; then + MTU1="Default" + MTU="" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + if [[ "$MTU1" =~ ^[0-9]+$ ]] && [ "$MTU1" -ge 576 ] && [ "$MTU1" -le 65520 ]; then + MTU=",mtu=$MTU1" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "MTU Size must be a number between 576 and 65520, or leave blank for default." 8 58 else - MTU=",mtu=$MTU1" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + exit-script fi - else - exit-script - fi + done if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "START VIRTUAL MACHINE" --yesno "Start VM when completed?" 
10 58); then echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}" @@ -535,7 +560,7 @@ qm set $VMID \ DESCRIPTION=$( cat < - + Logo diff --git a/vm/pimox-haos-vm.sh b/vm/pimox-haos-vm.sh index a3bbba24d..026568b09 100644 --- a/vm/pimox-haos-vm.sh +++ b/vm/pimox-haos-vm.sh @@ -274,26 +274,35 @@ function advanced_settings() { echo -e "${DGN}Using Hostname: ${BGN}$HN${CL}" else if [ $exitstatus = 0 ]; then - HN=$(echo ${VM_NAME,,} | tr -d ' ') + HN=$(echo "${VM_NAME,,}" | tr -cs 'a-z0-9-' '-' | sed 's/^-//;s/-$//') + if [ "$HN" != "${VM_NAME,,}" ]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --title "HOSTNAME ADJUSTED" --msgbox "Invalid characters detected. Hostname has been adjusted to:\n\n $HN" 10 58 + fi echo -e "${DGN}Using Hostname: ${BGN}$HN${CL}" fi fi - CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3) - exitstatus=$? - if [ -z $CORE_COUNT ]; then - CORE_COUNT="2" - echo -e "${DGN}Allocated Cores: ${BGN}$CORE_COUNT${CL}" - else - if [ $exitstatus = 0 ]; then echo -e "${DGN}Allocated Cores: ${BGN}$CORE_COUNT${CL}"; fi - fi - RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 4096 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3) - exitstatus=$? - if [ -z $RAM_SIZE ]; then - RAM_SIZE="4096" - echo -e "${DGN}Allocated RAM: ${BGN}$RAM_SIZE${CL}" - else - if [ $exitstatus = 0 ]; then echo -e "${DGN}Allocated RAM: ${BGN}$RAM_SIZE${CL}"; fi - fi + while true; do + CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3) + exitstatus=$? 
+ if [ $exitstatus -ne 0 ]; then exit-script; fi + if [ -z "$CORE_COUNT" ]; then CORE_COUNT="2"; fi + if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${DGN}Allocated Cores: ${BGN}$CORE_COUNT${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "CPU Cores must be a positive integer (e.g., 2)." 8 58 + done + while true; do + RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 4096 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3) + exitstatus=$? + if [ $exitstatus -ne 0 ]; then exit-script; fi + if [ -z "$RAM_SIZE" ]; then RAM_SIZE="4096"; fi + if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${DGN}Allocated RAM: ${BGN}$RAM_SIZE${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "RAM Size must be a positive integer in MiB (e.g., 4096)." 8 58 + done BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Bridge" 8 58 vmbr0 --title "BRIDGE" --cancel-button Exit-Script 3>&1 1>&2 2>&3) exitstatus=$? if [ -z $BRG ]; then @@ -302,39 +311,56 @@ function advanced_settings() { else if [ $exitstatus = 0 ]; then echo -e "${DGN}Using Bridge: ${BGN}$BRG${CL}"; fi fi - MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3) - exitstatus=$? - if [ -z $MAC1 ]; then - MAC="$GEN_MAC" - echo -e "${DGN}Using MAC Address: ${BGN}$MAC${CL}" - else - if [ $exitstatus = 0 ]; then + while true; do + MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3) + exitstatus=$? 
+ if [ $exitstatus -ne 0 ]; then exit-script; fi + if [ -z "$MAC1" ]; then + MAC="$GEN_MAC" + echo -e "${DGN}Using MAC Address: ${BGN}$MAC${CL}" + break + fi + if [[ "$MAC1" =~ ^([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}$ ]]; then MAC="$MAC1" echo -e "${DGN}Using MAC Address: ${BGN}$MAC1${CL}" + break fi - fi - VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3) - exitstatus=$? - if [ $exitstatus = 0 ]; then - if [ -z $VLAN1 ]; then - VLAN1="Default" VLAN="" + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "Invalid MAC address format. Use XX:XX:XX:XX:XX:XX (e.g., AA:BB:CC:DD:EE:FF)." 8 58 + done + while true; do + VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3) + exitstatus=$? + if [ $exitstatus -ne 0 ]; then exit-script; fi + if [ -z "$VLAN1" ]; then + VLAN1="Default" + VLAN="" echo -e "${DGN}Using Vlan: ${BGN}$VLAN1${CL}" - else + break + fi + if [[ "$VLAN1" =~ ^[0-9]+$ ]] && [ "$VLAN1" -ge 1 ] && [ "$VLAN1" -le 4094 ]; then VLAN=",tag=$VLAN1" echo -e "${DGN}Using Vlan: ${BGN}$VLAN1${CL}" + break fi - fi - MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3) - exitstatus=$? - if [ $exitstatus = 0 ]; then - if [ -z $MTU1 ]; then - MTU1="Default" MTU="" + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "VLAN must be a number between 1 and 4094, or leave blank for default." 8 58 + done + while true; do + MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3) + exitstatus=$? 
+ if [ $exitstatus -ne 0 ]; then exit-script; fi + if [ -z "$MTU1" ]; then + MTU1="Default" + MTU="" echo -e "${DGN}Using Interface MTU Size: ${BGN}$MTU1${CL}" - else + break + fi + if [[ "$MTU1" =~ ^[0-9]+$ ]] && [ "$MTU1" -ge 576 ] && [ "$MTU1" -le 65520 ]; then MTU=",mtu=$MTU1" echo -e "${DGN}Using Interface MTU Size: ${BGN}$MTU1${CL}" + break fi - fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "MTU Size must be a number between 576 and 65520, or leave blank for default." 8 58 + done if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "START VIRTUAL MACHINE" --yesno "Start VM when completed?" 10 58); then echo -e "${DGN}Start VM when completed: ${BGN}yes${CL}" START_VM="yes" @@ -436,7 +462,7 @@ qm set $VMID \ DESCRIPTION=$( cat < - + Logo diff --git a/vm/truenas-vm.sh b/vm/truenas-vm.sh index 6066a29d6..75e3c9d79 100644 --- a/vm/truenas-vm.sh +++ b/vm/truenas-vm.sh @@ -23,6 +23,7 @@ echo -e "\n Loading..." GEN_MAC=02:$(openssl rand -hex 5 | awk '{print toupper($0)}' | sed 's/\(..\)/\1:/g; s/.$//') RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" METHOD="" +NSAPP="truenas-vm" YW=$(echo "\033[33m") BL=$(echo "\033[36m") @@ -354,7 +355,10 @@ function advanced_settings() { if [ -z $VM_NAME ]; then echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" else - HN=$(echo ${VM_NAME,,} | tr -d ' ') + HN=$(echo "${VM_NAME,,}" | tr -cs 'a-z0-9-' '-' | sed 's/^-//;s/-$//') + if [ "$HN" != "${VM_NAME,,}" ]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --title "HOSTNAME ADJUSTED" --msgbox "Invalid characters detected. 
Hostname has been adjusted to:\n\n $HN" 10 58 + fi echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" fi else @@ -379,27 +383,31 @@ function advanced_settings() { exit-script fi - if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 "$CORE_COUNT" --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $CORE_COUNT ]; then - CORE_COUNT="2" - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + while true; do + if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 "$CORE_COUNT" --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$CORE_COUNT" ]; then CORE_COUNT="2"; fi + if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "CPU Cores must be a positive integer (e.g., 2)." 8 58 else - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + exit-script fi - else - exit-script - fi + done - if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 "$RAM_SIZE" --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $RAM_SIZE ]; then - RAM_SIZE="8192" - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + while true; do + if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 "$RAM_SIZE" --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$RAM_SIZE" ]; then RAM_SIZE="8192"; fi + if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "RAM Size must be a positive integer in MiB (e.g., 8192)." 
8 58 else - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + exit-script fi - else - exit-script - fi + done if BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Bridge" 8 58 "$BRG" --title "BRIDGE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then if [ -z $BRG ]; then @@ -412,43 +420,61 @@ function advanced_settings() { exit-script fi - if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MAC1 ]; then - MAC="$GEN_MAC" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + while true; do + if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MAC1" ]; then + MAC="$GEN_MAC" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + break + fi + if [[ "$MAC1" =~ ^([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}$ ]]; then + MAC="$MAC1" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "Invalid MAC address format. Use XX:XX:XX:XX:XX:XX (e.g., AA:BB:CC:DD:EE:FF)." 
8 58 else - MAC="$MAC1" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + exit-script fi - else - exit-script - fi + done - if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $VLAN1 ]; then - VLAN1="Default" - VLAN="" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + while true; do + if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$VLAN1" ]; then + VLAN1="Default" + VLAN="" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + if [[ "$VLAN1" =~ ^[0-9]+$ ]] && [ "$VLAN1" -ge 1 ] && [ "$VLAN1" -le 4094 ]; then + VLAN=",tag=$VLAN1" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "VLAN must be a number between 1 and 4094, or leave blank for default." 
8 58 else - VLAN=",tag=$VLAN1" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + exit-script fi - else - exit-script - fi + done - if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MTU1 ]; then - MTU1="Default" - MTU="" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + while true; do + if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MTU1" ]; then + MTU1="Default" + MTU="" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + if [[ "$MTU1" =~ ^[0-9]+$ ]] && [ "$MTU1" -ge 576 ] && [ "$MTU1" -le 65520 ]; then + MTU=",mtu=$MTU1" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "MTU Size must be a number between 576 and 65520, or leave blank for default." 8 58 else - MTU=",mtu=$MTU1" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + exit-script fi - else - exit-script - fi + done if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "IMPORT ONBOARD DISKS" --yesno "Would you like to import onboard disks?" 10 58); then echo -e "${DISK}${BOLD}${DGN}Import onboard disks: ${BGN}yes${CL}" @@ -584,7 +610,7 @@ fi DESCRIPTION=$( cat < - + Logo diff --git a/vm/ubuntu2204-vm.sh b/vm/ubuntu2204-vm.sh index b118863ec..4b794a97b 100644 --- a/vm/ubuntu2204-vm.sh +++ b/vm/ubuntu2204-vm.sh @@ -22,7 +22,7 @@ echo -e "\n Loading..." 
GEN_MAC=02:$(openssl rand -hex 5 | awk '{print toupper($0)}' | sed 's/\(..\)/\1:/g; s/.$//') RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" METHOD="" -NSAPP="ubuntu-2204-vm" +NSAPP="ubuntu2204-vm" var_os="ubuntu" var_version="2204" @@ -310,7 +310,10 @@ function advanced_settings() { HN="ubuntu" echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" else - HN=$(echo ${VM_NAME,,} | tr -d ' ') + HN=$(echo "${VM_NAME,,}" | tr -cs 'a-z0-9-' '-' | sed 's/^-//;s/-$//') + if [ "$HN" != "${VM_NAME,,}" ]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --title "HOSTNAME ADJUSTED" --msgbox "Invalid characters detected. Hostname has been adjusted to:\n\n $HN" 10 58 + fi echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" fi else @@ -332,27 +335,31 @@ function advanced_settings() { exit-script fi - if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $CORE_COUNT ]; then - CORE_COUNT="2" - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + while true; do + if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$CORE_COUNT" ]; then CORE_COUNT="2"; fi + if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "CPU Cores must be a positive integer (e.g., 2)." 
8 58 else - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + exit-script fi - else - exit-script - fi + done - if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $RAM_SIZE ]; then - RAM_SIZE="2048" - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + while true; do + if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$RAM_SIZE" ]; then RAM_SIZE="2048"; fi + if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "RAM Size must be a positive integer in MiB (e.g., 2048)." 8 58 else - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + exit-script fi - else - exit-script - fi + done if BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Bridge" 8 58 vmbr0 --title "BRIDGE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then if [ -z $BRG ]; then @@ -365,43 +372,61 @@ function advanced_settings() { exit-script fi - if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MAC1 ]; then - MAC="$GEN_MAC" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + while true; do + if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MAC1" ]; then + MAC="$GEN_MAC" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + break + fi + if [[ "$MAC1" =~ ^([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}$ ]]; then + MAC="$MAC1" + echo -e 
"${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "Invalid MAC address format. Use XX:XX:XX:XX:XX:XX (e.g., AA:BB:CC:DD:EE:FF)." 8 58 else - MAC="$MAC1" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + exit-script fi - else - exit-script - fi + done - if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $VLAN1 ]; then - VLAN1="Default" - VLAN="" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + while true; do + if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$VLAN1" ]; then + VLAN1="Default" + VLAN="" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + if [[ "$VLAN1" =~ ^[0-9]+$ ]] && [ "$VLAN1" -ge 1 ] && [ "$VLAN1" -le 4094 ]; then + VLAN=",tag=$VLAN1" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "VLAN must be a number between 1 and 4094, or leave blank for default." 
8 58 else - VLAN=",tag=$VLAN1" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + exit-script fi - else - exit-script - fi + done - if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MTU1 ]; then - MTU1="Default" - MTU="" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + while true; do + if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MTU1" ]; then + MTU1="Default" + MTU="" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + if [[ "$MTU1" =~ ^[0-9]+$ ]] && [ "$MTU1" -ge 576 ] && [ "$MTU1" -le 65520 ]; then + MTU=",mtu=$MTU1" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "MTU Size must be a number between 576 and 65520, or leave blank for default." 8 58 else - MTU=",mtu=$MTU1" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + exit-script fi - else - exit-script - fi + done if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "START VIRTUAL MACHINE" --yesno "Start VM when completed?" 10 58); then echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}" @@ -517,7 +542,7 @@ qm set $VMID \ DESCRIPTION=$( cat < - + Logo diff --git a/vm/ubuntu2404-vm.sh b/vm/ubuntu2404-vm.sh index 25e5c2ea9..fcb081273 100644 --- a/vm/ubuntu2404-vm.sh +++ b/vm/ubuntu2404-vm.sh @@ -23,7 +23,7 @@ echo -e "\n Loading..." 
GEN_MAC=02:$(openssl rand -hex 5 | awk '{print toupper($0)}' | sed 's/\(..\)/\1:/g; s/.$//') RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" METHOD="" -NSAPP="ubuntu-2404-vm" +NSAPP="ubuntu2404-vm" var_os="ubuntu" var_version="2404" @@ -313,7 +313,10 @@ function advanced_settings() { HN="ubuntu" echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" else - HN=$(echo ${VM_NAME,,} | tr -d ' ') + HN=$(echo "${VM_NAME,,}" | tr -cs 'a-z0-9-' '-' | sed 's/^-//;s/-$//') + if [ "$HN" != "${VM_NAME,,}" ]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --title "HOSTNAME ADJUSTED" --msgbox "Invalid characters detected. Hostname has been adjusted to:\n\n $HN" 10 58 + fi echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" fi else @@ -335,27 +338,31 @@ function advanced_settings() { exit-script fi - if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $CORE_COUNT ]; then - CORE_COUNT="2" - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + while true; do + if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$CORE_COUNT" ]; then CORE_COUNT="2"; fi + if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "CPU Cores must be a positive integer (e.g., 2)." 
8 58 else - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + exit-script fi - else - exit-script - fi + done - if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $RAM_SIZE ]; then - RAM_SIZE="2048" - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + while true; do + if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$RAM_SIZE" ]; then RAM_SIZE="2048"; fi + if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "RAM Size must be a positive integer in MiB (e.g., 2048)." 8 58 else - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + exit-script fi - else - exit-script - fi + done if BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Bridge" 8 58 vmbr0 --title "BRIDGE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then if [ -z $BRG ]; then @@ -368,43 +375,61 @@ function advanced_settings() { exit-script fi - if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MAC1 ]; then - MAC="$GEN_MAC" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + while true; do + if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MAC1" ]; then + MAC="$GEN_MAC" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + break + fi + if [[ "$MAC1" =~ ^([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}$ ]]; then + MAC="$MAC1" + echo -e 
"${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "Invalid MAC address format. Use XX:XX:XX:XX:XX:XX (e.g., AA:BB:CC:DD:EE:FF)." 8 58 else - MAC="$MAC1" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + exit-script fi - else - exit-script - fi + done - if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $VLAN1 ]; then - VLAN1="Default" - VLAN="" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + while true; do + if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$VLAN1" ]; then + VLAN1="Default" + VLAN="" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + if [[ "$VLAN1" =~ ^[0-9]+$ ]] && [ "$VLAN1" -ge 1 ] && [ "$VLAN1" -le 4094 ]; then + VLAN=",tag=$VLAN1" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "VLAN must be a number between 1 and 4094, or leave blank for default." 
8 58 else - VLAN=",tag=$VLAN1" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + exit-script fi - else - exit-script - fi + done - if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MTU1 ]; then - MTU1="Default" - MTU="" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + while true; do + if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MTU1" ]; then + MTU1="Default" + MTU="" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + if [[ "$MTU1" =~ ^[0-9]+$ ]] && [ "$MTU1" -ge 576 ] && [ "$MTU1" -le 65520 ]; then + MTU=",mtu=$MTU1" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "MTU Size must be a number between 576 and 65520, or leave blank for default." 8 58 else - MTU=",mtu=$MTU1" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + exit-script fi - else - exit-script - fi + done if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "START VIRTUAL MACHINE" --yesno "Start VM when completed?" 10 58); then echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}" @@ -519,7 +544,7 @@ qm set $VMID \ DESCRIPTION=$( cat < - + Logo diff --git a/vm/ubuntu2504-vm.sh b/vm/ubuntu2504-vm.sh index 7b0176f69..8106cc393 100644 --- a/vm/ubuntu2504-vm.sh +++ b/vm/ubuntu2504-vm.sh @@ -22,7 +22,7 @@ echo -e "\n Loading..." 
GEN_MAC=02:$(openssl rand -hex 5 | awk '{print toupper($0)}' | sed 's/\(..\)/\1:/g; s/.$//') RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)" METHOD="" -NSAPP="ubuntu-2504-vm" +NSAPP="ubuntu2504-vm" var_os="ubuntu" var_version="2504" @@ -312,7 +312,10 @@ function advanced_settings() { HN="ubuntu" echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" else - HN=$(echo ${VM_NAME,,} | tr -d ' ') + HN=$(echo "${VM_NAME,,}" | tr -cs 'a-z0-9-' '-' | sed 's/^-//;s/-$//') + if [ "$HN" != "${VM_NAME,,}" ]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --title "HOSTNAME ADJUSTED" --msgbox "Invalid characters detected. Hostname has been adjusted to:\n\n $HN" 10 58 + fi echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" fi else @@ -334,27 +337,31 @@ function advanced_settings() { exit-script fi - if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $CORE_COUNT ]; then - CORE_COUNT="2" - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + while true; do + if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$CORE_COUNT" ]; then CORE_COUNT="2"; fi + if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "CPU Cores must be a positive integer (e.g., 2)." 
8 58 else - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + exit-script fi - else - exit-script - fi + done - if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $RAM_SIZE ]; then - RAM_SIZE="2048" - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + while true; do + if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$RAM_SIZE" ]; then RAM_SIZE="2048"; fi + if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "RAM Size must be a positive integer in MiB (e.g., 2048)." 8 58 else - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + exit-script fi - else - exit-script - fi + done if BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Bridge" 8 58 vmbr0 --title "BRIDGE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then if [ -z $BRG ]; then @@ -367,43 +374,61 @@ function advanced_settings() { exit-script fi - if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MAC1 ]; then - MAC="$GEN_MAC" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + while true; do + if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MAC1" ]; then + MAC="$GEN_MAC" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + break + fi + if [[ "$MAC1" =~ ^([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}$ ]]; then + MAC="$MAC1" + echo -e 
"${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "Invalid MAC address format. Use XX:XX:XX:XX:XX:XX (e.g., AA:BB:CC:DD:EE:FF)." 8 58 else - MAC="$MAC1" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + exit-script fi - else - exit-script - fi + done - if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $VLAN1 ]; then - VLAN1="Default" - VLAN="" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + while true; do + if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$VLAN1" ]; then + VLAN1="Default" + VLAN="" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + if [[ "$VLAN1" =~ ^[0-9]+$ ]] && [ "$VLAN1" -ge 1 ] && [ "$VLAN1" -le 4094 ]; then + VLAN=",tag=$VLAN1" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "VLAN must be a number between 1 and 4094, or leave blank for default." 
8 58 else - VLAN=",tag=$VLAN1" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + exit-script fi - else - exit-script - fi + done - if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MTU1 ]; then - MTU1="Default" - MTU="" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + while true; do + if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MTU1" ]; then + MTU1="Default" + MTU="" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + if [[ "$MTU1" =~ ^[0-9]+$ ]] && [ "$MTU1" -ge 576 ] && [ "$MTU1" -le 65520 ]; then + MTU=",mtu=$MTU1" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "MTU Size must be a number between 576 and 65520, or leave blank for default." 8 58 else - MTU=",mtu=$MTU1" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + exit-script fi - else - exit-script - fi + done if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "START VIRTUAL MACHINE" --yesno "Start VM when completed?" 10 58); then echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}" @@ -469,7 +494,7 @@ fi msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location." msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}." 
msg_info "Retrieving the URL for the Ubuntu 25.04 Disk Image" -URL=https://cloud-images.ubuntu.com/plucky/current/plucky-server-cloudimg-amd64.img +URL=https://cloud-images.ubuntu.com/releases/server/plucky/release/ubuntu-25.04-server-cloudimg-amd64.img sleep 2 msg_ok "${CL}${BL}${URL}${CL}" curl -f#SL -o "$(basename "$URL")" "$URL" @@ -518,7 +543,7 @@ qm set $VMID \ DESCRIPTION=$( cat < - + Logo diff --git a/vm/umbrel-os-vm.sh b/vm/umbrel-os-vm.sh index af9bc41dc..44e1ff2be 100644 --- a/vm/umbrel-os-vm.sh +++ b/vm/umbrel-os-vm.sh @@ -373,7 +373,10 @@ function advanced_settings() { HN="umbrelos" echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" else - HN=$(echo ${VM_NAME,,} | tr -d ' ') + HN=$(echo "${VM_NAME,,}" | tr -cs 'a-z0-9-' '-' | sed 's/^-//;s/-$//') + if [ "$HN" != "${VM_NAME,,}" ]; then + whiptail --backtitle "Proxmox VE Helper Scripts" --title "HOSTNAME ADJUSTED" --msgbox "Invalid characters detected. Hostname has been adjusted to:\n\n $HN" 10 58 + fi echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}" fi else @@ -398,27 +401,31 @@ function advanced_settings() { exit-script fi - if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $CORE_COUNT ]; then - CORE_COUNT="2" - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + while true; do + if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 2 --title "CORE COUNT" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$CORE_COUNT" ]; then CORE_COUNT="2"; fi + if [[ "$CORE_COUNT" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "CPU Cores must be a positive integer (e.g., 2)." 
8 58 else - echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}" + exit-script fi - else - exit-script - fi + done - if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $RAM_SIZE ]; then - RAM_SIZE="2048" - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + while true; do + if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 2048 --title "RAM" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$RAM_SIZE" ]; then RAM_SIZE="2048"; fi + if [[ "$RAM_SIZE" =~ ^[1-9][0-9]*$ ]]; then + echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "RAM Size must be a positive integer in MiB (e.g., 2048)." 8 58 else - echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}$RAM_SIZE${CL}" + exit-script fi - else - exit-script - fi + done if BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Bridge" 8 58 vmbr0 --title "BRIDGE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then if [ -z $BRG ]; then @@ -431,43 +438,61 @@ function advanced_settings() { exit-script fi - if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MAC1 ]; then - MAC="$GEN_MAC" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + while true; do + if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address" 8 58 $GEN_MAC --title "MAC ADDRESS" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MAC1" ]; then + MAC="$GEN_MAC" + echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC${CL}" + break + fi + if [[ "$MAC1" =~ ^([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}$ ]]; then + MAC="$MAC1" + echo -e 
"${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "Invalid MAC address format. Use XX:XX:XX:XX:XX:XX (e.g., AA:BB:CC:DD:EE:FF)." 8 58 else - MAC="$MAC1" - echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}" + exit-script fi - else - exit-script - fi + done - if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $VLAN1 ]; then - VLAN1="Default" - VLAN="" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + while true; do + if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for default)" 8 58 --title "VLAN" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$VLAN1" ]; then + VLAN1="Default" + VLAN="" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + if [[ "$VLAN1" =~ ^[0-9]+$ ]] && [ "$VLAN1" -ge 1 ] && [ "$VLAN1" -le 4094 ]; then + VLAN=",tag=$VLAN1" + echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "VLAN must be a number between 1 and 4094, or leave blank for default." 
8 58 else - VLAN=",tag=$VLAN1" - echo -e "${VLANTAG}${BOLD}${DGN}VLAN: ${BGN}$VLAN1${CL}" + exit-script fi - else - exit-script - fi + done - if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then - if [ -z $MTU1 ]; then - MTU1="Default" - MTU="" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + while true; do + if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default)" 8 58 --title "MTU SIZE" --cancel-button Exit-Script 3>&1 1>&2 2>&3); then + if [ -z "$MTU1" ]; then + MTU1="Default" + MTU="" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + if [[ "$MTU1" =~ ^[0-9]+$ ]] && [ "$MTU1" -ge 576 ] && [ "$MTU1" -le 65520 ]; then + MTU=",mtu=$MTU1" + echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + break + fi + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID INPUT" --msgbox "MTU Size must be a number between 576 and 65520, or leave blank for default." 8 58 else - MTU=",mtu=$MTU1" - echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}" + exit-script fi - else - exit-script - fi + done if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "START VIRTUAL MACHINE" --yesno "Start VM when completed?" 10 58); then echo -e "${GATEWAY}${BOLD}${DGN}Start VM when completed: ${BGN}yes${CL}" @@ -504,6 +529,7 @@ pve_check ssh_check ensure_pv start_script +post_to_api_vm msg_info "Validating Storage" STORAGE_MENU=() @@ -564,7 +590,7 @@ qm resize $VMID scsi0 ${DISK_SIZE} >/dev/null DESCRIPTION=$( cat < - + Logo