Compare commits


1 Commit

Author: CanbiZ (MickLesk)
SHA1: e3446b6367
Message: fix(homepage): preserve config directory during updates
Fixes #10985 - The update script was deleting user config files due to CLEAN_INSTALL=1 flag. Now backs up and restores both .env and config/ directory to preserve user configurations.
Date: 2026-01-20 13:06:52 +01:00
40 changed files with 600 additions and 1804 deletions
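
For context, the backup-and-restore pattern the commit message describes looks roughly like the sketch below. This is a minimal illustration, not the code from this diff: the `/opt/homepage` path, the `gethomepage/homepage` repository slug, and the temporary backup location are assumptions, while `fetch_and_deploy_gh_release` and `CLEAN_INSTALL=1` are the project's helper conventions as seen elsewhere in this comparison.

```bash
# Hypothetical sketch of the "preserve config during update" pattern.
# CLEAN_INSTALL=1 makes the deploy helper wipe the target directory first,
# so user-editable files are saved beforehand and copied back afterwards.
BACKUP_DIR="$(mktemp -d)"
[ -f /opt/homepage/.env ] && cp /opt/homepage/.env "$BACKUP_DIR/.env"
[ -d /opt/homepage/config ] && cp -a /opt/homepage/config "$BACKUP_DIR/config"

# Clean redeploy of the application (path and repo slug are assumed, not verified)
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "homepage" "gethomepage/homepage" "tarball" "latest"

# Restore the user's configuration on top of the fresh install
[ -f "$BACKUP_DIR/.env" ] && cp "$BACKUP_DIR/.env" /opt/homepage/.env
[ -d "$BACKUP_DIR/config" ] && mkdir -p /opt/homepage/config && cp -a "$BACKUP_DIR/config/." /opt/homepage/config/
rm -rf "$BACKUP_DIR"
```

The actual update script in this diff may differ in detail, but the ordering — back up, clean-deploy, restore — is the essence of the fix.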

View File

@@ -10,78 +10,8 @@
> [!CAUTION]
Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit the project's popularity for potentially malicious purposes.
## 2026-01-23
## 2026-01-22
### 🆕 New Scripts
- Loki | Alpine-Loki ([#11048](https://github.com/community-scripts/ProxmoxVE/pull/11048))
### 🚀 Updated Scripts
- Immich: Increase RAM to 6GB [@vhsdream](https://github.com/vhsdream) ([#10965](https://github.com/community-scripts/ProxmoxVE/pull/10965))
- #### 🐞 Bug Fixes
- Jotty: Increase default disk size from 6 to 8 [@tremor021](https://github.com/tremor021) ([#11056](https://github.com/community-scripts/ProxmoxVE/pull/11056))
- Fix tags in several scripts [@s4dmach1ne](https://github.com/s4dmach1ne) ([#11050](https://github.com/community-scripts/ProxmoxVE/pull/11050))
### 💾 Core
- #### ✨ New Features
- tools: use distro packages for MariaDB by default [@MickLesk](https://github.com/MickLesk) ([#11049](https://github.com/community-scripts/ProxmoxVE/pull/11049))
## 2026-01-21
### 🆕 New Scripts
- Byparr ([#11039](https://github.com/community-scripts/ProxmoxVE/pull/11039))
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- fix: Snipe-IT update missing all user uploads (#11032) [@ruanmed](https://github.com/ruanmed) ([#11033](https://github.com/community-scripts/ProxmoxVE/pull/11033))
- yubal: fix for v0.2 [@CrazyWolf13](https://github.com/CrazyWolf13) ([#11006](https://github.com/community-scripts/ProxmoxVE/pull/11006))
- Joplin-Server: use yarn workspaces focus for faster builds [@MickLesk](https://github.com/MickLesk) ([#11027](https://github.com/community-scripts/ProxmoxVE/pull/11027))
### 💾 Core
- #### ✨ New Features
- tools: add ubuntu PHP repository setup [@MickLesk](https://github.com/MickLesk) ([#11034](https://github.com/community-scripts/ProxmoxVE/pull/11034))
- #### 🔧 Refactor
- core: allow empty tags & improve template search [@MickLesk](https://github.com/MickLesk) ([#11020](https://github.com/community-scripts/ProxmoxVE/pull/11020))
### 🌐 Website
- #### 📝 Script Information
- Joplin Server: Set disable flag to true in joplin-server.json [@tremor021](https://github.com/tremor021) ([#11008](https://github.com/community-scripts/ProxmoxVE/pull/11008))
## 2026-01-20
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- dolibarr: switch mirror [@MickLesk](https://github.com/MickLesk) ([#11004](https://github.com/community-scripts/ProxmoxVE/pull/11004))
- checkmk: reorder base function [@MickLesk](https://github.com/MickLesk) ([#10990](https://github.com/community-scripts/ProxmoxVE/pull/10990))
- Homepage: preserve config directory during updates [@MickLesk](https://github.com/MickLesk) ([#10993](https://github.com/community-scripts/ProxmoxVE/pull/10993))
- DiscoPanel: add go for update build process [@miausalvaje](https://github.com/miausalvaje) ([#10991](https://github.com/community-scripts/ProxmoxVE/pull/10991))
### 💾 Core
- #### ✨ New Features
- core: add retry logic for template lock in LXC container creation [@MickLesk](https://github.com/MickLesk) ([#11002](https://github.com/community-scripts/ProxmoxVE/pull/11002))
- core: implement ensure_profile_loaded function [@MickLesk](https://github.com/MickLesk) ([#10999](https://github.com/community-scripts/ProxmoxVE/pull/10999))
- core: add input validations for several functions [@MickLesk](https://github.com/MickLesk) ([#10995](https://github.com/community-scripts/ProxmoxVE/pull/10995))
## 2026-01-19
### 🆕 New Scripts

View File

@@ -6,7 +6,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV
# Source: https://komo.do
APP="Alpine-Komodo"
var_tags="${var_tags:-docker;alpine}"
var_tags="${var_tags:-docker,alpine}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-1024}"
var_disk="${var_disk:-10}"

View File

@@ -1,71 +0,0 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: hoholms
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/grafana/loki
APP="Alpine-Loki"
var_tags="${var_tags:-alpine;monitoring}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-256}"
var_disk="${var_disk:-1}"
var_os="${var_os:-alpine}"
var_version="${var_version:-3.23}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
if ! apk -e info newt >/dev/null 2>&1; then
apk add -q newt
fi
LXCIP=$(ip a s dev eth0 | awk '/inet / {print $2}' | cut -d/ -f1)
while true; do
CHOICE=$(
whiptail --backtitle "Proxmox VE Helper Scripts" --title "SUPPORT" --menu "Select option" 11 58 3 \
"1" "Check for Loki Updates" \
"2" "Allow 0.0.0.0 for listening" \
"3" "Allow only ${LXCIP} for listening" 3>&2 2>&1 1>&3
)
exit_status=$?
if [ $exit_status == 1 ]; then
clear
exit-script
fi
header_info
case $CHOICE in
1)
$STD apk -U upgrade
msg_ok "Updated successfully!"
exit
;;
2)
sed -i -e "s/cfg:server.http_addr=.*/cfg:server.http_addr=0.0.0.0/g" /etc/conf.d/loki
service loki restart
msg_ok "Allowed listening on all interfaces!"
exit
;;
3)
sed -i -e "s/cfg:server.http_addr=.*/cfg:server.http_addr=$LXCIP/g" /etc/conf.d/loki
service loki restart
msg_ok "Allowed listening only on ${LXCIP}!"
exit
;;
esac
done
exit 0
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${APP} should be reachable by going to the following URL.
${BL}http://${IP}:3100${CL} \n"
echo -e "Promtail should be reachable by going to the following URL.
${BL}http://${IP}:9080${CL} \n"

View File

@@ -1,53 +0,0 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: luismco
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/ThePhaseless/Byparr
APP="Byparr"
var_tags="${var_tags:-proxy}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-2048}"
var_disk="${var_disk:-4}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -d /opt/Byparr ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
if check_for_gh_release "Byparr" "ThePhaseless/Byparr"; then
msg_info "Stopping Service"
systemctl stop byparr
msg_ok "Stopped Service"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "Byparr" "ThePhaseless/Byparr" "tarball" "latest"
msg_info "Starting Service"
systemctl start byparr
msg_ok "Started Service"
msg_ok "Updated successfully!"
fi
exit
}
start
build_container
description
msg_ok "Completed successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8191${CL}"

View File

@@ -54,14 +54,9 @@ function update_script() {
cd /opt/discopanel/web/discopanel
$STD npm install
$STD npm run build
msg_ok "Built Web Interface"
setup_go
msg_info "Building DiscoPanel"
cd /opt/discopanel
$STD go build -o discopanel cmd/discopanel/main.go
msg_ok "Built DiscoPanel"
msg_ok "Setup DiscoPanel"
msg_info "Restoring Data"
mkdir -p /opt/discopanel/data

View File

@@ -6,7 +6,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV
# Source: https://fluidcalendar.com
APP="fluid-calendar"
var_tags="${var_tags:-calendar;tasks}"
var_tags="${var_tags:-calendar,tasks}"
var_cpu="${var_cpu:-3}"
var_ram="${var_ram:-4096}"
var_disk="${var_disk:-7}"

View File

@@ -1,6 +0,0 @@
___ __ _ __ __ _
/ | / /___ (_)___ ___ / / ____ / /__(_)
/ /| | / / __ \/ / __ \/ _ \______/ / / __ \/ //_/ /
/ ___ |/ / /_/ / / / / / __/_____/ /___/ /_/ / ,< / /
/_/ |_/_/ .___/_/_/ /_/\___/ /_____/\____/_/|_/_/
/_/

View File

@@ -1,6 +0,0 @@
____
/ __ )__ ______ ____ ___________
/ __ / / / / __ \/ __ `/ ___/ ___/
/ /_/ / /_/ / /_/ / /_/ / / / /
/_____/\__, / .___/\__,_/_/ /_/
/____/_/

View File

@@ -1,6 +0,0 @@
__ __ _
/ / ____ / /__(_)
/ / / __ \/ //_/ /
/ /___/ /_/ / ,< / /
/_____/\____/_/|_/_/

View File

@@ -9,7 +9,7 @@ APP="immich"
var_tags="${var_tags:-photos}"
var_disk="${var_disk:-20}"
var_cpu="${var_cpu:-4}"
var_ram="${var_ram:-6144}"
var_ram="${var_ram:-4096}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"

View File

@@ -44,9 +44,7 @@ function update_script() {
sed -i "/onenote-converter/d" packages/lib/package.json
$STD yarn config set --home enableTelemetry 0
export BUILD_SEQUENCIAL=1
$STD yarn workspaces focus @joplin/server
$STD yarn workspaces foreach -R --topological-dev --from @joplin/server run build
$STD yarn workspaces foreach -R --topological-dev --from @joplin/server run tsc
$STD yarn install --inline-builds
msg_ok "Updated Joplin-Server"
msg_info "Starting Services"

View File

@@ -9,7 +9,7 @@ APP="jotty"
var_tags="${var_tags:-tasks;notes}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-4096}"
var_disk="${var_disk:-8}"
var_disk="${var_disk:-6}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"

View File

@@ -1,103 +0,0 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2025 community-scripts ORG
# Author: hoholms
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/grafana/loki
APP="Loki"
var_tags="${var_tags:-monitoring;logs}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-512}"
var_disk="${var_disk:-2}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
function update_script() {
header_info
check_container_storage
check_container_resources
if ! dpkg -s loki >/dev/null 2>&1; then
msg_error "No ${APP} Installation Found!"
exit 1
fi
while true; do
CHOICE=$(
whiptail --backtitle "Proxmox VE Helper Scripts" --title "SUPPORT" --menu "Select option" 11 58 3 \
"1" "Update Loki & Promtail" \
"2" "Allow 0.0.0.0 for listening" \
"3" "Allow only ${LOCAL_IP} for listening" 3>&2 2>&1 1>&3
)
exit_status=$?
if [ $exit_status == 1 ]; then
clear
exit-script
fi
header_info
case $CHOICE in
1)
msg_info "Stopping Loki"
systemctl stop loki
if systemctl is-active --quiet promtail 2>/dev/null || dpkg -s promtail >/dev/null 2>&1; then
systemctl stop promtail
fi
msg_ok "Stopped Loki"
msg_info "Updating Loki"
$STD apt update
$STD apt install -y --only-upgrade loki
if dpkg -s promtail >/dev/null 2>&1; then
$STD apt install -y --only-upgrade promtail
fi
msg_ok "Updated Loki"
msg_info "Starting Loki"
systemctl start loki
if dpkg -s promtail >/dev/null 2>&1; then
systemctl start promtail
fi
msg_ok "Started Loki"
msg_ok "Updated successfully!"
exit
;;
2)
msg_info "Configuring Loki to listen on 0.0.0.0"
sed -i 's/http_listen_address:.*/http_listen_address: 0.0.0.0/' /etc/loki/config.yml
sed -i 's/http_listen_port:.*/http_listen_port: 3100/' /etc/loki/config.yml
systemctl restart loki
msg_ok "Configured Loki to listen on 0.0.0.0"
exit
;;
3)
msg_info "Configuring Loki to listen on ${LOCAL_IP}"
sed -i "s/http_listen_address:.*/http_listen_address: $LOCAL_IP/" /etc/loki/config.yml
sed -i 's/http_listen_port:.*/http_listen_port: 3100/' /etc/loki/config.yml
systemctl restart loki
msg_ok "Configured Loki to listen on ${LOCAL_IP}"
exit
;;
esac
done
exit 0
}
start
build_container
description
msg_ok "Completed successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access loki using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3100${CL}\n"
if dpkg -s promtail >/dev/null 2>&1; then
echo -e "${INFO}${YW} Access promtail using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:9080${CL}"
fi

View File

@@ -6,7 +6,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV
# Source: https://github.com/opf/openproject
APP="OpenProject"
var_tags="${var_tags:-project-management;erp}"
var_tags="${var_tags:-project-management,erp}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-4096}"
var_disk="${var_disk:-8}"

View File

@@ -6,7 +6,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV
# Source: https://github.com/plankanban/planka
APP="PLANKA"
var_tags="${var_tags:-Todo;kanban}"
var_tags="${var_tags:-Todo,kanban}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-1024}"
var_disk="${var_disk:-4}"

View File

@@ -6,7 +6,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV
# Source: https://github.com/rcourtman/Pulse
APP="Pulse"
var_tags="${var_tags:-monitoring;proxmox}"
var_tags="${var_tags:-monitoring,proxmox}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-1024}"
var_disk="${var_disk:-4}"

View File

@@ -50,8 +50,8 @@ function update_script() {
$STD apt update
$STD apt -y upgrade
cp /opt/snipe-it-backup/.env /opt/snipe-it/.env
cp -r /opt/snipe-it-backup/public/uploads/. /opt/snipe-it/public/uploads/
cp -r /opt/snipe-it-backup/storage/private_uploads/. /opt/snipe-it/storage/private_uploads/
cp -r /opt/snipe-it-backup/public/uploads/ /opt/snipe-it/public/uploads/
cp -r /opt/snipe-it-backup/storage/private_uploads /opt/snipe-it/storage/private_uploads
cd /opt/snipe-it/
export COMPOSER_ALLOW_SUPERUSER=1
$STD composer install --no-dev --optimize-autoloader --no-interaction

View File

@@ -6,7 +6,7 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV
# Source: https://github.com/CrazyWolf13/streamlink-webui
APP="streamlink-webui"
var_tags="${var_tags:-download;streaming}"
var_tags="${var_tags:-download,streaming}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-2048}"
var_disk="${var_disk:-10}"

View File

@@ -544,8 +544,7 @@ network_check
update_os
PHP_VERSION="8.4" PHP_MODULE="bcmath,curl,pdo_mysql" setup_php
setup_mariadb # Uses distribution packages (recommended)
# Or for specific version: MARIADB_VERSION="11.4" setup_mariadb
MARIADB_VERSION="11.4" setup_mariadb
# Database setup
DB_PASS=$(openssl rand -base64 18 | tr -dc 'a-zA-Z0-9' | head -c13)

View File

@@ -85,8 +85,7 @@ pkg_install curl wget git
```bash
setup_nodejs "20"
setup_php "8.3"
setup_mariadb # Uses distribution packages (recommended)
# MARIADB_VERSION="11.4" setup_mariadb # For specific version
setup_mariadb "11"
```
### Phase 4: Application Download

View File

@@ -75,8 +75,7 @@ Complete reference of environment variables and configuration options.
```bash
setup_nodejs "20" # Install Node.js v20
setup_php "8.2" # Install PHP 8.2
setup_mariadb # Install MariaDB (distribution packages)
# MARIADB_VERSION="11.4" setup_mariadb # Specific version from official repo
setup_mariadb "11" # Install MariaDB 11
```
### Safe Package Operations

View File

@@ -24,7 +24,7 @@ Complete alphabetical reference of all functions in tools.func with parameters,
- `setup_golang(VERSION)` - Install Go programming language
**Databases**:
- `setup_mariadb()` - Install MariaDB server (distro packages by default)
- `setup_mariadb(VERSION)` - Install MariaDB server
- `setup_postgresql(VERSION)` - Install PostgreSQL
- `setup_mongodb(VERSION)` - Install MongoDB
- `setup_redis(VERSION)` - Install Redis cache
@@ -238,20 +238,17 @@ setup_php "8.3"
---
### setup_mariadb()
### setup_mariadb(VERSION)
Install MariaDB server and client utilities.
**Signature**:
```bash
setup_mariadb # Uses distribution packages (recommended)
MARIADB_VERSION="11.4" setup_mariadb # Uses official MariaDB repository
setup_mariadb VERSION
```
**Variables**:
- `MARIADB_VERSION` - (optional) Specific MariaDB version
- Not set or `"latest"`: Uses distribution packages (most reliable, avoids mirror issues)
- Specific version (e.g., `"11.4"`, `"12.2"`): Uses official MariaDB repository
**Parameters**:
- `VERSION` - MariaDB version (e.g., "10.6", "11.0")
**Returns**:
- `0` - Installation successful
@@ -262,11 +259,7 @@ MARIADB_VERSION="11.4" setup_mariadb # Uses official MariaDB repository
**Example**:
```bash
# Recommended: Use distribution packages (stable, no mirror issues)
setup_mariadb
# Specific version from official repository
MARIADB_VERSION="11.4" setup_mariadb
setup_mariadb "11.0"
```
---
@@ -448,7 +441,7 @@ source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
pkg_update # Update package lists
setup_nodejs "20" # Install Node.js
setup_mariadb # Install MariaDB (distribution packages)
setup_mariadb "11" # Install MariaDB
# ... application installation ...
@@ -467,7 +460,7 @@ source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
pkg_update
setup_nginx
setup_php "8.3"
setup_mariadb # Uses distribution packages
setup_mariadb "11"
setup_composer
```

View File

@@ -65,7 +65,7 @@ pkg_update
setup_nginx
setup_php "8.3"
setup_mariadb # Uses distribution packages (recommended)
setup_mariadb "11"
setup_composer
msg_ok "Web stack installed"
@@ -388,7 +388,7 @@ pkg_install package-name
# Chain multiple tools together
setup_nodejs "20"
setup_php "8.3"
setup_mariadb # Distribution packages (recommended)
setup_mariadb "11"
# Check command success
if ! setup_docker; then

View File

@@ -1,35 +0,0 @@
{
"name": "Byparr",
"slug": "byparr",
"categories": [
14
],
"date_created": "2026-01-21",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 8191,
"documentation": "https://github.com/ThePhaseless/Byparr/blob/master/README.md",
"website": "https://github.com/ThePhaseless/Byparr",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/byparr.webp",
"config_path": "",
"description": "Byparr is a proxy server to bypass Cloudflare and DDoS-GUARD protection.",
"install_methods": [
{
"type": "default",
"script": "ct/byparr.sh",
"resources": {
"cpu": 2,
"ram": 2048,
"hdd": 4,
"os": "debian",
"version": "13"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": []
}

View File

@@ -33,11 +33,7 @@
},
"notes": [
{
"text": "After install, run: cloudflared tunnel login && cloudflared tunnel create <NAME>",
"type": "info"
},
{
"text": "Or create tunnel via Cloudflare Zero Trust Dashboard",
"text": "With an option to configure cloudflared as a DNS-over-HTTPS (DoH) proxy",
"type": "info"
}
]

View File

@@ -20,7 +20,7 @@
"script": "ct/immich.sh",
"resources": {
"cpu": 4,
"ram": 6144,
"ram": 4096,
"hdd": 20,
"os": "Debian",
"version": "13"

View File

@@ -21,7 +21,7 @@
"resources": {
"cpu": 2,
"ram": 4096,
"hdd": 8,
"hdd": 6,
"os": "debian",
"version": "13"
}

View File

@@ -1,51 +0,0 @@
{
"name": "Loki",
"slug": "loki",
"categories": [
9
],
"date_created": "2026-01-22",
"type": "ct",
"updateable": true,
"privileged": false,
"interface_port": 3100,
"documentation": "https://grafana.com/docs/loki/latest/",
"website": "https://github.com/grafana/loki",
"logo": "https://cdn.jsdelivr.net/gh/selfhst/icons@main/webp/loki.webp",
"config_path": "Debian: /etc/loki/config.yml | Alpine: /etc/loki/loki-local-config.yaml",
"description": "Grafana Loki is a set of open source components that can be composed into a fully featured logging stack. A small index and highly compressed chunks simplifies the operation and significantly lowers the cost of Loki.",
"install_methods": [
{
"type": "default",
"script": "ct/loki.sh",
"resources": {
"cpu": 1,
"ram": 512,
"hdd": 2,
"os": "debian",
"version": "13"
}
},
{
"type": "alpine",
"script": "ct/alpine-loki.sh",
"resources": {
"cpu": 1,
"ram": 256,
"hdd": 1,
"os": "alpine",
"version": "3.23"
}
}
],
"default_credentials": {
"username": null,
"password": null
},
"notes": [
{
"text": "Promtail can be optionally installed during setup to collect and ship logs to Loki.",
"type": "info"
}
]
}

File diff suppressed because it is too large.

View File

@@ -1,86 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 community-scripts ORG
# Author: hoholms
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/grafana/loki
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Loki"
$STD apk add loki
$STD sed -i '/http_addr/s/127.0.0.1/0.0.0.0/g' /etc/conf.d/loki
mkdir -p /var/lib/loki/{chunks,boltdb-shipper-active,boltdb-shipper-cache}
chown -R loki:grafana /var/lib/loki
mkdir -p /var/log/loki
chown -R loki:grafana /var/log/loki
cat <<EOF >/etc/loki/loki-local-config.yaml
auth_enabled: false
server:
http_listen_port: 3100
log_level: info
common:
instance_addr: 127.0.0.1
path_prefix: /var/lib/loki
storage:
filesystem:
chunks_directory: /var/lib/loki/chunks
rules_directory: /var/lib/loki/rules
replication_factor: 1
ring:
kvstore:
store: inmemory
schema_config:
configs:
- from: 2020-10-24
store: tsdb
object_store: filesystem
schema: v13
index:
prefix: index_
period: 24h
query_range:
results_cache:
cache:
embedded_cache:
enabled: true
max_size_mb: 100
limits_config:
metric_aggregation_enabled: true
ruler:
alertmanager_url: http://localhost:9093
EOF
chown loki:grafana /etc/loki/loki-local-config.yaml
chmod 644 /etc/loki/loki-local-config.yaml
echo "output_log=\"\${output_log:-/var/log/loki/output.log}\"" >> /etc/init.d/loki
echo "error_log=\"\${error_log:-/var/log/loki/error.log}\"" >> /etc/init.d/loki
echo "start_stop_daemon_args=\"\${SSD_OPTS} -1 \${output_log} -2 \${error_log}\"" >> /etc/init.d/loki
$STD rc-update add loki default
$STD rc-service loki start
msg_ok "Installed Loki"
read -rp "Would you like to install Promtail? (y/N): " INSTALL_PROMTAIL
if [[ "${INSTALL_PROMTAIL,,}" =~ ^(y|yes)$ ]]; then
msg_info "Installing Promtail"
$STD apk add loki-promtail
$STD sed -i '/http_addr/s/127.0.0.1/0.0.0.0/g' /etc/conf.d/loki-promtail
$STD rc-update add loki-promtail default
$STD rc-service loki-promtail start
msg_ok "Installed Promtail"
fi
motd_ssh
customize
cleanup_lxc

View File

@@ -1,50 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: luismco
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/ThePhaseless/Byparr
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt -y install \
xauth \
xvfb \
scrot \
chromium \
chromium-driver \
ca-certificates
msg_ok "Installed Dependencies"
fetch_and_deploy_gh_release "Byparr" "ThePhaseless/Byparr" "tarball" "latest"
setup_uv
msg_info "Creating Service"
cat <<EOF >/etc/systemd/system/byparr.service
[Unit]
Description=Byparr
After=network.target
[Service]
Type=simple
WorkingDirectory=/opt/Byparr
ExecStart=/usr/local/bin/uv run python3 main.py
Restart=on-failure
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now byparr
msg_ok "Created Service"
motd_ssh
customize
cleanup_lxc

View File

@@ -21,6 +21,9 @@ rm -rf /opt/checkmk.deb
echo "${RELEASE}" >"/opt/checkmk_version.txt"
msg_ok "Installed Checkmk"
motd_ssh
customize
msg_info "Creating Service"
SITE_NAME="monitoring"
$STD omd create "$SITE_NAME"
@@ -39,5 +42,3 @@ $STD omd start "$SITE_NAME"
msg_ok "Created Service"
cleanup_lxc
motd_ssh
customize

View File

@@ -23,6 +23,41 @@ setup_deb822_repo \
$STD apt install -y cloudflared
msg_ok "Installed Cloudflared"
read -r -p "${TAB3}Would you like to configure cloudflared as a DNS-over-HTTPS (DoH) proxy? <y/N> " prompt
if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then
msg_info "Creating Service"
cat <<EOF >/usr/local/etc/cloudflared/config.yml
proxy-dns: true
proxy-dns-address: 0.0.0.0
proxy-dns-port: 53
proxy-dns-max-upstream-conns: 5
proxy-dns-upstream:
- https://1.1.1.1/dns-query
- https://1.0.0.1/dns-query
#- https://8.8.8.8/dns-query
#- https://8.8.4.4/dns-query
#- https://9.9.9.9/dns-query
#- https://149.112.112.112/dns-query
EOF
cat <<EOF >/etc/systemd/system/cloudflared.service
[Unit]
Description=cloudflared DNS-over-HTTPS (DoH) proxy
After=syslog.target network-online.target
[Service]
Type=simple
ExecStart=/usr/local/bin/cloudflared --config /usr/local/etc/cloudflared/config.yml
Restart=on-failure
RestartSec=10
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now cloudflared
msg_ok "Created Service"
fi
motd_ssh
customize
cleanup_lxc

View File

@@ -34,7 +34,7 @@ msg_info "Setup Dolibarr"
BASE="https://sourceforge.net/projects/dolibarr/files/Dolibarr%20installer%20for%20Debian-Ubuntu%20(DoliDeb)/"
RELEASE=$(curl -fsSL "$BASE" | grep -oP '(?<=/Dolibarr%20installer%20for%20Debian-Ubuntu%20%28DoliDeb%29/)\d+(\.\d+)+(?=/)' | sort -V | tail -n1)
FILE=$(curl -fsSL "${BASE}${RELEASE}/" | grep -oP 'dolibarr_[^"]+_all.deb' | head -n1)
curl -fsSL "https://altushost-swe.dl.sourceforge.net/project/dolibarr/Dolibarr%20installer%20for%20Debian-Ubuntu%20(DoliDeb)/${RELEASE}/${FILE}?viasf=1" -o ""$FILE""
curl -fsSL "https://netcologne.dl.sourceforge.net/project/dolibarr/Dolibarr%20installer%20for%20Debian-Ubuntu%20(DoliDeb)/${RELEASE}/${FILE}?viasf=1" -o ""$FILE""
echo "dolibarr dolibarr/reconfigure-webserver multiselect apache2" | debconf-set-selections
$STD apt-get install ./$FILE -y
$STD apt install -f

View File

@@ -36,9 +36,8 @@ cd /opt/joplin-server
sed -i "/onenote-converter/d" packages/lib/package.json
$STD yarn config set --home enableTelemetry 0
export BUILD_SEQUENCIAL=1
$STD yarn workspaces focus @joplin/server
$STD yarn workspaces foreach -R --topological-dev --from @joplin/server run build
$STD yarn workspaces foreach -R --topological-dev --from @joplin/server run tsc
$STD yarn install --inline-builds
cat <<EOF >/opt/joplin-server/.env
PM2_HOME=/opt/pm2
NODE_ENV=production

View File

@@ -1,83 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 community-scripts ORG
# Author: bysinka-95
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/grafana/loki
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
setup_deb822_repo \
"grafana" \
"https://apt.grafana.com/gpg.key" \
"https://apt.grafana.com" \
"stable" \
"main"
msg_info "Installing Loki"
$STD apt install -y loki
mkdir -p /var/lib/loki/{chunks,boltdb-shipper-active,boltdb-shipper-cache}
chown -R loki /var/lib/loki
cat <<EOF >/etc/loki/config.yml
auth_enabled: false
server:
http_listen_port: 3100
log_level: info
common:
instance_addr: 127.0.0.1
path_prefix: /var/lib/loki
storage:
filesystem:
chunks_directory: /var/lib/loki/chunks
rules_directory: /var/lib/loki/rules
replication_factor: 1
ring:
kvstore:
store: inmemory
schema_config:
configs:
- from: 2020-10-24
store: tsdb
object_store: filesystem
schema: v13
index:
prefix: index_
period: 24h
query_range:
results_cache:
cache:
embedded_cache:
enabled: true
max_size_mb: 100
limits_config:
metric_aggregation_enabled: true
ruler:
alertmanager_url: http://localhost:9093
EOF
chown loki /etc/loki/config.yml
systemctl enable -q --now loki
msg_ok "Installed Loki"
read -rp "Would you like to install Promtail? (y/N): " INSTALL_PROMTAIL
if [[ "${INSTALL_PROMTAIL,,}" =~ ^(y|yes)$ ]]; then
msg_info "Installing Promtail"
$STD apt install -y promtail
systemctl enable -q --now promtail
msg_ok "Installed Promtail"
fi
motd_ssh
customize
cleanup_lxc

View File

@@ -59,7 +59,7 @@ msg_ok "Installed Python Dependencies"
msg_info "Creating Service"
cat <<EOF >/opt/yubal.env
YUBAL_HOST=0.0.0.0
YUBAL_PORT=8000
YUBAL_PORT=8001
YUBAL_DATA=/opt/yubal_data
YUBAL_CONFIG=/opt/yubal_config
YUBAL_ROOT=/opt/yubal
@@ -76,7 +76,7 @@ User=root
WorkingDirectory=/opt/yubal
EnvironmentFile=/opt/yubal.env
Environment="PATH=/opt/yubal/.venv/bin:/usr/local/bin:/usr/bin:/bin"
ExecStart=/opt/yubal/.venv/bin/python -m yubal_api
ExecStart=/opt/yubal/.venv/bin/python -m yubal
Restart=always
RestartSec=5

View File

@@ -357,268 +357,6 @@ validate_hostname() {
return 0
}
# ------------------------------------------------------------------------------
# validate_mac_address()
#
# - Validates MAC address format (XX:XX:XX:XX:XX:XX)
# - Empty value is allowed (auto-generated)
# - Returns 0 if valid, 1 if invalid
# ------------------------------------------------------------------------------
validate_mac_address() {
local mac="$1"
[[ -z "$mac" ]] && return 0
if [[ ! "$mac" =~ ^([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}$ ]]; then
return 1
fi
return 0
}
# ------------------------------------------------------------------------------
# validate_vlan_tag()
#
# - Validates VLAN tag (1-4094)
# - Empty value is allowed (no VLAN)
# - Returns 0 if valid, 1 if invalid
# ------------------------------------------------------------------------------
validate_vlan_tag() {
local vlan="$1"
[[ -z "$vlan" ]] && return 0
if ! [[ "$vlan" =~ ^[0-9]+$ ]] || ((vlan < 1 || vlan > 4094)); then
return 1
fi
return 0
}
# ------------------------------------------------------------------------------
# validate_mtu()
#
# - Validates MTU size (576-65535, common values: 1500, 9000)
# - Empty value is allowed (default 1500)
# - Returns 0 if valid, 1 if invalid
# ------------------------------------------------------------------------------
validate_mtu() {
local mtu="$1"
[[ -z "$mtu" ]] && return 0
if ! [[ "$mtu" =~ ^[0-9]+$ ]] || ((mtu < 576 || mtu > 65535)); then
return 1
fi
return 0
}
# ------------------------------------------------------------------------------
# validate_ipv6_address()
#
# - Validates IPv6 address with optional CIDR notation
# - Supports compressed (::) and full notation
# - Empty value is allowed
# - Returns 0 if valid, 1 if invalid
# ------------------------------------------------------------------------------
validate_ipv6_address() {
local ipv6="$1"
[[ -z "$ipv6" ]] && return 0
# Extract address and CIDR
local addr="${ipv6%%/*}"
local cidr="${ipv6##*/}"
# Validate CIDR if present (1-128)
if [[ "$ipv6" == */* ]]; then
if ! [[ "$cidr" =~ ^[0-9]+$ ]] || ((cidr < 1 || cidr > 128)); then
return 1
fi
fi
# Basic IPv6 validation - check for valid characters and structure
# Must contain only hex digits and colons
if [[ ! "$addr" =~ ^[0-9a-fA-F:]+$ ]]; then
return 1
fi
# Must contain at least one colon
if [[ ! "$addr" == *:* ]]; then
return 1
fi
# Check for valid double-colon usage (only one :: allowed)
if [[ "$addr" == *::*::* ]]; then
return 1
fi
# Check that no segment exceeds 4 hex chars
local IFS=':'
local -a segments
read -ra segments <<< "$addr"
for seg in "${segments[@]}"; do
if [[ ${#seg} -gt 4 ]]; then
return 1
fi
done
return 0
}
# ------------------------------------------------------------------------------
# validate_bridge()
#
# - Validates that network bridge exists and is active
# - Returns 0 if valid, 1 if invalid
# ------------------------------------------------------------------------------
validate_bridge() {
local bridge="$1"
[[ -z "$bridge" ]] && return 1
# Check if bridge interface exists
if ! ip link show "$bridge" &>/dev/null; then
return 1
fi
return 0
}
# ------------------------------------------------------------------------------
# validate_gateway_in_subnet()
#
# - Validates that gateway IP is in the same subnet as static IP
# - Arguments: static_ip (with CIDR), gateway_ip
# - Returns 0 if valid, 1 if invalid
# ------------------------------------------------------------------------------
validate_gateway_in_subnet() {
local static_ip="$1"
local gateway="$2"
[[ -z "$static_ip" || -z "$gateway" ]] && return 0
# Extract IP and CIDR
local ip="${static_ip%%/*}"
local cidr="${static_ip##*/}"
# Convert CIDR to netmask bits
local mask=$((0xFFFFFFFF << (32 - cidr) & 0xFFFFFFFF))
# Convert IPs to integers
local IFS='.'
read -r i1 i2 i3 i4 <<< "$ip"
read -r g1 g2 g3 g4 <<< "$gateway"
local ip_int=$(( (i1 << 24) + (i2 << 16) + (i3 << 8) + i4 ))
local gw_int=$(( (g1 << 24) + (g2 << 16) + (g3 << 8) + g4 ))
# Check if both are in same network
if (( (ip_int & mask) != (gw_int & mask) )); then
return 1
fi
return 0
}
# ------------------------------------------------------------------------------
# validate_ip_address()
#
# - Validates IPv4 address with CIDR notation
# - Checks each octet is 0-255
# - Checks CIDR is 1-32
# - Returns 0 if valid, 1 if invalid
# ------------------------------------------------------------------------------
validate_ip_address() {
local ip="$1"
[[ -z "$ip" ]] && return 1
# Check format with CIDR
if [[ ! "$ip" =~ ^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})/([0-9]{1,2})$ ]]; then
return 1
fi
local o1="${BASH_REMATCH[1]}"
local o2="${BASH_REMATCH[2]}"
local o3="${BASH_REMATCH[3]}"
local o4="${BASH_REMATCH[4]}"
local cidr="${BASH_REMATCH[5]}"
# Validate octets (0-255)
for octet in "$o1" "$o2" "$o3" "$o4"; do
if ((octet > 255)); then
return 1
fi
done
# Validate CIDR (1-32)
if ((cidr < 1 || cidr > 32)); then
return 1
fi
return 0
}
# ------------------------------------------------------------------------------
# validate_gateway_ip()
#
# - Validates gateway IPv4 address (without CIDR)
# - Checks each octet is 0-255
# - Returns 0 if valid, 1 if invalid
# ------------------------------------------------------------------------------
validate_gateway_ip() {
local ip="$1"
[[ -z "$ip" ]] && return 0
# Check format without CIDR
if [[ ! "$ip" =~ ^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})$ ]]; then
return 1
fi
local o1="${BASH_REMATCH[1]}"
local o2="${BASH_REMATCH[2]}"
local o3="${BASH_REMATCH[3]}"
local o4="${BASH_REMATCH[4]}"
# Validate octets (0-255)
for octet in "$o1" "$o2" "$o3" "$o4"; do
if ((octet > 255)); then
return 1
fi
done
return 0
}
# ------------------------------------------------------------------------------
# validate_timezone()
#
# - Validates timezone string against system zoneinfo
# - Empty value or "host" is allowed
# - Returns 0 if valid, 1 if invalid
# ------------------------------------------------------------------------------
validate_timezone() {
local tz="$1"
[[ -z "$tz" || "$tz" == "host" ]] && return 0
# Check if timezone file exists
if [[ ! -f "/usr/share/zoneinfo/$tz" ]]; then
return 1
fi
return 0
}
# ------------------------------------------------------------------------------
# validate_tags()
#
# - Validates Proxmox tags format
# - Only alphanumeric, hyphens, underscores, and semicolons allowed
# - Empty value is allowed
# - Returns 0 if valid, 1 if invalid
# ------------------------------------------------------------------------------
validate_tags() {
local tags="$1"
[[ -z "$tags" ]] && return 0
# Tags can only contain alphanumeric, -, _, and ; (separator)
if [[ ! "$tags" =~ ^[a-zA-Z0-9_\;-]+$ ]]; then
return 1
fi
return 0
}
# ------------------------------------------------------------------------------
# find_host_ssh_keys()
#
@@ -1034,119 +772,6 @@ load_vars_file() {
# Trim trailing whitespace
var_val="${var_val%"${var_val##*[![:space:]]}"}"
# Validate values before setting (skip empty values - they use defaults)
if [[ -n "$var_val" ]]; then
case "$var_key" in
var_mac)
if ! validate_mac_address "$var_val"; then
msg_warn "Invalid MAC address '$var_val' in $file, ignoring"
continue
fi
;;
var_vlan)
if ! validate_vlan_tag "$var_val"; then
msg_warn "Invalid VLAN tag '$var_val' in $file (must be 1-4094), ignoring"
continue
fi
;;
var_mtu)
if ! validate_mtu "$var_val"; then
msg_warn "Invalid MTU '$var_val' in $file (must be 576-65535), ignoring"
continue
fi
;;
var_tags)
if ! validate_tags "$var_val"; then
msg_warn "Invalid tags '$var_val' in $file (alphanumeric, -, _, ; only), ignoring"
continue
fi
;;
var_timezone)
if ! validate_timezone "$var_val"; then
msg_warn "Invalid timezone '$var_val' in $file, ignoring"
continue
fi
;;
var_brg)
if ! validate_bridge "$var_val"; then
msg_warn "Bridge '$var_val' not found in $file, ignoring"
continue
fi
;;
var_gateway)
if ! validate_gateway_ip "$var_val"; then
msg_warn "Invalid gateway IP '$var_val' in $file, ignoring"
continue
fi
;;
var_hostname)
if ! validate_hostname "$var_val"; then
msg_warn "Invalid hostname '$var_val' in $file, ignoring"
continue
fi
;;
var_cpu)
if ! [[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 1 || var_val > 128)); then
msg_warn "Invalid CPU count '$var_val' in $file (must be 1-128), ignoring"
continue
fi
;;
var_ram)
if ! [[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 256)); then
msg_warn "Invalid RAM '$var_val' in $file (must be >= 256 MiB), ignoring"
continue
fi
;;
var_disk)
if ! [[ "$var_val" =~ ^[0-9]+$ ]] || ((var_val < 1)); then
msg_warn "Invalid disk size '$var_val' in $file (must be >= 1 GB), ignoring"
continue
fi
;;
var_unprivileged)
if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then
msg_warn "Invalid unprivileged value '$var_val' in $file (must be 0 or 1), ignoring"
continue
fi
;;
var_nesting)
if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then
msg_warn "Invalid nesting value '$var_val' in $file (must be 0 or 1), ignoring"
continue
fi
;;
var_keyctl)
if [[ "$var_val" != "0" && "$var_val" != "1" ]]; then
msg_warn "Invalid keyctl value '$var_val' in $file (must be 0 or 1), ignoring"
continue
fi
;;
var_net)
# var_net can be: dhcp, static IP/CIDR, or IP range
if [[ "$var_val" != "dhcp" ]]; then
if is_ip_range "$var_val"; then
: # IP range is valid, will be resolved at runtime
elif ! validate_ip_address "$var_val"; then
msg_warn "Invalid network '$var_val' in $file (must be dhcp or IP/CIDR), ignoring"
continue
fi
fi
;;
var_fuse|var_tun|var_gpu|var_ssh|var_verbose|var_protection)
if [[ "$var_val" != "yes" && "$var_val" != "no" ]]; then
msg_warn "Invalid boolean '$var_val' for $var_key in $file (must be yes/no), ignoring"
continue
fi
;;
var_ipv6_method)
if [[ "$var_val" != "auto" && "$var_val" != "dhcp" && "$var_val" != "static" && "$var_val" != "none" ]]; then
msg_warn "Invalid IPv6 method '$var_val' in $file (must be auto/dhcp/static/none), ignoring"
continue
fi
;;
esac
fi
# Set variable: force mode overrides existing, otherwise only set if empty
if [[ "$force" == "yes" ]]; then
export "${var_key}=${var_val}"
@@ -1987,14 +1612,8 @@ advanced_settings() {
# ═══════════════════════════════════════════════════════════════════════════
8)
if [[ ${#BRIDGE_MENU_OPTIONS[@]} -eq 0 ]]; then
# Validate default bridge exists
if validate_bridge "vmbr0"; then
_bridge="vmbr0"
((STEP++))
else
whiptail --msgbox "Default bridge 'vmbr0' not found!\n\nPlease configure a network bridge in Proxmox first." 10 58
exit 1
fi
_bridge="vmbr0"
((STEP++))
else
if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \
--title "NETWORK BRIDGE" \
@@ -2002,13 +1621,8 @@ advanced_settings() {
--menu "\nSelect network bridge:" 16 58 6 \
"${BRIDGE_MENU_OPTIONS[@]}" \
3>&1 1>&2 2>&3); then
local bridge_test="${result:-vmbr0}"
if validate_bridge "$bridge_test"; then
_bridge="$bridge_test"
((STEP++))
else
whiptail --msgbox "Bridge '$bridge_test' is not available or not active." 8 58
fi
_bridge="${result:-vmbr0}"
((STEP++))
else
((STEP--))
fi
@@ -2036,7 +1650,7 @@ advanced_settings() {
--ok-button "Next" --cancel-button "Back" \
--inputbox "\nEnter Static IPv4 CIDR Address\n(e.g. 192.168.1.100/24)" 12 58 "" \
3>&1 1>&2 2>&3); then
if validate_ip_address "$static_ip"; then
if [[ "$static_ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$ ]]; then
# Get gateway
local gateway_ip
if gateway_ip=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \
@@ -2044,21 +1658,16 @@ advanced_settings() {
--ok-button "Next" --cancel-button "Back" \
--inputbox "\nEnter Gateway IP address" 10 58 "" \
3>&1 1>&2 2>&3); then
if validate_gateway_ip "$gateway_ip"; then
# Validate gateway is in same subnet
if validate_gateway_in_subnet "$static_ip" "$gateway_ip"; then
_net="$static_ip"
_gate=",gw=$gateway_ip"
((STEP++))
else
whiptail --msgbox "Gateway is not in the same subnet as the static IP.\n\nStatic IP: $static_ip\nGateway: $gateway_ip" 10 58
fi
if [[ "$gateway_ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
_net="$static_ip"
_gate=",gw=$gateway_ip"
((STEP++))
else
whiptail --msgbox "Invalid Gateway IP format.\n\nEach octet must be 0-255.\nExample: 192.168.1.1" 10 58
whiptail --msgbox "Invalid Gateway IP format." 8 58
fi
fi
else
whiptail --msgbox "Invalid IPv4 CIDR format.\n\nEach octet must be 0-255.\nCIDR must be 1-32.\nExample: 192.168.1.100/24" 12 58
whiptail --msgbox "Invalid IPv4 CIDR format.\nExample: 192.168.1.100/24" 8 58
fi
fi
elif [[ "$result" == "range" ]]; then
@@ -2082,17 +1691,12 @@ advanced_settings() {
--ok-button "Next" --cancel-button "Back" \
--inputbox "\nFound free IP: $NET_RESOLVED\n\nEnter Gateway IP address" 12 58 "" \
3>&1 1>&2 2>&3); then
if validate_gateway_ip "$gateway_ip"; then
# Validate gateway is in same subnet
if validate_gateway_in_subnet "$NET_RESOLVED" "$gateway_ip"; then
_net="$NET_RESOLVED"
_gate=",gw=$gateway_ip"
((STEP++))
else
whiptail --msgbox "Gateway is not in the same subnet as the IP.\n\nIP: $NET_RESOLVED\nGateway: $gateway_ip" 10 58
fi
if [[ "$gateway_ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
_net="$NET_RESOLVED"
_gate=",gw=$gateway_ip"
((STEP++))
else
whiptail --msgbox "Invalid Gateway IP format.\n\nEach octet must be 0-255.\nExample: 192.168.1.1" 10 58
whiptail --msgbox "Invalid Gateway IP format." 8 58
fi
fi
else
@@ -2135,33 +1739,16 @@ advanced_settings() {
--title "STATIC IPv6 ADDRESS" \
--inputbox "\nEnter IPv6 CIDR address\n(e.g. 2001:db8::1/64)" 12 58 "" \
3>&1 1>&2 2>&3); then
if validate_ipv6_address "$ipv6_addr"; then
if [[ "$ipv6_addr" =~ ^([0-9a-fA-F:]+:+)+[0-9a-fA-F]+(/[0-9]{1,3})$ ]]; then
_ipv6_addr="$ipv6_addr"
# Optional gateway - loop until valid or empty
local ipv6_gw_valid=false
while [[ "$ipv6_gw_valid" == "false" ]]; do
local ipv6_gw
ipv6_gw=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
--title "IPv6 GATEWAY" \
--inputbox "\nEnter IPv6 gateway (optional, leave blank for none)" 10 58 "" \
3>&1 1>&2 2>&3) || true
# Validate gateway if provided
if [[ -n "$ipv6_gw" ]]; then
if validate_ipv6_address "$ipv6_gw"; then
_ipv6_gate="$ipv6_gw"
ipv6_gw_valid=true
((STEP++))
else
whiptail --msgbox "Invalid IPv6 gateway format.\n\nExample: 2001:db8::1" 8 58
fi
else
_ipv6_gate=""
ipv6_gw_valid=true
((STEP++))
fi
done
# Optional gateway
_ipv6_gate=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
--title "IPv6 GATEWAY" \
--inputbox "\nEnter IPv6 gateway (optional, leave blank for none)" 10 58 "" \
3>&1 1>&2 2>&3) || true
((STEP++))
else
whiptail --msgbox "Invalid IPv6 CIDR format.\n\nExample: 2001:db8::1/64\nCIDR must be 1-128." 10 58
whiptail --msgbox "Invalid IPv6 CIDR format." 8 58
fi
fi
;;
@@ -2194,14 +1781,10 @@ advanced_settings() {
if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \
--title "MTU SIZE" \
--ok-button "Next" --cancel-button "Back" \
--inputbox "\nSet Interface MTU Size\n(leave blank for default 1500, common values: 1500, 9000)" 12 62 "" \
--inputbox "\nSet Interface MTU Size\n(leave blank for default 1500)" 12 58 "" \
3>&1 1>&2 2>&3); then
if validate_mtu "$result"; then
_mtu="$result"
((STEP++))
else
whiptail --msgbox "Invalid MTU size.\n\nMTU must be between 576 and 65535.\nCommon values: 1500 (default), 9000 (jumbo frames)" 10 58
fi
_mtu="$result"
((STEP++))
else
((STEP--))
fi
@@ -2246,14 +1829,10 @@ advanced_settings() {
if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \
--title "MAC ADDRESS" \
--ok-button "Next" --cancel-button "Back" \
--inputbox "\nSet MAC Address\n(leave blank for auto-generated, format: XX:XX:XX:XX:XX:XX)" 12 62 "" \
--inputbox "\nSet MAC Address\n(leave blank for auto-generated)" 12 58 "" \
3>&1 1>&2 2>&3); then
if validate_mac_address "$result"; then
_mac="$result"
((STEP++))
else
whiptail --msgbox "Invalid MAC address format.\n\nRequired format: XX:XX:XX:XX:XX:XX\nExample: 02:00:00:00:00:01" 10 58
fi
_mac="$result"
((STEP++))
else
((STEP--))
fi
@@ -2266,14 +1845,10 @@ advanced_settings() {
if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \
--title "VLAN TAG" \
--ok-button "Next" --cancel-button "Back" \
--inputbox "\nSet VLAN Tag (1-4094)\n(leave blank for no VLAN)" 12 58 "" \
--inputbox "\nSet VLAN Tag\n(leave blank for no VLAN)" 12 58 "" \
3>&1 1>&2 2>&3); then
if validate_vlan_tag "$result"; then
_vlan="$result"
((STEP++))
else
whiptail --msgbox "Invalid VLAN tag.\n\nVLAN must be a number between 1 and 4094." 8 58
fi
_vlan="$result"
((STEP++))
else
((STEP--))
fi
@@ -2286,16 +1861,11 @@ advanced_settings() {
if result=$(whiptail --backtitle "Proxmox VE Helper Scripts [Step $STEP/$MAX_STEP]" \
--title "CONTAINER TAGS" \
--ok-button "Next" --cancel-button "Back" \
--inputbox "\nSet Custom Tags (semicolon-separated)\n(alphanumeric, hyphens, underscores only)" 12 58 "$_tags" \
--inputbox "\nSet Custom Tags (semicolon-separated)\n(remove all for no tags)" 12 58 "$_tags" \
3>&1 1>&2 2>&3); then
local tags_test="${result:-}"
tags_test=$(echo "$tags_test" | tr -d '[:space:]')
if validate_tags "$tags_test"; then
_tags="$tags_test"
((STEP++))
else
whiptail --msgbox "Invalid tag format.\n\nTags can only contain:\n- Letters (a-z, A-Z)\n- Numbers (0-9)\n- Hyphens (-)\n- Underscores (_)\n- Semicolons (;) as separator" 14 58
fi
_tags="${result:-;}"
_tags=$(echo "$_tags" | tr -d '[:space:]')
((STEP++))
else
((STEP--))
fi
@@ -2474,14 +2044,9 @@ advanced_settings() {
--ok-button "Next" --cancel-button "Back" \
--inputbox "\nSet container timezone.\n\nExamples: Europe/Berlin, America/New_York, Asia/Tokyo\n\nHost timezone: ${_host_timezone:-unknown}\n\nLeave empty to inherit from host." 16 62 "$_ct_timezone" \
3>&1 1>&2 2>&3); then
local tz_test="$result"
[[ "${tz_test:-}" == Etc/* ]] && tz_test="host" # pct doesn't accept Etc/* zones
if validate_timezone "$tz_test"; then
_ct_timezone="$tz_test"
((STEP++))
else
whiptail --msgbox "Invalid timezone: '$result'\n\nTimezone must exist in /usr/share/zoneinfo/\n\nExamples:\n- Europe/Berlin\n- America/New_York\n- Asia/Tokyo\n- UTC" 14 58
fi
_ct_timezone="$result"
[[ "${_ct_timezone:-}" == Etc/* ]] && _ct_timezone="host" # pct doesn't accept Etc/* zones
((STEP++))
else
((STEP--))
fi
@@ -3278,7 +2843,6 @@ start() {
elif [ ! -z ${PHS_SILENT+x} ] && [[ "${PHS_SILENT}" == "1" ]]; then
VERBOSE="no"
set_std_mode
ensure_profile_loaded
update_script
cleanup_lxc
else
@@ -3304,7 +2868,6 @@ start() {
exit
;;
esac
ensure_profile_loaded
update_script
cleanup_lxc
fi
@@ -3442,13 +3005,8 @@ build_container() {
export DEV_MODE_DRYRUN="${DEV_MODE_DRYRUN:-false}"
# Build PCT_OPTIONS as multi-line string
PCT_OPTIONS_STRING=" -hostname $HN"
# Only add -tags if TAGS is not empty
if [ -n "$TAGS" ]; then
PCT_OPTIONS_STRING="$PCT_OPTIONS_STRING
PCT_OPTIONS_STRING=" -hostname $HN
-tags $TAGS"
fi
# Only add -features if FEATURES is not empty
if [ -n "$FEATURES" ]; then
@@ -4478,42 +4036,37 @@ create_lxc_container() {
msg_info "Searching for template '$TEMPLATE_SEARCH'"
# Initialize variables
ONLINE_TEMPLATE=""
ONLINE_TEMPLATES=()
# Step 1: Check local templates first (instant)
mapfile -t LOCAL_TEMPLATES < <(
pveam list "$TEMPLATE_STORAGE" 2>/dev/null |
awk -v search="${TEMPLATE_SEARCH}" -v pattern="${TEMPLATE_PATTERN}" '$1 ~ search && $1 ~ pattern {print $1}' |
sed 's|.*/||' | sort -t - -k 2 -V
)
# Step 2: If local template found, use it immediately (skip pveam update)
pveam update >/dev/null 2>&1 || msg_warn "Could not update template catalog (pveam update failed)."
msg_ok "Template search completed"
set +u
mapfile -t ONLINE_TEMPLATES < <(pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | awk '{print $2}' | grep -E "^${TEMPLATE_SEARCH}.*${TEMPLATE_PATTERN}" | sort -t - -k 2 -V 2>/dev/null || true)
set -u
ONLINE_TEMPLATE=""
[[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}"
if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then
count=0
for idx in "${!ONLINE_TEMPLATES[@]}"; do
((count++))
[[ $count -ge 3 ]] && break
done
fi
if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then
TEMPLATE="${LOCAL_TEMPLATES[-1]}"
TEMPLATE_SOURCE="local"
msg_ok "Template search completed"
else
# Step 3: No local template - need to check online (this may be slow)
msg_info "No local template found, checking online catalog..."
# Update catalog with timeout to prevent long hangs
if command -v timeout &>/dev/null; then
if ! timeout 30 pveam update >/dev/null 2>&1; then
msg_warn "Template catalog update timed out (possible network/DNS issue). Run 'pveam update' manually to diagnose."
fi
else
pveam update >/dev/null 2>&1 || msg_warn "Could not update template catalog (pveam update failed)"
fi
ONLINE_TEMPLATES=()
mapfile -t ONLINE_TEMPLATES < <(pveam available -section system 2>/dev/null | grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' | awk '{print $2}' | grep -E "^${TEMPLATE_SEARCH}.*${TEMPLATE_PATTERN}" | sort -t - -k 2 -V 2>/dev/null || true)
[[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}"
TEMPLATE="$ONLINE_TEMPLATE"
TEMPLATE_SOURCE="online"
msg_ok "Template search completed"
fi
# If still no template, try to find alternatives
@@ -4522,7 +4075,6 @@ create_lxc_container() {
echo "[DEBUG] No template found for ${PCT_OSTYPE} ${PCT_OSVERSION}, searching for alternatives..."
# Get all available versions for this OS type
AVAILABLE_VERSIONS=()
mapfile -t AVAILABLE_VERSIONS < <(
pveam available -section system 2>/dev/null |
grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
@@ -4545,7 +4097,6 @@ create_lxc_container() {
PCT_OSVERSION="${AVAILABLE_VERSIONS[$((choice - 1))]}"
TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION}"
ONLINE_TEMPLATES=()
mapfile -t ONLINE_TEMPLATES < <(
pveam available -section system 2>/dev/null |
grep -E '\.(tar\.zst|tar\.xz|tar\.gz)$' |
@@ -4755,88 +4306,50 @@ create_lxc_container() {
-rootfs $CONTAINER_STORAGE:${PCT_DISK_SIZE:-8}"
fi
# Lock by template file (avoid concurrent template downloads/validation)
# Lock by template file (avoid concurrent downloads/creates)
lockfile="/tmp/template.${TEMPLATE}.lock"
# Cleanup stale lock files (older than 1 hour - likely from crashed processes)
if [[ -f "$lockfile" ]]; then
local lock_age=$(($(date +%s) - $(stat -c %Y "$lockfile" 2>/dev/null || echo 0)))
if [[ $lock_age -gt 3600 ]]; then
msg_warn "Removing stale template lock file (age: ${lock_age}s)"
rm -f "$lockfile"
fi
fi
exec 9>"$lockfile" || {
msg_error "Failed to create lock file '$lockfile'."
exit 200
}
# Retry logic for template lock (another container creation may be running)
local lock_attempts=0
local max_lock_attempts=10
local lock_wait_time=30
while ! flock -w "$lock_wait_time" 9; do
lock_attempts=$((lock_attempts + 1))
if [[ $lock_attempts -ge $max_lock_attempts ]]; then
msg_error "Timeout while waiting for template lock after ${max_lock_attempts} attempts."
msg_custom "💡" "${YW}" "Another container creation may be stuck. Check running processes or remove: $lockfile"
exit 211
fi
msg_custom "⏳" "${YW}" "Another container is being created with this template. Waiting... (attempt ${lock_attempts}/${max_lock_attempts})"
done
flock -w 60 9 || {
msg_error "Timeout while waiting for template lock."
exit 211
}
LOGFILE="/tmp/pct_create_${CTID}_$(date +%Y%m%d_%H%M%S)_${SESSION_ID}.log"
# Validate template before pct create (while holding lock)
if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH" 2>/dev/null || echo 0)" -lt 1000000 ]]; then
msg_info "Template file missing or too small downloading"
rm -f "$TEMPLATE_PATH"
pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1
msg_ok "Template downloaded"
elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
if [[ -n "$ONLINE_TEMPLATE" ]]; then
msg_info "Template appears corrupted re-downloading"
rm -f "$TEMPLATE_PATH"
pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1
msg_ok "Template re-downloaded"
else
msg_warn "Template appears corrupted, but no online version exists. Skipping re-download."
fi
fi
# Release lock after template validation - pct create has its own internal locking
exec 9>&-
msg_debug "pct create command: pct create $CTID ${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE} $PCT_OPTIONS"
msg_debug "Logfile: $LOGFILE"
# First attempt (PCT_OPTIONS is a multi-line string, use it directly)
if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" $PCT_OPTIONS >"$LOGFILE" 2>&1; then
msg_debug "Container creation failed on ${TEMPLATE_STORAGE}. Checking error..."
msg_debug "Container creation failed on ${TEMPLATE_STORAGE}. Validating template..."
# Check if template issue - retry with fresh download
if grep -qiE 'unable to open|corrupt|invalid' "$LOGFILE"; then
msg_info "Template may be corrupted re-downloading"
# Validate template file
if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
msg_warn "Template file too small or missing re-downloading."
rm -f "$TEMPLATE_PATH"
pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1
msg_ok "Template re-downloaded"
pveam download "$TEMPLATE_STORAGE" "$TEMPLATE"
elif ! tar -tf "$TEMPLATE_PATH" &>/dev/null; then
if [[ -n "$ONLINE_TEMPLATE" ]]; then
msg_warn "Template appears corrupted re-downloading."
rm -f "$TEMPLATE_PATH"
pveam download "$TEMPLATE_STORAGE" "$TEMPLATE"
else
msg_warn "Template appears corrupted, but no online version exists. Skipping re-download."
fi
fi
# Retry after repair
if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" $PCT_OPTIONS >>"$LOGFILE" 2>&1; then
# Fallback to local storage if not already on local
if [[ "$TEMPLATE_STORAGE" != "local" ]]; then
msg_info "Retrying container creation with fallback to local storage"
msg_info "Retrying container creation with fallback to local storage..."
LOCAL_TEMPLATE_PATH="/var/lib/vz/template/cache/$TEMPLATE"
if [[ ! -f "$LOCAL_TEMPLATE_PATH" ]]; then
msg_ok "Trying local storage fallback"
msg_info "Downloading template to local"
msg_info "Downloading template to local..."
pveam download local "$TEMPLATE" >/dev/null 2>&1
msg_ok "Template downloaded to local"
else
msg_ok "Trying local storage fallback"
fi
if ! pct create "$CTID" "local:vztmpl/${TEMPLATE}" $PCT_OPTIONS >>"$LOGFILE" 2>&1; then
# Local fallback also failed - check for LXC stack version issue

View File

@@ -38,6 +38,8 @@ load_functions() {
icons
default_vars
set_std_mode
# Note: get_lxc_ip() is NOT called here automatically
# Call it explicitly when you need LOCAL_IP variable
}
# ------------------------------------------------------------------------------
@@ -127,34 +129,6 @@ icons() {
HOURGLASS="${TAB}${TAB}"
}
# ------------------------------------------------------------------------------
# ensure_profile_loaded()
#
# - Sources /etc/profile.d/*.sh scripts if not already loaded
# - Fixes PATH issues when running via pct enter/exec (non-login shells)
# - Safe to call multiple times (uses guard variable)
# - Should be called in update_script() or any script running inside LXC
# ------------------------------------------------------------------------------
ensure_profile_loaded() {
# Skip if already loaded or running on Proxmox host
[[ -n "${_PROFILE_LOADED:-}" ]] && return
command -v pveversion &>/dev/null && return
# Source all profile.d scripts to ensure PATH is complete
if [[ -d /etc/profile.d ]]; then
for script in /etc/profile.d/*.sh; do
[[ -r "$script" ]] && source "$script"
done
fi
# Also ensure /usr/local/bin is in PATH (common install location)
if [[ ":$PATH:" != *":/usr/local/bin:"* ]]; then
export PATH="/usr/local/bin:$PATH"
fi
export _PROFILE_LOADED=1
}
# ------------------------------------------------------------------------------
# default_vars()
#

View File

@@ -3601,37 +3601,57 @@ EOF
}
# ------------------------------------------------------------------------------
# Installs or updates MariaDB.
# Installs or updates MariaDB from official repo.
#
# Description:
# - Uses Debian/Ubuntu distribution packages by default (most reliable)
# - Only uses official MariaDB repository when a specific version is requested
# - Detects current MariaDB version and replaces it if necessary
# - Preserves existing database data
# - Dynamically determines latest GA version if "latest" is given
#
# Variables:
# MARIADB_VERSION - MariaDB version to install (optional)
# - Not set or "latest": Uses distribution packages (recommended)
# - Specific version (e.g. "11.4", "12.2"): Uses MariaDB official repo
# MARIADB_VERSION - MariaDB version to install (e.g. 10.11, latest) (default: latest)
# ------------------------------------------------------------------------------
setup_mariadb() {
local MARIADB_VERSION="${MARIADB_VERSION:-latest}"
local USE_DISTRO_PACKAGES=false
# Ensure non-interactive mode for all apt operations
export DEBIAN_FRONTEND=noninteractive
export NEEDRESTART_MODE=a
export NEEDRESTART_SUSPEND=1
# Determine installation method:
# - "latest" or empty: Use distribution packages (avoids mirror issues)
# - Specific version: Use MariaDB official repository
if [[ "$MARIADB_VERSION" == "latest" || -z "$MARIADB_VERSION" ]]; then
USE_DISTRO_PACKAGES=true
msg_info "Setup MariaDB (distribution packages)"
else
msg_info "Setup MariaDB $MARIADB_VERSION (official repository)"
# Resolve "latest" to actual version
if [[ "$MARIADB_VERSION" == "latest" ]]; then
if ! curl -fsI --max-time 10 http://mirror.mariadb.org/repo/ >/dev/null 2>&1; then
msg_warn "MariaDB mirror not reachable - trying mariadb_repo_setup fallback"
# Try using official mariadb_repo_setup script as fallback
if curl -fsSL --max-time 15 https://r.mariadb.com/downloads/mariadb_repo_setup 2>/dev/null | bash -s -- --skip-verify >/dev/null 2>&1; then
msg_ok "MariaDB repository configured via mariadb_repo_setup"
# Extract version from configured repo
MARIADB_VERSION=$(grep -oP 'repo/\K[0-9]+\.[0-9]+\.[0-9]+' /etc/apt/sources.list.d/mariadb.list 2>/dev/null | head -n1 || echo "12.2")
else
msg_warn "mariadb_repo_setup failed - using hardcoded fallback version"
MARIADB_VERSION="12.2"
fi
else
MARIADB_VERSION=$(curl -fsSL --max-time 15 http://mirror.mariadb.org/repo/ 2>/dev/null |
grep -Eo '[0-9]+\.[0-9]+\.[0-9]+/' |
grep -vE 'rc/|rolling/' |
sed 's|/||' |
sort -Vr |
head -n1 || echo "")
if [[ -z "$MARIADB_VERSION" ]]; then
msg_warn "Could not parse latest GA MariaDB version from mirror - trying mariadb_repo_setup"
if curl -fsSL --max-time 15 https://r.mariadb.com/downloads/mariadb_repo_setup 2>/dev/null | bash -s -- --skip-verify >/dev/null 2>&1; then
msg_ok "MariaDB repository configured via mariadb_repo_setup"
MARIADB_VERSION=$(grep -oP 'repo/\K[0-9]+\.[0-9]+\.[0-9]+' /etc/apt/sources.list.d/mariadb.list 2>/dev/null | head -n1 || echo "12.2")
else
msg_warn "mariadb_repo_setup failed - using hardcoded fallback version"
MARIADB_VERSION="12.2"
fi
fi
fi
fi
# Get currently installed version
@@ -3639,105 +3659,17 @@ setup_mariadb() {
CURRENT_VERSION=$(is_tool_installed "mariadb" 2>/dev/null) || true
# Pre-configure debconf to prevent any interactive prompts during install/upgrade
debconf-set-selections <<EOF
local MARIADB_MAJOR_MINOR
MARIADB_MAJOR_MINOR=$(echo "$MARIADB_VERSION" | awk -F. '{print $1"."$2}')
if [[ -n "$MARIADB_MAJOR_MINOR" ]]; then
debconf-set-selections <<EOF
mariadb-server-$MARIADB_MAJOR_MINOR mariadb-server/feedback boolean false
mariadb-server-$MARIADB_MAJOR_MINOR mariadb-server/root_password password
mariadb-server-$MARIADB_MAJOR_MINOR mariadb-server/root_password_again password
mariadb-server mariadb-server/feedback boolean false
mariadb-server mariadb-server/root_password password
mariadb-server mariadb-server/root_password_again password
EOF
# If specific version requested, also configure version-specific debconf
if [[ "$USE_DISTRO_PACKAGES" == "false" ]]; then
local MARIADB_MAJOR_MINOR
MARIADB_MAJOR_MINOR=$(echo "$MARIADB_VERSION" | awk -F. '{print $1"."$2}')
if [[ -n "$MARIADB_MAJOR_MINOR" ]]; then
debconf-set-selections <<EOF
mariadb-server-$MARIADB_MAJOR_MINOR mariadb-server/feedback boolean false
mariadb-server-$MARIADB_MAJOR_MINOR mariadb-server/root_password password
mariadb-server-$MARIADB_MAJOR_MINOR mariadb-server/root_password_again password
EOF
fi
fi
# ============================================================================
# DISTRIBUTION PACKAGES PATH (default, most reliable)
# ============================================================================
if [[ "$USE_DISTRO_PACKAGES" == "true" ]]; then
# Check if MariaDB was previously installed from official repo
local HAD_MARIADB_REPO=false
if [[ -f /etc/apt/sources.list.d/mariadb.sources ]] || [[ -f /etc/apt/sources.list.d/mariadb.list ]]; then
HAD_MARIADB_REPO=true
msg_info "Removing MariaDB official repository (switching to distribution packages)"
fi
# Clean up any existing MariaDB repository files to avoid conflicts
cleanup_old_repo_files "mariadb"
# If we had a repo, we need to refresh APT cache
if [[ "$HAD_MARIADB_REPO" == "true" ]]; then
$STD apt update || msg_warn "APT update had issues, continuing..."
fi
# Ensure APT is working
ensure_apt_working || return 1
# Check if installed version is from official repo and higher than distro version
# In this case, we keep the existing installation to avoid data issues
if [[ -n "$CURRENT_VERSION" ]]; then
# Get available distro version
local DISTRO_VERSION=""
DISTRO_VERSION=$(apt-cache policy mariadb-server 2>/dev/null | grep -E "Candidate:" | awk '{print $2}' | grep -oP '^\d+:\K\d+\.\d+\.\d+' || echo "")
if [[ -n "$DISTRO_VERSION" ]]; then
# Compare versions - if current is higher, keep it
local CURRENT_MAJOR DISTRO_MAJOR
CURRENT_MAJOR=$(echo "$CURRENT_VERSION" | awk -F. '{print $1}')
DISTRO_MAJOR=$(echo "$DISTRO_VERSION" | awk -F. '{print $1}')
if [[ "$CURRENT_MAJOR" -gt "$DISTRO_MAJOR" ]]; then
msg_warn "MariaDB $CURRENT_VERSION is already installed (higher than distro $DISTRO_VERSION)"
msg_warn "Keeping existing installation to preserve data integrity"
msg_warn "To use distribution packages, manually remove MariaDB first"
_setup_mariadb_runtime_dir
cache_installed_version "mariadb" "$CURRENT_VERSION"
msg_ok "Setup MariaDB $CURRENT_VERSION (existing installation kept)"
return 0
fi
fi
fi
# Install or upgrade MariaDB from distribution packages
if ! install_packages_with_retry "mariadb-server" "mariadb-client"; then
msg_error "Failed to install MariaDB packages from distribution"
return 1
fi
# Get installed version for caching
local INSTALLED_VERSION=""
INSTALLED_VERSION=$(mariadb --version 2>/dev/null | grep -oP '\d+\.\d+\.\d+' | head -n1 || echo "distro")
# Configure runtime directory and finish
_setup_mariadb_runtime_dir
cache_installed_version "mariadb" "$INSTALLED_VERSION"
msg_ok "Setup MariaDB $INSTALLED_VERSION (distribution packages)"
return 0
fi
# ============================================================================
# OFFICIAL REPOSITORY PATH (only when specific version requested)
# ============================================================================
# First, check if there's an old/broken repository that needs cleanup
if [[ -f /etc/apt/sources.list.d/mariadb.sources ]] || [[ -f /etc/apt/sources.list.d/mariadb.list ]]; then
local OLD_REPO_VERSION=""
OLD_REPO_VERSION=$(grep -oP 'repo/\K[0-9]+\.[0-9]+(\.[0-9]+)?' /etc/apt/sources.list.d/mariadb.sources 2>/dev/null || \
grep -oP 'repo/\K[0-9]+\.[0-9]+(\.[0-9]+)?' /etc/apt/sources.list.d/mariadb.list 2>/dev/null || echo "")
# Check if old repo points to a different version
if [[ -n "$OLD_REPO_VERSION" ]] && [[ "${OLD_REPO_VERSION%.*}" != "${MARIADB_VERSION%.*}" ]]; then
msg_info "Cleaning up old MariaDB repository (was: $OLD_REPO_VERSION, requested: $MARIADB_VERSION)"
cleanup_old_repo_files "mariadb"
$STD apt update || msg_warn "APT update had issues, continuing..."
fi
fi
# Scenario 1: Already installed at target version - just update packages
@@ -3778,7 +3710,9 @@ EOF
remove_old_tool_version "mariadb"
fi
# Scenario 3: Fresh install or version change with specific version
# Scenario 3: Fresh install or version change
msg_info "Setup MariaDB $MARIADB_VERSION"
# Prepare repository (cleanup + validation)
prepare_repository_setup "mariadb" || {
msg_error "Failed to prepare MariaDB repository"
@@ -3806,37 +3740,21 @@ EOF
# Install packages with retry logic
if ! install_packages_with_retry "mariadb-server" "mariadb-client"; then
# Fallback: try distribution packages
msg_warn "Failed to install MariaDB $MARIADB_VERSION from official repo, falling back to distribution packages..."
# Fallback: try without specific version
msg_warn "Failed to install MariaDB packages from upstream repo, trying distro fallback..."
cleanup_old_repo_files "mariadb"
$STD apt update || {
msg_warn "APT update also failed, continuing with cache"
}
if install_packages_with_retry "mariadb-server" "mariadb-client"; then
local FALLBACK_VERSION=""
FALLBACK_VERSION=$(mariadb --version 2>/dev/null | grep -oP '\d+\.\d+\.\d+' | head -n1 || echo "distro")
msg_warn "Installed MariaDB $FALLBACK_VERSION from distribution instead of requested $MARIADB_VERSION"
_setup_mariadb_runtime_dir
cache_installed_version "mariadb" "$FALLBACK_VERSION"
msg_ok "Setup MariaDB $FALLBACK_VERSION (fallback to distribution packages)"
return 0
else
msg_error "Failed to install MariaDB packages (both official repo and distribution)"
install_packages_with_retry "mariadb-server" "mariadb-client" || {
msg_error "Failed to install MariaDB packages (both upstream and distro)"
return 1
fi
}
fi
_setup_mariadb_runtime_dir
cache_installed_version "mariadb" "$MARIADB_VERSION"
msg_ok "Setup MariaDB $MARIADB_VERSION"
}
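# Usage sketch (mirrors the doc comment above; no other variables are required):
#   setup_mariadb                            # unset or "latest" -> distribution packages
#   MARIADB_VERSION="11.4" setup_mariadb     # pinned version    -> official MariaDB repository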
# ------------------------------------------------------------------------------
# Helper function: Configure MariaDB runtime directory persistence
# ------------------------------------------------------------------------------
_setup_mariadb_runtime_dir() {
# Configure tmpfiles.d so the /run/mysqld directory is recreated on boot
# This fixes MariaDB failing to start after a container reboot
msg_info "Configuring MariaDB runtime directory persistence"
# Create tmpfiles.d configuration with error handling
if ! printf '# Ensure /run/mysqld directory exists with correct permissions for MariaDB\nd /run/mysqld 0755 mysql mysql -\n' >/etc/tmpfiles.d/mariadb.conf; then
@@ -3856,6 +3774,11 @@ _setup_mariadb_runtime_dir() {
msg_warn "mysql user not found - directory created with correct permissions but ownership not set"
fi
fi
msg_ok "Configured MariaDB runtime directory persistence"
cache_installed_version "mariadb" "$MARIADB_VERSION"
msg_ok "Setup MariaDB $MARIADB_VERSION"
}
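# Verification sketch (systemd-tmpfiles is standard systemd tooling; paths match the code above):
#   systemd-tmpfiles --create /etc/tmpfiles.d/mariadb.conf   # apply immediately, no reboot needed
#   ls -ld /run/mysqld                                       # expect ownership mysql:mysql, mode 0755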
# ------------------------------------------------------------------------------
@@ -4464,19 +4387,11 @@ EOF
return 1
}
# Use different repository based on OS
if [[ "$DISTRO_ID" == "ubuntu" ]]; then
# Ubuntu: Use ondrej/php PPA
msg_info "Adding ondrej/php PPA for Ubuntu"
$STD apt install -y software-properties-common
$STD add-apt-repository -y ppa:ondrej/php
else
# Debian: Use Sury repository
manage_tool_repository "php" "$PHP_VERSION" "" "https://packages.sury.org/debsuryorg-archive-keyring.deb" || {
msg_error "Failed to setup PHP repository"
return 1
}
fi
manage_tool_repository "php" "$PHP_VERSION" "" "https://packages.sury.org/debsuryorg-archive-keyring.deb" || {
msg_error "Failed to setup PHP repository"
return 1
}
ensure_apt_working || return 1
$STD apt update
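# Sketch (assumption - the real script may set DISTRO_ID elsewhere): the Ubuntu/Debian branch above
# only needs the distribution ID, which can be derived from os-release:
#   DISTRO_ID=$(. /etc/os-release 2>/dev/null && echo "${ID:-debian}")   # "debian" or "ubuntu"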