1 Commits

Author SHA1 Message Date
Renovate Bot
28ce4837d7 Update docker.io/redis:7.4-alpine Docker digest to 8860d05 2025-01-06 20:13:19 +00:00
35 changed files with 77 additions and 878 deletions

View File

@@ -1 +0,0 @@
AUDIOBOOKSHELF_DIR=

View File

@@ -1,13 +0,0 @@
services:
audiobookshelf:
image: ghcr.io/advplyr/audiobookshelf:2.32.1
restart: always
ports:
- 13378:80
volumes:
- ${AUDIOBOOKSHELF_DIR}/audiobooks:/audiobooks
- ${AUDIOBOOKSHELF_DIR}/podcasts:/podcasts
- ${AUDIOBOOKSHELF_DIR}/config:/config
- ${AUDIOBOOKSHELF_DIR}/metadata:/metadata
environment:
- TZ=Europe/Amsterdam

View File

@@ -1,41 +0,0 @@
# Copy this file to `.env` and fill in the values that match your setup.
# Docker Compose files under deployment/ read these variables to keep settings in one place.
#
# IMPORTANT:
# 1. This file must be named exactly ".env" (not .env.txt or .env.example)
# 2. It must be in the SAME directory as your docker-compose-*.yaml file
# 3. Do NOT use spaces around the = sign
# 4. Do NOT use quotes around values (unless required by the value itself)
# 5. After editing, restart containers: docker-compose down && docker-compose up -d
#
# SPECIAL CHARACTERS IN VALUES:
# If your password or API key contains special characters like: $ ` " ' \ # ! & * ( ) [ ] { } | ; < > ?
# you may need to:
# - Avoid quotes entirely: GEMINI_API_KEY=AIza$pecial!Key (usually works)
# - OR use single quotes if the value has $: GEMINI_API_KEY='AIza$pecial!Key'
# - OR escape with backslash: GEMINI_API_KEY=AIza\$pecial\!Key
# Most problematic characters: $ (variable expansion), ` (command substitution), " (string delimiter)
#
# TROUBLESHOOTING:
# If API keys don't work, verify:
# - File is named ".env" exactly (check with: ls -la)
# - No spaces: GEMINI_API_KEY=AIza... (not GEMINI_API_KEY = "AIza...")
# - No unescaped special characters (especially $ ` " ')
# - Restart containers after changing this file
# If all else fails, try hardcoding the value directly in docker-compose-*.yaml to isolate the issue
# --- Jellyfin ---
JELLYFIN_USER_ID=
JELLYFIN_TOKEN=
JELLYFIN_URL=https://jellyfin.smittenfeld.nl
# --- Shared backend configuration ---
AUDIOMUSE_POSTGRES_USER=audiomuse
AUDIOMUSE_POSTGRES_PASSWORD=
AUDIOMUSE_POSTGRES_DB=audiomusedb
#
## --- Remote worker integration ---
#WORKER_URL=http://worker.example.com:8029/worker
#WORKER_POSTGRES_HOST=server.example.com
#WORKER_REDIS_URL=redis://server.example.com:6379/0

View File

@@ -1,126 +0,0 @@
services:
# Redis service for RQ (task queue)
audiomuse-ai-redis:
image: redis:8-alpine
container_name: audiomuse-redis
ports:
- "6379:6379" # Expose Redis port to the host
volumes:
- redis-data:/data # Persistent storage for Redis data
networks:
- audiomuse
restart: unless-stopped
# PostgreSQL database service
audiomuse-ai-postgres:
image: postgres:15-alpine
container_name: audiomuse-postgres
env_file:
- .env
environment:
POSTGRES_USER: ${AUDIOMUSE_POSTGRES_USER}
POSTGRES_PASSWORD: ${AUDIOMUSE_POSTGRES_PASSWORD}
POSTGRES_DB: ${AUDIOMUSE_POSTGRES_DB}
ports:
- "5435:5432" # Expose PostgreSQL port to the host
volumes:
- postgres-data:/var/lib/postgresql/data # Persistent storage for PostgreSQL data
networks:
- audiomuse
restart: unless-stopped
# AudioMuse-AI Flask application service
audiomuse-ai-flask:
image: ghcr.io/neptunehub/audiomuse-ai:0.8.9 # Reflects deployment.yaml
container_name: audiomuse-ai-flask-app
ports:
- "8013:8000"
env_file:
- .env
environment:
SERVICE_TYPE: "flask" # Tells the container to run the Flask app
MEDIASERVER_TYPE: "jellyfin" # Specify the media server type
POSTGRES_USER: ${AUDIOMUSE_POSTGRES_USER}
POSTGRES_PASSWORD: ${AUDIOMUSE_POSTGRES_PASSWORD}
POSTGRES_DB: ${AUDIOMUSE_POSTGRES_DB}
POSTGRES_PORT: "5432"
POSTGRES_HOST: "audiomuse-ai-postgres" # Service name of the postgres container
REDIS_URL: "redis://audiomuse-ai-redis:6379/0" # Connects to the 'redis' service
AI_MODEL_PROVIDER: "OPENAI"
OPENAI_API_KEY: "any-random-string" # Dummy key to enable local model usage
OPENAI_SERVER_URL: "http://172.17.0.1:12434/engines/llama.cpp/v1/chat/completions" #This is the API endpoint for local DMR model from within the Docker container.
OPENAI_MODEL_NAME: "ai/qwen3:0.6B-Q4_0"
TEMP_DIR: "/app/temp_audio"
# Use tmpfs to process audio files in memory for better performance. This reduces disk I/O but might use more RAM.
# Mounted directories are not shared between containers, so each container gets its own tmpfs instance.
# Increase tmpfs size for very large audio files as needed.
# If host RAM is limited, use a Docker volume instead of tmpfs.
# For more info on tmpfs: https://docs.docker.com/engine/storage/tmpfs/
tmpfs:
- /app/temp_audio:rw,size=1000m
depends_on:
- audiomuse-ai-redis
- audiomuse-ai-postgres
restart: unless-stopped
networks:
- audiomuse
models:
- llm # Specify that LLM models are used in this service
# AudioMuse-AI RQ Worker service
audiomuse-ai-worker:
image: ghcr.io/neptunehub/audiomuse-ai:0.8.9 # Reflects deployment.yaml
container_name: audiomuse-ai-worker-instance
env_file:
- .env
environment:
SERVICE_TYPE: "worker" # Tells the container to run the RQ worker
MEDIASERVER_TYPE: "jellyfin" # Specify the media server type
JELLYFIN_USER_ID: "${JELLYFIN_USER_ID}"
JELLYFIN_TOKEN: "${JELLYFIN_TOKEN}"
JELLYFIN_URL: "${JELLYFIN_URL}"
# DATABASE_URL is now constructed by config.py from the following:
POSTGRES_USER: ${AUDIOMUSE_POSTGRES_USER}
POSTGRES_PASSWORD: ${AUDIOMUSE_POSTGRES_PASSWORD}
POSTGRES_DB: ${AUDIOMUSE_POSTGRES_DB}
POSTGRES_PORT: "5432"
POSTGRES_HOST: "audiomuse-ai-postgres" # Service name of the postgres container
REDIS_URL: "redis://audiomuse-ai-redis:6379/0" # Connects to the 'redis' service
AI_MODEL_PROVIDER: "OPENAI"
OPENAI_API_KEY: "any-random-string" # Dummy key to enable local model usage
OPENAI_SERVER_URL: "http://172.17.0.1:12434/engines/llama.cpp/v1/chat/completions" #This is the API endpoint for local DMR model from within the Docker container.
OPENAI_MODEL_NAME: "ai/qwen3:0.6B-Q4_0"
TEMP_DIR: "/app/temp_audio"
# Use tmpfs to process audio files in memory for better performance. This reduces disk I/O but might use more RAM.
# Mounted directories are not shared between containers, so each container gets its own tmpfs instance.
# Increase tmpfs size for very large audio files as needed.
# If host RAM is limited, use a Docker volume instead of tmpfs.
# For more info on tmpfs: https://docs.docker.com/engine/storage/tmpfs/
tmpfs:
- /app/temp_audio:rw,size=1000m
depends_on:
- audiomuse-ai-redis
- audiomuse-ai-postgres
restart: unless-stopped
networks:
- audiomuse
models:
- llm # Specify that LLM models are used in this service
# Using Docker Model Runner (DMR)
# - Make sure your Docker Engine version supports the AI features and that the docker-model-plugin is installed.
# - Follow Docker's setup guide: https://docs.docker.com/ai/model-runner/get-started/#docker-engine
# - Once DMR is configured, you can download and run AI models locally just like Docker images — no code changes to this compose file are required.
# - For model integration with docker-compose, see: https://docs.docker.com/ai/compose/models-and-compose/
models:
llm:
model: ai/qwen3:0.6B-Q4_0 # Lightweight local model for testing. Change as needed; if changed, ensure it matches OPENAI_MODEL_NAME.
# Define volumes for persistent data and temporary files
volumes:
redis-data:
postgres-data:
networks:
audiomuse:

View File

@@ -1,17 +1,8 @@
include:
- audiobookshelf/docker-compose.yml
- audiomuse/docker-compose.yml
- gitea/docker-compose.yml
- immich/docker-compose.yml
- jellyfin/docker-compose.yml
- monitoring/docker-compose.yml
- nextcloud/docker-compose.yml
- paperless/docker-compose.yml
- pihole/docker-compose.yml
- pingvin/docker-compose.yml
- pocketid/docker-compose.yml
- spliit/docker-compose.yml
- shlink/docker-compose.yml
- synapse/docker-compose.yml
# - updater/docker-compose.yml
- vaultwarden/docker-compose.yml
- vaultwarden/docker-compose.yml

View File

@@ -1,7 +1,7 @@
services:
gitea:
container_name: gitea
image: gitea/gitea:1.25.2-rootless
image: gitea/gitea:1.22.6-rootless
restart: always
volumes:
- ${GITEA_DATA_LOCATION}:/var/lib/gitea
@@ -14,8 +14,7 @@ services:
- "3000:3000"
- "2222:2222"
gitea-runner:
image: gitea/act_runner:0.3.0
restart: always
image: gitea/act_runner:0.2.11
environment:
CONFIG_FILE: /config.yaml
GITEA_INSTANCE_URL: "${GITEA_INSTANCE_URL}"
@@ -24,4 +23,4 @@ services:
volumes:
- ./runner-config.yaml:/config.yaml
- ${GITEA_RUNNER_DATA_LOCATION}:/data
- /var/run/docker.sock:/var/run/docker.sock
- /var/run/docker.sock:/var/run/docker.sock

View File

@@ -1,15 +1,9 @@
UPLOAD_LOCATION=
IMMICH_VERSION=
IMMICH_DB_LOCATION=
IMMICH_DB_HOSTNAME=
IMMICH_POSTGRES_DB=
IMMICH_POSTGRES_USER=
IMMICH_POSTGRES_PASSWORD=
DB_HOSTNAME=
DB_USERNAME=
DB_PASSWORD=
DB_DATABASE_NAME=
REDIS_HOSTNAME=
IMMICH_EXTERNAL_MEDIA_LOCATION=
# Default timezone for images without a timezone set
TZ=
REDIS_HOSTNAME=

View File

@@ -1,59 +1,77 @@
services:
immich-server:
container_name: immich_server
image: ghcr.io/immich-app/immich-server:v2.5.6
image: ghcr.io/immich-app/immich-server:v1.123.0
# extends:
# file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
volumes:
- ${UPLOAD_LOCATION}:/data
- ${IMMICH_EXTERNAL_MEDIA_LOCATION}:/external:ro
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
environment:
DB_HOSTNAME: ${IMMICH_DB_HOSTNAME}
DB_USERNAME: ${IMMICH_POSTGRES_USER}
DB_PASSWORD: ${IMMICH_POSTGRES_PASSWORD}
DB_DATABASE_NAME: ${IMMICH_POSTGRES_DB}
ports:
- '2283:2283'
depends_on:
- redis
- database
restart: always
healthcheck:
disable: false
immich-machine-learning:
container_name: immich_machine_learning
image: ghcr.io/immich-app/immich-machine-learning:v2.5.6
# For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
# Example tag: ${IMMICH_VERSION:-release}-cuda
image: ghcr.io/immich-app/immich-machine-learning:v1.123.0
# extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes:
- model-cache:/cache
env_file:
- .env
restart: always
healthcheck:
disable: false
redis:
container_name: immich_redis
image: docker.io/valkey/valkey:9@sha256:fb8d272e529ea567b9bf1302245796f21a2672b8368ca3fcb938ac334e613c8f
image: docker.io/redis:7.4-alpine@sha256:8860d052306f47904110630a97b1edd8439e24ef7b7ed4bb315ac12f1c3a58c3
healthcheck:
test: redis-cli ping || exit 1
restart: always
database:
container_name: immich_postgres
image: ghcr.io/immich-app/postgres:14-vectorchord0.3.0-pgvectors0.2.0
image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
environment:
POSTGRES_USER: ${IMMICH_POSTGRES_USER}
POSTGRES_PASSWORD: ${IMMICH_POSTGRES_PASSWORD}
POSTGRES_DB: ${IMMICH_POSTGRES_DB}
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_DB: ${DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: '--data-checksums'
DB_STORAGE_TYPE: 'HDD'
ports:
- 5433:5432
volumes:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
- ${IMMICH_DB_LOCATION}:/var/lib/postgresql/data
shm_size: 128mb
healthcheck:
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: >-
postgres
-c shared_preload_libraries=vectors.so
-c 'search_path="$$user", public, vectors'
-c logging_collector=on
-c max_wal_size=2GB
-c shared_buffers=512MB
-c wal_compression=on
restart: always
volumes:

View File

@@ -1,9 +1,9 @@
services:
jellyfin:
image: jellyfin/jellyfin:10.11.6
image: jellyfin/jellyfin:10.10.3
user: ${JELLYFIN_USER_UID}:${JELLYFIN_USER_GID}
network_mode: 'host'
restart: always
restart: 'unless-stopped'
env_file:
- .env
environment:

View File

@@ -1,27 +0,0 @@
GRAFANA_ADMIN_USER=
GRAFANA_ADMIN_PASSWORD=
GRAFANA_DOMAIN=
GRAFANA_ROOT_URL=
GRAFANA_DISABLE_LOGIN_FORM=false
GRAFANA_SMTP_ENABLED=true
GRAFANA_SMTP_HOST=
GRAFANA_SMTP_USER=
GRAFANA_SMTP_PASSWORD=
GRAFANA_SMTP_FROM=
GRAFANA_DATA_DIR=
PROMETHEUS_CONFIG_PATH=
PROMETHEUS_DATA_DIR=
BLACKBOX_CONFIG_PATH=
LOKI_CONFIG_PATH=
LOKI_DATA_DIR=
ALLOY_CONFIG_PATH=
GMF_MATRIX_HOMESERVER=
GMF_MATRIX_USER=
GMF_MATRIX_TOKEN=

View File

@@ -1,115 +0,0 @@
services:
# Prometheus - Metrics Collection
prometheus:
image: prom/prometheus:v3.9.1
restart: unless-stopped
ports:
- "9091:9090"
volumes:
- ${PROMETHEUS_CONFIG_PATH}:/etc/prometheus/prometheus.yml:ro
- ${PROMETHEUS_DATA_DIR}:/prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--web.console.libraries=/etc/prometheus/console_libraries'
- '--web.console.templates=/etc/prometheus/consoles'
- '--storage.tsdb.retention.time=200h'
- '--web.enable-lifecycle'
- '--web.enable-admin-api'
# Grafana - Visualization Dashboard
grafana:
image: grafana/grafana:12.4.0
restart: unless-stopped
ports:
- "3002:3000"
volumes:
- ${GRAFANA_DATA_DIR}:/var/lib/grafana
environment:
- GF_SECURITY_ADMIN_USER=${GRAFANA_ADMIN_USER}
- GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD}
- GF_USERS_ALLOW_SIGN_UP=false
- GF_SERVER_DOMAIN=${GRAFANA_DOMAIN}
- GF_SERVER_ROOT_URL=${GRAFANA_ROOT_URL}
- GF_AUTH_DISABLE_LOGIN_FORM=${GRAFANA_DISABLE_LOGIN_FORM}
- GF_SMTP_ENABLED=${GRAFANA_SMTP_ENABLED}
- GF_SMTP_HOST=${GRAFANA_SMTP_HOST}
- GF_SMTP_USER=${GRAFANA_SMTP_USER}
- GF_SMTP_PASSWORD=${GRAFANA_SMTP_PASSWORD}
- GF_SMTP_FROM_ADDRESS=${GRAFANA_SMTP_FROM}
depends_on:
- prometheus
loki:
image: grafana/loki:3.6.7
restart: unless-stopped
ports:
- "3100:3100"
volumes:
- ${LOKI_CONFIG_PATH}:/etc/loki/local-config.yaml
- ${LOKI_DATA_DIR}:/loki
command:
- '-config.file=/etc/loki/local-config.yaml'
- '-target=all'
alloy:
image: grafana/alloy:v1.13.2
restart: unless-stopped
ports:
- "12345:12345"
volumes:
- ${ALLOY_CONFIG_PATH}:/etc/alloy/config.alloy:ro
- /var/log:/var/log:ro
# Node Exporter - Host System Metrics
node_exporter:
image: prom/node-exporter:v1.10.2
restart: unless-stopped
ports:
- "9100:9100"
volumes:
- /proc:/host/proc:ro
- /sys:/host/sys:ro
- /:/rootfs:ro
command:
- '--path.procfs=/host/proc'
- '--path.rootfs=/rootfs'
- '--path.sysfs=/host/sys'
- '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
# cAdvisor - Container Metrics
cadvisor:
image: gcr.io/cadvisor/cadvisor:v0.55.1
restart: unless-stopped
privileged: true
ports:
- "8081:8080"
volumes:
- /:/rootfs:ro
- /var/run:/var/run:ro
- /sys:/sys:ro
- /var/lib/docker/:/var/lib/docker:ro
devices:
- /dev/kmsg
command:
- '--housekeeping_interval=30s'
- '--max_housekeeping_interval=35s'
- '--event_storage_event_limit=default=0'
- '--event_storage_age_limit=default=0'
- '--disable_metrics=disk,diskIO,tcp,udp,percpu,sched,process,hugetlb,referenced_memory'
- '--docker_only=true'
# Blackbox Exporter - Endpoint Monitoring
blackbox_exporter:
image: prom/blackbox-exporter:v0.28.0
restart: unless-stopped
ports:
- "9115:9115"
volumes:
- ${BLACKBOX_CONFIG_PATH}:/etc/blackbox_exporter/config.yml
grafana-matrix-forwarder:
build: ./grafana-matrix-forwarder
restart: unless-stopped
env_file:
- .env

View File

@@ -1,9 +0,0 @@
FROM alpine
# Create main app folder to run from
WORKDIR /app
# Copy compiled binary to release image
COPY grafana-matrix-forwarder /app/grafana-matrix-forwarder
ENTRYPOINT ["/app/grafana-matrix-forwarder"]

View File

@@ -1,2 +0,0 @@
NEXTCLOUD_AIO_DIR=
NEXTCLOUD_DATA_DIR=

View File

@@ -1,40 +0,0 @@
services:
nextcloud-aio-mastercontainer:
image: ghcr.io/nextcloud-releases/all-in-one:latest # This is the container image used. You can switch to ghcr.io/nextcloud-releases/all-in-one:beta if you want to help testing new releases. See https://github.com/nextcloud/all-in-one#how-to-switch-the-channel
init: true # This setting makes sure that signals from main process inside the container are correctly forwarded to children. See https://docs.docker.com/reference/compose-file/services/#init
restart: always # This makes sure that the container starts always together with the host OS. See https://docs.docker.com/reference/compose-file/services/#restart
container_name: nextcloud-aio-mastercontainer # This line is not allowed to be changed as otherwise AIO will not work correctly
volumes:
- nextcloud_aio_mastercontainer:/mnt/docker-aio-config # This line is not allowed to be changed as otherwise the built-in backup solution will not work
- /var/run/docker.sock:/var/run/docker.sock:ro # May be changed on macOS, Windows or docker rootless. See the applicable documentation. If adjusting, don't forget to also set 'WATCHTOWER_DOCKER_SOCKET_PATH'!
network_mode: bridge
ports:
- 8014:8080 # This is the AIO interface, served via https and self-signed certificate. See https://github.com/nextcloud/all-in-one#explanation-of-used-ports
environment: # Is needed when using any of the options below
# AIO_DISABLE_BACKUP_SECTION: false # Setting this to true allows to hide the backup section in the AIO interface. See https://github.com/nextcloud/all-in-one#how-to-disable-the-backup-section
APACHE_PORT: 11000 # Is needed when running behind a web server or reverse proxy (like Apache, Nginx, Caddy, Cloudflare Tunnel and else). See https://github.com/nextcloud/all-in-one/blob/main/reverse-proxy.md
# APACHE_IP_BINDING: 127.0.0.1 # Should be set when running behind a web server or reverse proxy (like Apache, Nginx, Caddy, Cloudflare Tunnel and else) that is running on the same host. See https://github.com/nextcloud/all-in-one/blob/main/reverse-proxy.md
# APACHE_ADDITIONAL_NETWORK: frontend_net # (Optional) Connect the apache container to an additional docker network. Needed when behind a web server or reverse proxy (like Apache, Nginx, Caddy, Cloudflare Tunnel and else) running in a different docker network on same server. See https://github.com/nextcloud/all-in-one/blob/main/reverse-proxy.md
# BORG_RETENTION_POLICY: --keep-within=7d --keep-weekly=4 --keep-monthly=6 # Allows to adjust borgs retention policy. See https://github.com/nextcloud/all-in-one#how-to-adjust-borgs-retention-policy
# COLLABORA_SECCOMP_DISABLED: false # Setting this to true allows to disable Collabora's Seccomp feature. See https://github.com/nextcloud/all-in-one#how-to-disable-collaboras-seccomp-feature
# DOCKER_API_VERSION: 1.44 # You can adjust the internally used docker api version with this variable. ⚠️⚠️⚠️ Warning: please note that only the default api version (unset this variable) is supported and tested by the maintainers of Nextcloud AIO. So use this on your own risk and things might break without warning. See https://github.com/nextcloud/all-in-one#how-to-adjust-the-internally-used-docker-api-version
# FULLTEXTSEARCH_JAVA_OPTIONS: "-Xms1024M -Xmx1024M" # Allows to adjust the fulltextsearch java options. See https://github.com/nextcloud/all-in-one#how-to-adjust-the-fulltextsearch-java-options
NEXTCLOUD_DATADIR: ${NEXTCLOUD_DATA_DIR} # Allows to set the host directory for Nextcloud's datadir. ⚠️⚠️⚠️ Warning: do not set or adjust this value after the initial Nextcloud installation is done! See https://github.com/nextcloud/all-in-one#how-to-change-the-default-location-of-nextclouds-datadir
# NEXTCLOUD_MOUNT: /mnt/ # Allows the Nextcloud container to access the chosen directory on the host. See https://github.com/nextcloud/all-in-one#how-to-allow-the-nextcloud-container-to-access-directories-on-the-host
# NEXTCLOUD_UPLOAD_LIMIT: 16G # Can be adjusted if you need more. See https://github.com/nextcloud/all-in-one#how-to-adjust-the-upload-limit-for-nextcloud
# NEXTCLOUD_MAX_TIME: 3600 # Can be adjusted if you need more. See https://github.com/nextcloud/all-in-one#how-to-adjust-the-max-execution-time-for-nextcloud
# NEXTCLOUD_MEMORY_LIMIT: 512M # Can be adjusted if you need more. See https://github.com/nextcloud/all-in-one#how-to-adjust-the-php-memory-limit-for-nextcloud
# NEXTCLOUD_TRUSTED_CACERTS_DIR: /path/to/my/cacerts # CA certificates in this directory will be trusted by the OS of the nextcloud container (Useful e.g. for LDAPS) See https://github.com/nextcloud/all-in-one#how-to-trust-user-defined-certification-authorities-ca
# NEXTCLOUD_STARTUP_APPS: deck twofactor_totp tasks calendar contacts notes # Allows to modify the Nextcloud apps that are installed on starting AIO the first time. See https://github.com/nextcloud/all-in-one#how-to-change-the-nextcloud-apps-that-are-installed-on-the-first-startup
# NEXTCLOUD_ADDITIONAL_APKS: imagemagick # This allows to add additional packages to the Nextcloud container permanently. Default is imagemagick but can be overwritten by modifying this value. See https://github.com/nextcloud/all-in-one#how-to-add-os-packages-permanently-to-the-nextcloud-container
# NEXTCLOUD_ADDITIONAL_PHP_EXTENSIONS: imagick # This allows to add additional php extensions to the Nextcloud container permanently. Default is imagick but can be overwritten by modifying this value. See https://github.com/nextcloud/all-in-one#how-to-add-php-extensions-permanently-to-the-nextcloud-container
# NEXTCLOUD_ENABLE_DRI_DEVICE: true # This allows to enable the /dev/dri device for containers that profit from it. ⚠️⚠️⚠️ Warning: this only works if the '/dev/dri' device is present on the host! If it should not exist on your host, don't set this to true as otherwise the Nextcloud container will fail to start! See https://github.com/nextcloud/all-in-one#how-to-enable-hardware-acceleration-for-nextcloud
# NEXTCLOUD_ENABLE_NVIDIA_GPU: true # This allows to enable the NVIDIA runtime and GPU access for containers that profit from it. ⚠️⚠️⚠️ Warning: this only works if an NVIDIA gpu is installed on the server. See https://github.com/nextcloud/all-in-one#how-to-enable-hardware-acceleration-for-nextcloud.
# NEXTCLOUD_KEEP_DISABLED_APPS: false # Setting this to true will keep Nextcloud apps that are disabled in the AIO interface and not uninstall them if they should be installed. See https://github.com/nextcloud/all-in-one#how-to-keep-disabled-apps
# SKIP_DOMAIN_VALIDATION: false # This should only be set to true if things are correctly configured. See https://github.com/nextcloud/all-in-one#how-to-skip-the-domain-validation
# TALK_PORT: 3478 # This allows to adjust the port that the talk container is using which is exposed on the host. See https://github.com/nextcloud/all-in-one#how-to-adjust-the-talk-port
volumes: # If you want to store the data on a different drive, see https://github.com/nextcloud/all-in-one#how-to-store-the-filesinstallation-on-a-separate-drive
nextcloud_aio_mastercontainer:
name: nextcloud_aio_mastercontainer # This line is not allowed to be changed as otherwise the built-in backup solution will not work

View File

@@ -9,9 +9,9 @@ PAPERLESS_SECRET_KEY=
PAPERLESS_URL=
PAPERLESS_POSTGRES_DB=
PAPERLESS_POSTGRES_USER=
PAPERLESS_POSTGRES_PASSWORD=
POSTGRES_DB=
POSTGRES_USER=
POSTGRES_PASSWORD=
#optional
PAPERLESS_TIME_ZONE=

View File

@@ -1,25 +1,23 @@
services:
broker:
image: docker.io/library/redis:7
restart: always
restart: unless-stopped
volumes:
- ${PAPERLESS_REDIS_DATA_DIR}:/data
db:
image: docker.io/library/postgres:17
restart: always
restart: unless-stopped
volumes:
- ${PAPERLESS_POSTGRES_DATA_DIR}:/var/lib/postgresql/data
environment:
POSTGRES_DB: ${PAPERLESS_POSTGRES_DB}
POSTGRES_USER: ${PAPERLESS_POSTGRES_USER}
POSTGRES_PASSWORD: ${PAPERLESS_POSTGRES_PASSWORD}
ports:
- 5434:5432
POSTGRES_DB: ${POSTGRES_DB}
POSTGRES_USER: ${POSTGRES_USER}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
paperless:
image: ghcr.io/paperless-ngx/paperless-ngx:2.20.10
restart: always
image: ghcr.io/paperless-ngx/paperless-ngx:2.13.5
restart: unless-stopped
depends_on:
- db
- broker
@@ -36,9 +34,9 @@ services:
environment:
PAPERLESS_REDIS: redis://broker:6379
PAPERLESS_DBHOST: db
PAPERLESS_DBNAME: ${PAPERLESS_POSTGRES_DB}
PAPERLESS_DBUSER: ${PAPERLESS_POSTGRES_USER}
PAPERLESS_DBPASS: ${PAPERLESS_POSTGRES_PASSWORD}
PAPERLESS_DBNAME: ${POSTGRES_DB}
PAPERLESS_DBUSER: ${POSTGRES_USER}
PAPERLESS_DBPASS: ${POSTGRES_PASSWORD}
PAPERLESS_TIKA_ENABLED: 1
PAPERLESS_TIKA_GOTENBERG_ENDPOINT: http://gotenberg:3000
PAPERLESS_TIKA_ENDPOINT: http://tika:9998
@@ -49,8 +47,8 @@ services:
PAPERLESS_REDIRECT_LOGIN_TO_SSO: true
gotenberg:
image: docker.io/gotenberg/gotenberg:8.27.0
restart: always
image: docker.io/gotenberg/gotenberg:8.15.2
restart: unless-stopped
# The gotenberg chromium route is used to convert .eml files. We do not
# want to allow external content like tracking pixels or even javascript.
@@ -60,5 +58,5 @@ services:
- "--chromium-allow-list=file:///tmp/.*"
tika:
image: docker.io/apache/tika:3.2.3.0
restart: always
image: docker.io/apache/tika:3.0.0.0
restart: unless-stopped

View File

@@ -1,2 +0,0 @@
PIHOLE_DATA_DIR=
PIHOLE_PASSWORD=

View File

@@ -1,37 +0,0 @@
# More info at https://github.com/pi-hole/docker-pi-hole/ and https://docs.pi-hole.net/
services:
pihole:
container_name: pihole
image: pihole/pihole:2026.02.0
ports:
# DNS Ports
- "53:53/tcp"
- "53:53/udp"
# Default HTTP Port
- "8080:80/tcp"
# Default HTTPs Port. FTL will generate a self-signed certificate
# - "443:443/tcp"
# Uncomment the below if using Pi-hole as your DHCP Server
# - "67:67/udp"
# Uncomment the line below if you are using Pi-hole as your NTP server
#- "123:123/udp"
environment:
# Set the appropriate timezone for your location from
# https://en.wikipedia.org/wiki/List_of_tz_database_time_zones, e.g:
TZ: 'Europe/Amsterdam'
# Set a password to access the web interface. Not setting one will result in a random password being assigned
FTLCONF_webserver_api_password: ${PIHOLE_PASSWORD}
# If using Docker's default `bridge` network setting the dns listening mode should be set to 'all'
FTLCONF_dns_listeningMode: 'all'
volumes:
# For persisting Pi-hole's databases and common configuration file
- ${PIHOLE_DATA_DIR}:/etc/pihole
cap_add:
# See https://github.com/pi-hole/docker-pi-hole#note-on-capabilities
# Required if you are using Pi-hole as your DHCP server, else not needed
# - NET_ADMIN
# Required if you are using Pi-hole as your NTP client to be able to set the host's system time
# - SYS_TIME
# Optional, if Pi-hole should get some more processing time
- SYS_NICE
restart: always

View File

@@ -1,7 +1,7 @@
services:
pingvin:
image: stonith404/pingvin-share:v1.13.0
restart: always
image: stonith404/pingvin-share:v1.8.1
restart: unless-stopped
ports:
- 3042:3000
environment:

View File

@@ -1,7 +1,5 @@
APP_URL=
PUBLIC_APP_URL=
TRUST_PROXY=
MAXMIND_LICENSE_KEY=
PUID=
PGID=
ENCRYPTION_KEY=
POCKETID_DATA_DIR=

View File

@@ -1,16 +1,16 @@
services:
pocket-id:
image: ghcr.io/pocket-id/pocket-id:v2.3.0
restart: always
image: stonith404/pocket-id:v0.23.0
restart: unless-stopped
env_file: .env
ports:
- "3043:1411"
- 3043:80
volumes:
- ${POCKETID_DATA_DIR}:/app/data
- "./data:/app/backend/data"
# Optional healthcheck
healthcheck:
test: "curl -f http://localhost:1411/healthz"
test: "curl -f http://localhost/health"
interval: 1m30s
timeout: 5s
retries: 2
start_period: 10s
start_period: 10s

View File

@@ -1,32 +1,3 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"packageRules": [
{
"matchUpdateTypes": ["digest"],
"enabled": false
},
{
"groupName": "Immich",
"matchPackageNames": "/^ghcr.io/immich-app/immich/"
},
{
"groupName": "Paperless-ngx",
"matchPackageNames": [
"/paperless-ngx/",
"/tika/",
"/gotenberg/"
]
},
{
"groupName": "Matrix",
"matchPackageNames": [
"matrixdotorg/synapse",
"ghcr.io/element-hq/matrix-authentication-service",
"dock.mau.dev/mautrix/whatsapp",
"dock.mau.dev/mautrix/signal",
"ghcr.io/element-hq/lk-jwt-service",
"livekit/livekit-server"
]
}
]
"$schema": "https://docs.renovatebot.com/renovate-schema.json"
}

View File

@@ -1,5 +0,0 @@
SHLINK_DOMAIN=
SHLINK_IS_HTTPS_ENABLED=
SHLINK_GEOIP_LICENSE_KEY=
SHLINK_API_KEY=

View File

@@ -1,31 +0,0 @@
services:
shlink:
image: shlinkio/shlink:5.0
container_name: shlink
restart: always
ports:
- "8085:8080"
environment:
DEFAULT_DOMAIN: ${SHLINK_DOMAIN}
IS_HTTPS_ENABLED: ${SHLINK_IS_HTTPS_ENABLED}
GEOLITE_LICENSE_KEY: ${SHLINK_GEOIP_LICENSE_KEY}
INITIAL_API_KEY: ${SHLINK_API_KEY}
DB_DRIVER: postgres
DB_HOST: shlink_db
DB_NAME: ${SHLINK_POSTGRES_DB}
DB_USER: ${SHLINK_POSTGRES_USER}
DB_PASSWORD: ${SHLINK_POSTGRES_PASSWORD}
depends_on:
- shlink_db
shlink_db:
image: postgres:17
container_name: shlink_db
restart: always
volumes:
- ${SHLINK_POSTGRES_DIR}:/var/lib/postgresql/data
environment:
POSTGRES_DB: ${SHLINK_POSTGRES_DB}
POSTGRES_USER: ${SHLINK_POSTGRES_USER}
POSTGRES_PASSWORD: ${SHLINK_POSTGRES_PASSWORD}
ports:
- 5436:5432

View File

@@ -1,14 +0,0 @@
TZ=Europe/Amsterdam
# db
SPLIIT_POSTGRES_DIR=
SPLIIT_POSTGRES_PASSWORD=
SPLIIT_POSTGRES_USER=spliit
SPLIIT_POSTGRES_DB=spliit
# Image upload (for receipts)
NEXT_PUBLIC_ENABLE_EXPENSE_DOCUMENTS=true
S3_UPLOAD_KEY=
S3_UPLOAD_SECRET=
S3_UPLOAD_BUCKET=
S3_UPLOAD_REGION=

View File

@@ -1,29 +0,0 @@
services:
spliit:
#image: ghcr.io/spliit-app/spliit:1.19.1
image: petersmit27/spliit:latest
restart: always
ports:
- 3001:3000
depends_on:
- spliit_db
env_file:
- .env
environment:
TZ: ${TZ}
POSTGRES_HOST: spliit_db
POSTGRES_DB: ${SPLIIT_POSTGRES_DB}
POSTGRES_USER: ${SPLIIT_POSTGRES_USER}
POSTGRES_PASSWORD: ${SPLIIT_POSTGRES_PASSWORD}
spliit_db:
image: postgres:17
restart: always
volumes:
- ${SPLIIT_POSTGRES_DIR}:/var/lib/postgresql/data
ports:
- 5437:5432
environment:
TZ: ${TZ}
POSTGRES_DB: ${SPLIIT_POSTGRES_DB}
POSTGRES_USER: ${SPLIIT_POSTGRES_USER}
POSTGRES_PASSWORD: ${SPLIIT_POSTGRES_PASSWORD}

View File

@@ -1,31 +0,0 @@
SYNAPSE_POSTGRES_DATA_DIR=
SYNAPSE_CONFIG_DIR=
SYNAPSE_POSTGRES_DB=
SYNAPSE_POSTGRES_USER=
SYNAPSE_POSTGRES_PASSWORD=
MAS_CONFIG_DIR=
MAS_POSTGRES_DATA_DIR=
MAS_POSTGRES_DB=
MAS_POSTGRES_USER=
MAS_POSTGRES_PASSWORD=
MAUTRIX_WHATSAPP_DATA_DIR=
MAUTRIX_WHATSAPP_POSTGRES_DATA_DIR=
MAUTRIX_WHATSAPP_POSTGRES_DB=
MAUTRIX_WHATSAPP_POSTGRES_USER=
MAUTRIX_WHATSAPP_POSTGRES_PASSWORD=
MAUTRIX_SIGNAL_DATA_DIR=
MAUTRIX_SIGNAL_POSTGRES_DATA_DIR=
MAUTRIX_SIGNAL_POSTGRES_DB=
MAUTRIX_SIGNAL_POSTGRES_USER=
MAUTRIX_SIGNAL_POSTGRES_PASSWORD=
MATRIX_DOMAIN=
LIVEKIT_DOMAIN=
LIVEKIT_SECRET_KEY=
LIVEKIT_CONFIG_DIR=

View File

@@ -1,116 +0,0 @@
services:
  # Issues JWTs so Matrix clients can authenticate against the LiveKit SFU.
  element-call-auth-service:
    image: ghcr.io/element-hq/lk-jwt-service:0.4.1
    container_name: element-call-jwt
    hostname: auth-server
    environment:
      - LK_JWT_PORT=8080
      - LIVEKIT_URL=https://${LIVEKIT_DOMAIN}/livekit/sfu
      - LIVEKIT_KEY=devkey
      - LIVEKIT_SECRET=${LIVEKIT_SECRET_KEY}
      - LIVEKIT_FULL_ACCESS_HOMESERVERS=${MATRIX_DOMAIN}
    restart: always
    ports:
      # Quoted: port mappings should always be strings in compose files.
      - "8071:8080"
  # LiveKit SFU for Element Call media.
  element-call-livekit:
    image: livekit/livekit-server:v1.9.11
    command: --config /etc/livekit.yaml
    ports:
      - "7880:7880/tcp"
      - "7881:7881/tcp"
      - "7882:7882/tcp"
      - "50100-50200:50100-50200/udp"
    restart: always
    volumes:
      - ${LIVEKIT_CONFIG_DIR}/config.yaml:/etc/livekit.yaml:ro
  # Signal bridge; stores its state under /data.
  mautrix-signal:
    container_name: mautrix-signal
    image: dock.mau.dev/mautrix/signal:v0.2602.1
    restart: always
    volumes:
      - ${MAUTRIX_SIGNAL_DATA_DIR}:/data
    # Added for consistency with mautrix-whatsapp: start the DB first.
    depends_on:
      - mautrix-signal_db
  mautrix-signal_db:
    image: docker.io/library/postgres:17
    restart: always
    volumes:
      - ${MAUTRIX_SIGNAL_POSTGRES_DATA_DIR}:/var/lib/postgresql/data
    environment:
      POSTGRES_DB: ${MAUTRIX_SIGNAL_POSTGRES_DB}
      POSTGRES_USER: ${MAUTRIX_SIGNAL_POSTGRES_USER}
      POSTGRES_PASSWORD: ${MAUTRIX_SIGNAL_POSTGRES_PASSWORD}
    ports:
      - "5439:5432"
  # WhatsApp bridge; stores its state under /data.
  mautrix-whatsapp:
    container_name: mautrix-whatsapp
    image: dock.mau.dev/mautrix/whatsapp:v0.2602.0
    #image: petersmit27/mautrix-whatsapp:latest
    restart: always
    volumes:
      - ${MAUTRIX_WHATSAPP_DATA_DIR}:/data
    depends_on:
      - mautrix-whatsapp_db
  mautrix-whatsapp_db:
    image: docker.io/library/postgres:17
    restart: always
    volumes:
      - ${MAUTRIX_WHATSAPP_POSTGRES_DATA_DIR}:/var/lib/postgresql/data
    environment:
      POSTGRES_DB: ${MAUTRIX_WHATSAPP_POSTGRES_DB}
      POSTGRES_USER: ${MAUTRIX_WHATSAPP_POSTGRES_USER}
      POSTGRES_PASSWORD: ${MAUTRIX_WHATSAPP_POSTGRES_PASSWORD}
    ports:
      - "5440:5432"
  # Matrix Authentication Service (OIDC auth for Synapse).
  mas:
    image: ghcr.io/element-hq/matrix-authentication-service:1.12.0
    restart: always
    working_dir: /config
    volumes:
      - ${MAS_CONFIG_DIR}:/config
    environment:
      MAS_CONFIG: /config/config.yaml
    ports:
      - "8090:8090"
    depends_on:
      - mas_db
  mas_db:
    image: docker.io/library/postgres:17
    restart: always
    volumes:
      - ${MAS_POSTGRES_DATA_DIR}:/var/lib/postgresql/data
    environment:
      POSTGRES_DB: ${MAS_POSTGRES_DB}
      POSTGRES_USER: ${MAS_POSTGRES_USER}
      POSTGRES_PASSWORD: ${MAS_POSTGRES_PASSWORD}
    ports:
      - "5441:5432"
  synapse_db:
    image: docker.io/library/postgres:17
    restart: always
    volumes:
      - ${SYNAPSE_POSTGRES_DATA_DIR}:/var/lib/postgresql/data
    environment:
      POSTGRES_DB: ${SYNAPSE_POSTGRES_DB}
      POSTGRES_USER: ${SYNAPSE_POSTGRES_USER}
      POSTGRES_PASSWORD: ${SYNAPSE_POSTGRES_PASSWORD}
      # Synapse requires a C-locale, UTF-8 database.
      POSTGRES_INITDB_ARGS: '--encoding=UTF-8 --locale=C'
    ports:
      - "5442:5432"
  # The Matrix homeserver itself.
  synapse:
    container_name: synapse
    image: matrixdotorg/synapse:v1.148.0
    restart: always
    volumes:
      - ${SYNAPSE_CONFIG_DIR}:/data
    ports:
      - "8008:8008"
    depends_on:
      - synapse_db
      - mas
      - mautrix-whatsapp
  # Web admin UI, served on host port 8079.
  element-admin:
    image: oci.element.io/element-admin:0.1.10
    restart: unless-stopped
    ports:
      - "8079:8080"
    environment:
      - SERVER_NAME=${MATRIX_DOMAIN}

1
update Executable file
View File

@@ -0,0 +1 @@
# Start (or refresh) all containers from this directory's compose file, detached.
docker compose up -d

View File

@@ -1,5 +0,0 @@
# Shared secret used to verify Gitea webhook HMAC signatures.
GITEA_WEBHOOK_SECRET=
# Host path of the docker config repo (mounted into the updater container).
DOCKER_CONFIG_DIR=
# UID/GID owning DOCKER_CONFIG_DIR; the container runs as this user.
DOCKER_CONFIG_DIR_UID=
DOCKER_CONFIG_DIR_GID=
# GID of the host "docker" group, needed to use the mounted docker socket.
DOCKER_GID=

View File

@@ -1,33 +0,0 @@
# Use the official PHP image with Apache
FROM php:8.5-apache

# Add Docker's official GPG key.
# FIX: the original ran `apt-get install` without -y, which aborts a
# non-interactive `docker build` when apt prompts for confirmation.
RUN apt-get update && \
    apt-get install -y ca-certificates curl && \
    install -m 0755 -d /etc/apt/keyrings && \
    curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc && \
    chmod a+r /etc/apt/keyrings/docker.asc && \
    rm -rf /var/lib/apt/lists/*

# Add the Docker repository to Apt sources:
RUN echo \
  "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian \
  $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
  tee /etc/apt/sources.list.d/docker.list > /dev/null

# Install git and the docker CLI/engine tooling, then drop apt caches to keep the layer small.
RUN apt-get update && \
    apt-get install -y git docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin docker-model-plugin && \
    rm -rf /var/lib/apt/lists/*

# Create .docker directory and give it to the Apache runtime user.
RUN mkdir -p /var/www/.docker && \
    chown www-data:www-data /var/www/.docker

# Copy the PHP webhook handler to the Apache document root.
COPY webhook.php /var/www/html/index.php

# Expose port 80
EXPOSE 80

# Start Apache server
CMD ["apache2-foreground"]

View File

@@ -1,13 +0,0 @@
services:
  # Webhook-driven auto-updater: receives Gitea pushes and re-deploys the stack.
  updater:
    build: .
    # Only started when the "noautoupdate" profile is explicitly enabled.
    profiles: [noautoupdate]
    # Run as the config-dir owner, with the host docker group so the
    # container may use the mounted docker socket.
    user: ${DOCKER_CONFIG_DIR_UID}:${DOCKER_GID}
    restart: always
    ports:
      # Quoted: port mappings should always be strings in compose files.
      - "6969:80"
    env_file:
      - .env
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      # Config repo mounted at the same path inside the container as on the host.
      - ${DOCKER_CONFIG_DIR}:${DOCKER_CONFIG_DIR}

View File

@@ -1,80 +0,0 @@
<?php
// Gitea webhook receiver: verifies the HMAC-SHA256 signature of an incoming
// event, then pulls the latest config repo and rebuilds the compose stack.
//
// FIXES over the original:
//  - signature check now uses hash_equals() (constant-time) instead of !==,
//    which leaks timing information about the expected signature;
//  - failure paths send an explicit non-200 status code; previously every
//    rejected request exited with an implicit 200, so Gitea recorded the
//    delivery as successful;
//  - JSON errors are logged with json_last_error_msg() instead of a bare code;
//  - exec() return codes are checked and logged.
error_log("Received request from " . $_SERVER['REMOTE_ADDR']);

$secret_key = getenv('GITEA_WEBHOOK_SECRET');
if (empty($secret_key)) {
    error_log('FAILED - secret key missing from environment');
    http_response_code(500);
    exit();
}

$docker_config_dir = getenv('DOCKER_CONFIG_DIR');
if (empty($docker_config_dir)) {
    error_log('FAILED - docker config dir missing from environment');
    http_response_code(500);
    exit();
}

// Only POST requests carry webhook payloads.
if ($_SERVER['REQUEST_METHOD'] != 'POST') {
    error_log('FAILED - not POST - ' . $_SERVER['REQUEST_METHOD']);
    http_response_code(405);
    exit();
}

// Require a JSON content type.
$content_type = isset($_SERVER['CONTENT_TYPE']) ? strtolower(trim($_SERVER['CONTENT_TYPE'])) : '';
if ($content_type != 'application/json') {
    error_log('FAILED - not application/json - ' . $content_type);
    http_response_code(415);
    exit();
}

// Read the raw request body (needed verbatim for HMAC verification).
$payload = trim(file_get_contents("php://input"));
if (empty($payload)) {
    error_log('FAILED - no payload');
    http_response_code(400);
    exit();
}

// Gitea sends the payload HMAC in this header.
$header_signature = isset($_SERVER['HTTP_X_GITEA_SIGNATURE']) ? $_SERVER['HTTP_X_GITEA_SIGNATURE'] : '';
if (empty($header_signature)) {
    error_log('FAILED - header signature missing');
    http_response_code(403);
    exit();
}

// Recompute the signature and compare in constant time.
$payload_signature = hash_hmac('sha256', $payload, $secret_key);
if (!hash_equals($payload_signature, $header_signature)) {
    error_log('FAILED - payload signature');
    http_response_code(403);
    exit();
}

// Validate that the payload is well-formed JSON (the content is not otherwise used).
$decoded = json_decode($payload, true);
if (json_last_error() !== JSON_ERROR_NONE) {
    error_log('FAILED - json decode - ' . json_last_error_msg());
    http_response_code(400);
    exit();
}

error_log('SUCCESS');
chdir($docker_config_dir);

error_log('Pulling latest changes');
exec('git pull', $git_output, $git_status);
if ($git_status !== 0) {
    error_log('FAILED - git pull exited with ' . $git_status);
    http_response_code(500);
    exit();
}

error_log('Building and starting containers');
exec('docker compose up -d --build --quiet-pull', $compose_output, $compose_status);
if ($compose_status !== 0) {
    // Log but still acknowledge: the webhook itself was valid and processed.
    error_log('WARNING - docker compose exited with ' . $compose_status);
}

// send return code and text message
http_response_code(200);
echo 'lekker pik';

View File

@@ -1,14 +1,13 @@
services:
vaultwarden:
container_name: vaultwarden
image: vaultwarden/server:1.35.4
image: vaultwarden/server:1.32.7
restart: always
volumes:
- ${VAULTWARDEN_DATA_LOCATION}:/data/
environment:
- ADMIN_TOKEN=${VAULTWARDEN_ADMIN_TOKEN}
- EXPERIMENTAL_CLIENT_FEATURE_FLAGS=extension-refresh,inline-menu-positioning-improvements,ssh-key-vault-item,ssh-agent
env_file:
- .env
ports:
- "8082:80"
- "8080:80"