6 Commits

Author SHA1 Message Date
Renovate Bot
f3ed4fc925 Update ghcr.io/immich-app/postgres Docker tag to v16 2025-12-16 18:52:02 +00:00
Peter Smit
8168134448 Update audiomuse 2025-12-16 19:46:30 +01:00
Peter Smit
70b8b4bf7c disable spliit for now 2025-12-16 19:45:35 +01:00
Peter Smit
f0631f3e3d Remove central db for now 2025-12-16 19:44:18 +01:00
e827dd1bc9 Update audiomuse/docker-compose.yml 2025-12-14 14:51:38 +00:00
Peter Smit
30303aac3d Add audiomuse 2025-12-12 16:56:49 +01:00
14 changed files with 171 additions and 453 deletions

41
audiomuse/.env.example Normal file
View File

@@ -0,0 +1,41 @@
# Copy this file to `.env` and fill in the values that match your setup.
# Docker Compose files under deployment/ read these variables to keep settings in one place.
#
# IMPORTANT:
# 1. This file must be named exactly ".env" (not .env.txt or .env.example)
# 2. It must be in the SAME directory as your docker-compose-*.yaml file
# 3. Do NOT use spaces around the = sign
# 4. Do NOT use quotes around values (unless required by the value itself)
# 5. After editing, restart containers: docker-compose down && docker-compose up -d
#
# SPECIAL CHARACTERS IN VALUES:
# If your password or API key contains special characters like: $ ` " ' \ # ! & * ( ) [ ] { } | ; < > ?
# you may need to:
# - Avoid quotes entirely: GEMINI_API_KEY=AIza$pecial!Key (usually works)
# - OR use single quotes if the value has $: GEMINI_API_KEY='AIza$pecial!Key'
# - OR escape with backslash: GEMINI_API_KEY=AIza\$pecial\!Key
# Most problematic characters: $ (variable expansion), ` (command substitution), " (string delimiter)
#
# TROUBLESHOOTING:
# If API keys don't work, verify:
# - File is named ".env" exactly (check with: ls -la)
# - No spaces: GEMINI_API_KEY=AIza... (not GEMINI_API_KEY = "AIza...")
# - No unescaped special characters (especially $ ` " ')
# - Restart containers after changing this file
# If all else fails, try hardcoding the value directly in docker-compose-*.yaml to isolate the issue
#
# --- Jellyfin ---
# Credentials used by the AudioMuse worker to talk to the Jellyfin API.
# JELLYFIN_USER_ID: the Jellyfin user whose library is scanned.
# JELLYFIN_TOKEN: an API token generated in the Jellyfin admin dashboard.
JELLYFIN_USER_ID=
JELLYFIN_TOKEN=
JELLYFIN_URL=https://jellyfin.smittenfeld.nl
#
# --- Shared backend configuration ---
# PostgreSQL credentials shared by the Flask app and the worker (see docker-compose.yml).
# AUDIOMUSE_POSTGRES_PASSWORD must be set; the compose file passes it straight to the DB container.
AUDIOMUSE_POSTGRES_USER=audiomuse
AUDIOMUSE_POSTGRES_PASSWORD=
AUDIOMUSE_POSTGRES_DB=audiomusedb
#
## --- Remote worker integration ---
## Uncomment to point this instance at a remote worker / shared backend instead of the local ones.
#WORKER_URL=http://worker.example.com:8029/worker
#WORKER_POSTGRES_HOST=server.example.com
#WORKER_REDIS_URL=redis://server.example.com:6379/0

View File

@@ -0,0 +1,127 @@
# AudioMuse-AI stack: Redis (task queue), PostgreSQL, Flask web app, and an RQ worker.
# NOTE: the top-level `version` key was removed — it is obsolete in Compose v2 and
# only produces a warning. Indentation below follows the Compose schema (2 spaces).
services:
  # Redis service for RQ (task queue)
  audiomuse-ai-redis:
    image: redis:7-alpine
    container_name: audiomuse-redis
    ports:
      - "6379:6379"  # Expose Redis port to the host. NOTE(review): this publishes Redis unauthenticated; consider removing if only containers need it.
    volumes:
      - redis-data:/data  # Persistent storage for Redis data
    networks:
      - audiomuse
    restart: unless-stopped

  # PostgreSQL database service
  audiomuse-ai-postgres:
    image: postgres:15-alpine
    container_name: audiomuse-postgres
    env_file:
      - .env
    environment:
      POSTGRES_USER: ${AUDIOMUSE_POSTGRES_USER}
      POSTGRES_PASSWORD: ${AUDIOMUSE_POSTGRES_PASSWORD}
      POSTGRES_DB: ${AUDIOMUSE_POSTGRES_DB}
    # ports:
    #   - "5432:5432"  # Expose PostgreSQL port to the host
    volumes:
      - postgres-data:/var/lib/postgresql/data  # Persistent storage for PostgreSQL data
    networks:
      - audiomuse
    restart: unless-stopped

  # AudioMuse-AI Flask application service
  audiomuse-ai-flask:
    image: ghcr.io/neptunehub/audiomuse-ai:0.8.2  # Reflects deployment.yaml
    container_name: audiomuse-ai-flask-app
    ports:
      - "8013:8000"
    env_file:
      - .env
    environment:
      SERVICE_TYPE: "flask"  # Tells the container to run the Flask app
      MEDIASERVER_TYPE: "jellyfin"  # Specify the media server type
      POSTGRES_USER: ${AUDIOMUSE_POSTGRES_USER}
      POSTGRES_PASSWORD: ${AUDIOMUSE_POSTGRES_PASSWORD}
      POSTGRES_DB: ${AUDIOMUSE_POSTGRES_DB}
      POSTGRES_PORT: "5432"
      POSTGRES_HOST: "audiomuse-ai-postgres"  # Service name of the postgres container
      REDIS_URL: "redis://audiomuse-ai-redis:6379/0"  # Connects to the 'redis' service
      AI_MODEL_PROVIDER: "OPENAI"
      OPENAI_API_KEY: "any-random-string"  # Dummy key to enable local model usage
      OPENAI_SERVER_URL: "http://172.17.0.1:12434/engines/llama.cpp/v1/chat/completions"  # API endpoint for the local DMR model, reached from within the container.
      OPENAI_MODEL_NAME: "ai/qwen3:0.6B-Q4_0"
      TEMP_DIR: "/app/temp_audio"
    # Use tmpfs to process audio files in memory for better performance. This reduces disk I/O but might use more RAM.
    # Mounted directories are not shared between containers, so each container gets its own tmpfs instance.
    # Increase tmpfs size for very large audio files as needed.
    # If host RAM is limited, use a Docker volume instead of tmpfs.
    # For more info on tmpfs: https://docs.docker.com/engine/storage/tmpfs/
    tmpfs:
      - /app/temp_audio:rw,size=1000m
    depends_on:
      - audiomuse-ai-redis
      - audiomuse-ai-postgres
    restart: unless-stopped
    networks:
      - audiomuse
    models:
      - llm  # Specify that LLM models are used in this service

  # AudioMuse-AI RQ Worker service
  audiomuse-ai-worker:
    image: ghcr.io/neptunehub/audiomuse-ai:0.8.2  # Reflects deployment.yaml
    container_name: audiomuse-ai-worker-instance
    env_file:
      - .env
    environment:
      SERVICE_TYPE: "worker"  # Tells the container to run the RQ worker
      MEDIASERVER_TYPE: "jellyfin"  # Specify the media server type
      JELLYFIN_USER_ID: "${JELLYFIN_USER_ID}"
      JELLYFIN_TOKEN: "${JELLYFIN_TOKEN}"
      JELLYFIN_URL: "${JELLYFIN_URL}"
      # DATABASE_URL is now constructed by config.py from the following:
      POSTGRES_USER: ${AUDIOMUSE_POSTGRES_USER}
      POSTGRES_PASSWORD: ${AUDIOMUSE_POSTGRES_PASSWORD}
      POSTGRES_DB: ${AUDIOMUSE_POSTGRES_DB}
      POSTGRES_PORT: "5432"
      POSTGRES_HOST: "audiomuse-ai-postgres"  # Service name of the postgres container
      REDIS_URL: "redis://audiomuse-ai-redis:6379/0"  # Connects to the 'redis' service
      AI_MODEL_PROVIDER: "OPENAI"
      OPENAI_API_KEY: "any-random-string"  # Dummy key to enable local model usage
      OPENAI_SERVER_URL: "http://172.17.0.1:12434/engines/llama.cpp/v1/chat/completions"  # API endpoint for the local DMR model, reached from within the container.
      OPENAI_MODEL_NAME: "ai/qwen3:0.6B-Q4_0"
      TEMP_DIR: "/app/temp_audio"
    # Use tmpfs to process audio files in memory for better performance. This reduces disk I/O but might use more RAM.
    # Mounted directories are not shared between containers, so each container gets its own tmpfs instance.
    # Increase tmpfs size for very large audio files as needed.
    # If host RAM is limited, use a Docker volume instead of tmpfs.
    # For more info on tmpfs: https://docs.docker.com/engine/storage/tmpfs/
    tmpfs:
      - /app/temp_audio:rw,size=1000m
    depends_on:
      - audiomuse-ai-redis
      - audiomuse-ai-postgres
    restart: unless-stopped
    networks:
      - audiomuse
    models:
      - llm  # Specify that LLM models are used in this service

# Using Docker Model Runner (DMR)
# - Make sure your Docker Engine version supports the AI features and that the docker-model-plugin is installed.
# - Follow Docker's setup guide: https://docs.docker.com/ai/model-runner/get-started/#docker-engine
# - Once DMR is configured, you can download and run AI models locally just like Docker images — no code changes to this compose file are required.
# - For model integration with docker-compose, see: https://docs.docker.com/ai/compose/models-and-compose/
models:
  llm:
    model: ai/qwen3:0.6B-Q4_0  # Lightweight local model for testing. Change as needed; if changed, ensure it matches OPENAI_MODEL_NAME.

# Define volumes for persistent data and temporary files
volumes:
  redis-data:
  postgres-data:

networks:
  audiomuse:

View File

@@ -1,5 +1,6 @@
include:
- audiobookshelf/docker-compose.yml
- audiomuse/docker-compose.yml
- baikal/docker-compose.yml
- gitea/docker-compose.yml
- immich/docker-compose.yml
@@ -9,8 +10,7 @@ include:
- pihole/docker-compose.yml
- pingvin/docker-compose.yml
- pocketid/docker-compose.yml
- postgres/docker-compose.yml
- spliit/docker-compose.yml
# - spliit/docker-compose.yml
- shlink/docker-compose.yml
- synapse/docker-compose.yml
- updater/docker-compose.yml

View File

@@ -1,48 +0,0 @@
services:
immich-server:
container_name: immich_server
image: ghcr.io/immich-app/immich-server:v2.2.3
volumes:
- ${UPLOAD_LOCATION}:/data
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
environment:
DB_HOSTNAME: shared-postgres
DB_PORT: 5432
DB_USERNAME: ${IMMICH_POSTGRES_USER}
DB_PASSWORD: ${IMMICH_POSTGRES_PASSWORD}
DB_DATABASE_NAME: ${IMMICH_POSTGRES_DB}
ports:
- '2283:2283'
depends_on:
shared-postgres:
condition: service_healthy
redis:
condition: service_started
restart: always
networks:
- postgres-network
immich-machine-learning:
container_name: immich_machine_learning
image: ghcr.io/immich-app/immich-machine-learning:v2.2.3
volumes:
- model-cache:/cache
env_file:
- .env
restart: always
redis:
container_name: immich_redis
image: docker.io/redis:7.4-alpine@sha256:1bf97f21f01b0e7bd4b7b34a26d3b9d8086e41e70c10f262e8a9e0b49b5116a0
healthcheck:
test: redis-cli ping || exit 1
restart: always
volumes:
model-cache:
networks:
postgres-network:
external: true

View File

@@ -41,7 +41,7 @@ services:
database:
container_name: immich_postgres
image: ghcr.io/immich-app/postgres:14-vectorchord0.3.0-pgvectors0.2.0
image: ghcr.io/immich-app/postgres:16-vectorchord0.3.0-pgvectors0.2.0
environment:
POSTGRES_USER: ${IMMICH_POSTGRES_USER}
POSTGRES_PASSWORD: ${IMMICH_POSTGRES_PASSWORD}

View File

@@ -1,47 +0,0 @@
services:
nextcloud_db:
# Remove this service - using shared-postgres instead
nextcloud:
image: nextcloud:31.0.9
restart: always
ports:
- 8081:80
volumes:
- ${NEXTCLOUD_DATA_DIR}:/var/www/html
environment:
- POSTGRES_HOST=shared-postgres
- POSTGRES_PORT=5432
- POSTGRES_DB=${NEXTCLOUD_POSTGRES_DB}
- POSTGRES_USER=${NEXTCLOUD_POSTGRES_USER}
- POSTGRES_PASSWORD=${NEXTCLOUD_POSTGRES_PASSWORD}
- REDIS_HOST=nextcloud-redis
- REDIS_PORT=6379
- NEXTCLOUD_TRUSTED_DOMAINS=${NEXTCLOUD_DOMAIN}
depends_on:
shared-postgres:
condition: service_healthy
networks:
- nextcloud
- postgres-network
cron:
image: nextcloud:31.0.9
container_name: nextcloud-cron
volumes:
- ${NEXTCLOUD_DATA_DIR}:/var/www/html
entrypoint: /cron.sh
restart: always
networks:
- nextcloud
nextcloud-redis:
image: redis:7
restart: always
networks:
- nextcloud
networks:
nextcloud:
postgres-network:
external: true

View File

@@ -1,54 +0,0 @@
services:
broker:
image: docker.io/library/redis:7
restart: always
volumes:
- ${PAPERLESS_REDIS_DATA_DIR}:/data
paperless:
image: ghcr.io/paperless-ngx/paperless-ngx:2.18.4
restart: always
depends_on:
shared-postgres:
condition: service_healthy
broker:
condition: service_started
gotenberg:
condition: service_started
tika:
condition: service_started
ports:
- "8070:8000"
volumes:
- ${PAPERLESS_DATA_DIR}:/usr/src/paperless/data
- ${PAPERLESS_MEDIA_DIR}:/usr/src/paperless/media
- ${PAPERLESS_EXPORT_DIR}/export:/usr/src/paperless/export
- ${PAPERLESS_CONSUME_DIR}:/usr/src/paperless/consume
env_file: .env
environment:
PAPERLESS_REDIS: redis://broker:6379
PAPERLESS_DBHOST: shared-postgres
PAPERLESS_DBPORT: 5432
PAPERLESS_DBNAME: ${PAPERLESS_POSTGRES_DB}
PAPERLESS_DBUSER: ${PAPERLESS_POSTGRES_USER}
PAPERLESS_DBPASS: ${PAPERLESS_POSTGRES_PASSWORD}
PAPERLESS_TIKA_ENABLED: 1
PAPERLESS_TIKA_GOTENBERG_ENDPOINT: http://gotenberg:3000
PAPERLESS_TIKA_ENDPOINT: http://tika:9998
PAPERLESS_APPS: "allauth.socialaccount.providers.openid_connect"
USE_X_FORWARD_HOST: true
USE_X_FORWARDED_PORT: true
PAPERLESS_DISABLE_REGULAR_LOGIN: true
PAPERLESS_REDIRECT_LOGIN_TO_SSO: true
networks:
- postgres-network
gotenberg:
# ... existing gotenberg configuration ...
tika:
# ... existing tika configuration ...
networks:
postgres-network:
external: true

View File

@@ -1,2 +0,0 @@
SHARED_DB_PASSWORD=
SHARED_DB_DATA_DIR=

View File

@@ -1,35 +0,0 @@
services:
shared-postgres:
image: ghcr.io/immich-app/postgres:14-vectorchord0.3.0-pgvectors0.2.0
container_name: shared-postgres
restart: always
env_file:
# Env files for all services using this shared database
- ../spliit/.env
- ../shlink/.env
- ../immich/.env
- ../nextcloud/.env
- ../paperless/.env
- ../synapse/.env
environment:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: ${SHARED_DB_PASSWORD}
POSTGRES_DB: postgres
POSTGRES_INITDB_ARGS: '--encoding=UTF-8 --locale=C --data-checksums'
volumes:
- ${SHARED_DB_DATA_DIR}:/var/lib/postgresql/data
- ./init-scripts:/docker-entrypoint-initdb.d:ro
ports:
- "5431:5432"
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"]
interval: 10s
timeout: 5s
retries: 5
networks:
- postgres-network
networks:
postgres-network:
name: postgres-network
driver: bridge

View File

@@ -1,73 +0,0 @@
#!/bin/bash
set -e
# This script initializes all databases and users for the homelab services
# It runs automatically when the PostgreSQL container starts for the first time
echo "Creating databases and users for homelab services..."
# Function to create database and user with restricted permissions
create_db_and_user() {
local db_name=$1
local db_user=$2
local db_password=$3
echo "Creating database: $db_name with user: $db_user"
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
-- Create database
CREATE DATABASE "$db_name";
-- Create user with password
CREATE USER "$db_user" WITH ENCRYPTED PASSWORD '$db_password';
-- Grant connection to the specific database only
GRANT CONNECT ON DATABASE "$db_name" TO "$db_user";
-- Make user owner of the database
ALTER DATABASE "$db_name" OWNER TO "$db_user";
-- Connect to the specific database to set schema permissions
\c "$db_name"
-- Grant schema permissions
GRANT ALL ON SCHEMA public TO "$db_user";
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "$db_user";
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO "$db_user";
GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA public TO "$db_user";
-- Set default privileges for future objects
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO "$db_user";
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO "$db_user";
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON FUNCTIONS TO "$db_user";
-- Switch back to postgres database
\c postgres
EOSQL
}
# Create databases for each service
# Using environment variables that will be set in your .env file
# Spliit
create_db_and_user "${SPLIIT_POSTGRES_DB}" "${SPLIIT_POSTGRES_USER}" "${SPLIIT_POSTGRES_PASSWORD}"
# Shlink
create_db_and_user "${SHLINK_POSTGRES_DB}" "${SHLINK_POSTGRES_USER}" "${SHLINK_POSTGRES_PASSWORD}"
# Immich
create_db_and_user "${IMMICH_POSTGRES_DB}" "${IMMICH_POSTGRES_USER}" "${IMMICH_POSTGRES_PASSWORD}"
# Nextcloud
create_db_and_user "${NEXTCLOUD_POSTGRES_DB}" "${NEXTCLOUD_POSTGRES_USER}" "${NEXTCLOUD_POSTGRES_PASSWORD}"
# Paperless
create_db_and_user "${PAPERLESS_POSTGRES_DB}" "${PAPERLESS_POSTGRES_USER}" "${PAPERLESS_POSTGRES_PASSWORD}"
# Matrix and co
create_db_and_user "${SYNAPSE_POSTGRES_DB}" "${SYNAPSE_POSTGRES_USER}" "${SYNAPSE_POSTGRES_PASSWORD}"
create_db_and_user "${MAS_POSTGRES_DB}" "${MAS_POSTGRES_USER}" "${MAS_POSTGRES_PASSWORD}"
create_db_and_user "${MAUTRIX_SIGNAL_POSTGRES_DB}" "${MAUTRIX_SIGNAL_POSTGRES_USER}" "${MAUTRIX_SIGNAL_POSTGRES_PASSWORD}"
create_db_and_user "${MAUTRIX_WHATSAPP_POSTGRES_DB}" "${MAUTRIX_WHATSAPP_POSTGRES_USER}" "${MAUTRIX_WHATSAPP_POSTGRES_PASSWORD}"
echo "Database initialization completed successfully!"

View File

@@ -1,35 +0,0 @@
#!/bin/bash
set -e
# Enable vector extensions for Immich database
echo "Enabling vector extensions for Immich database..."
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "${IMMICH_POSTGRES_DB}" <<-EOSQL
-- Create extensions as superuser
CREATE EXTENSION IF NOT EXISTS vectors;
CREATE EXTENSION IF NOT EXISTS earthdistance CASCADE;
-- Grant usage on the extension schemas to immich user
GRANT USAGE ON SCHEMA vectors TO ${IMMICH_POSTGRES_USER};
GRANT USAGE ON SCHEMA earthdistance TO ${IMMICH_POSTGRES_USER};
-- Grant all privileges on extension objects to immich user
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA vectors TO ${IMMICH_POSTGRES_USER};
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA vectors TO ${IMMICH_POSTGRES_USER};
GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA vectors TO ${IMMICH_POSTGRES_USER};
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA earthdistance TO ${IMMICH_POSTGRES_USER};
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA earthdistance TO ${IMMICH_POSTGRES_USER};
GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA earthdistance TO ${IMMICH_POSTGRES_USER};
-- Set default privileges for future extension objects
ALTER DEFAULT PRIVILEGES IN SCHEMA vectors GRANT ALL ON TABLES TO ${IMMICH_POSTGRES_USER};
ALTER DEFAULT PRIVILEGES IN SCHEMA vectors GRANT ALL ON SEQUENCES TO ${IMMICH_POSTGRES_USER};
ALTER DEFAULT PRIVILEGES IN SCHEMA vectors GRANT ALL ON FUNCTIONS TO ${IMMICH_POSTGRES_USER};
ALTER DEFAULT PRIVILEGES IN SCHEMA earthdistance GRANT ALL ON TABLES TO ${IMMICH_POSTGRES_USER};
ALTER DEFAULT PRIVILEGES IN SCHEMA earthdistance GRANT ALL ON SEQUENCES TO ${IMMICH_POSTGRES_USER};
ALTER DEFAULT PRIVILEGES IN SCHEMA earthdistance GRANT ALL ON FUNCTIONS TO ${IMMICH_POSTGRES_USER};
EOSQL
echo "Immich vector extensions enabled successfully!"

View File

@@ -1,27 +0,0 @@
services:
shlink:
image: shlinkio/shlink:4.6
container_name: shlink
restart: always
ports:
- "8085:8080"
environment:
DEFAULT_DOMAIN: ${SHLINK_DOMAIN}
IS_HTTPS_ENABLED: ${SHLINK_IS_HTTPS_ENABLED}
GEOLITE_LICENSE_KEY: ${SHLINK_GEOIP_LICENSE_KEY}
INITIAL_API_KEY: ${SHLINK_API_KEY}
DB_DRIVER: postgres
DB_HOST: shared-postgres
DB_PORT: 5432
DB_NAME: ${SHLINK_POSTGRES_DB}
DB_USER: ${SHLINK_POSTGRES_USER}
DB_PASSWORD: ${SHLINK_POSTGRES_PASSWORD}
depends_on:
shared-postgres:
condition: service_healthy
networks:
- postgres-network
networks:
postgres-network:
external: true

View File

@@ -1,24 +0,0 @@
services:
spliit:
image: ghcr.io/spliit-app/spliit:1.19.0
restart: always
ports:
- 3001:3000
depends_on:
shared-postgres:
condition: service_healthy
env_file:
- .env
environment:
TZ: ${TZ}
POSTGRES_HOST: shared-postgres
POSTGRES_PORT: 5432
POSTGRES_DB: ${SPLIIT_POSTGRES_DB}
POSTGRES_USER: ${SPLIIT_POSTGRES_USER}
POSTGRES_PASSWORD: ${SPLIIT_POSTGRES_PASSWORD}
networks:
- postgres-network
networks:
postgres-network:
external: true

View File

@@ -1,105 +0,0 @@
services:
element-call-auth-service:
image: ghcr.io/element-hq/lk-jwt-service:0.3.0
container_name: element-call-jwt
hostname: auth-server
environment:
- LK_JWT_PORT=8080
- LIVEKIT_URL=https://${LIVEKIT_DOMAIN}/livekit/sfu
- LIVEKIT_KEY=devkey
- LIVEKIT_SECRET=${LIVEKIT_SECRET_KEY}
- LIVEKIT_FULL_ACCESS_HOMESERVERS=${MATRIX_DOMAIN}
restart: always
ports:
- 8071:8080
element-call-livekit:
image: livekit/livekit-server:v1.9.3
command: --config /etc/livekit.yaml
ports:
- "7880:7880/tcp"
- "7881:7881/tcp"
- "7882:7882/tcp"
- "50100-50200:50100-50200/udp"
restart: always
volumes:
- ${LIVEKIT_CONFIG_DIR}/config.yaml:/etc/livekit.yaml:ro
mautrix-signal:
container_name: mautrix-signal
image: dock.mau.dev/mautrix/signal:v0.2511.0
restart: always
volumes:
- ${MAUTRIX_SIGNAL_DATA_DIR}:/data
depends_on:
shared-postgres:
condition: service_healthy
environment:
# Configure database connection for mautrix-signal
MAUTRIX_SIGNAL_DATABASE_TYPE: postgres
MAUTRIX_SIGNAL_DATABASE_URI: postgresql://${MAUTRIX_SIGNAL_POSTGRES_USER}:${MAUTRIX_SIGNAL_POSTGRES_PASSWORD}@shared-postgres:5432/${MAUTRIX_SIGNAL_POSTGRES_DB}?sslmode=disable
networks:
- postgres-network
mautrix-whatsapp:
container_name: mautrix-whatsapp
image: dock.mau.dev/mautrix/whatsapp:v0.2511.0
restart: always
volumes:
- ${MAUTRIX_WHATSAPP_DATA_DIR}:/data
depends_on:
shared-postgres:
condition: service_healthy
environment:
# Configure database connection for mautrix-whatsapp
MAUTRIX_WHATSAPP_DATABASE_TYPE: postgres
MAUTRIX_WHATSAPP_DATABASE_URI: postgresql://${MAUTRIX_WHATSAPP_POSTGRES_USER}:${MAUTRIX_WHATSAPP_POSTGRES_PASSWORD}@shared-postgres:5432/${MAUTRIX_WHATSAPP_POSTGRES_DB}?sslmode=disable
networks:
- postgres-network
mas:
image: ghcr.io/element-hq/matrix-authentication-service:1.6.0
restart: always
working_dir: /config
volumes:
- ${MAS_CONFIG_DIR}:/config
environment:
MAS_CONFIG: /config/config.yaml
# Database connection will be configured in the MAS config file
MAS_DATABASE_URL: postgresql://${MAS_POSTGRES_USER}:${MAS_POSTGRES_PASSWORD}@shared-postgres:5432/${MAS_POSTGRES_DB}?sslmode=disable
ports:
- "8090:8090"
depends_on:
shared-postgres:
condition: service_healthy
networks:
- postgres-network
synapse:
container_name: synapse
image: matrixdotorg/synapse:v1.142.1
restart: always
volumes:
- ${SYNAPSE_CONFIG_DIR}:/data
ports:
- "8008:8008"
environment:
# Synapse database connection will be configured in homeserver.yaml
SYNAPSE_DATABASE_HOST: shared-postgres
SYNAPSE_DATABASE_PORT: 5432
SYNAPSE_DATABASE_USER: ${SYNAPSE_POSTGRES_USER}
SYNAPSE_DATABASE_PASSWORD: ${SYNAPSE_POSTGRES_PASSWORD}
SYNAPSE_DATABASE_NAME: ${SYNAPSE_POSTGRES_DB}
depends_on:
shared-postgres:
condition: service_healthy
mas:
condition: service_started
mautrix-whatsapp:
condition: service_started
networks:
- postgres-network
networks:
postgres-network:
external: true