From 30303aac3d8fba48ad600221258ec7233ffebddb Mon Sep 17 00:00:00 2001 From: Peter Smit Date: Fri, 12 Dec 2025 16:56:39 +0100 Subject: [PATCH] Add audiomuse --- audiomuse/.env.example | 41 +++++++++++ audiomuse/docker-compose.yml | 127 +++++++++++++++++++++++++++++++++++ docker-compose.yml | 1 + 3 files changed, 169 insertions(+) create mode 100644 audiomuse/.env.example create mode 100644 audiomuse/docker-compose.yml diff --git a/audiomuse/.env.example b/audiomuse/.env.example new file mode 100644 index 0000000..d7bf835 --- /dev/null +++ b/audiomuse/.env.example @@ -0,0 +1,41 @@ +# Copy this file to `.env` and fill in the values that match your setup. +# Docker Compose files under deployment/ read these variables to keep settings in one place. +# +# IMPORTANT: +# 1. This file must be named exactly ".env" (not .env.txt or .env.example) +# 2. It must be in the SAME directory as your docker-compose-*.yaml file +# 3. Do NOT use spaces around the = sign +# 4. Do NOT use quotes around values (unless required by the value itself) +# 5. After editing, restart containers: docker-compose down && docker-compose up -d +# +# SPECIAL CHARACTERS IN VALUES: +# If your password or API key contains special characters like: $ ` " ' \ # ! & * ( ) [ ] { } | ; < > ? +# you may need to: +# - Avoid quotes entirely: GEMINI_API_KEY=AIza$pecial!Key (usually works) +# - OR use single quotes if the value has $: GEMINI_API_KEY='AIza$pecial!Key' +# - OR escape with backslash: GEMINI_API_KEY=AIza\$pecial\!Key +# Most problematic characters: $ (variable expansion), ` (command substitution), " (string delimiter) +# +# TROUBLESHOOTING: +# If API keys don't work, verify: +# - File is named ".env" exactly (check with: ls -la) +# - No spaces: GEMINI_API_KEY=AIza... 
(not GEMINI_API_KEY = "AIza...") +# - No unescaped special characters (especially $ ` " ') +# - Restart containers after changing this file +# If all else fails, try hardcoding the value directly in docker-compose-*.yaml to isolate the issue + +# --- Jellyfin --- +JELLYFIN_USER_ID= +JELLYFIN_TOKEN= +JELLYFIN_URL=https://jellyfin.smittenfeld.nl + +# --- Shared backend configuration --- +AUDIOMUSE_POSTGRES_USER=audiomuse +AUDIOMUSE_POSTGRES_PASSWORD= +AUDIOMUSE_POSTGRES_DB=audiomusedb +# +## --- Remote worker integration --- +#WORKER_URL=http://worker.example.com:8029/worker +#WORKER_POSTGRES_HOST=server.example.com +#WORKER_REDIS_URL=redis://server.example.com:6379/0 + diff --git a/audiomuse/docker-compose.yml b/audiomuse/docker-compose.yml new file mode 100644 index 0000000..563728f --- /dev/null +++ b/audiomuse/docker-compose.yml @@ -0,0 +1,127 @@ +version: '3.8' +services: + # Redis service for RQ (task queue) + audiomuse-ai-redis: + image: redis:7-alpine + container_name: audiomuse-redis + ports: + - "6379:6379" # Expose Redis port to the host + volumes: + - redis-data:/data # Persistent storage for Redis data + networks: + - audiomuse + restart: unless-stopped + + # PostgreSQL database service + audiomuse-ai-postgres: + image: postgres:15-alpine + container_name: audiomuse-postgres + env_file: + - .env + environment: + POSTGRES_USER: ${AUDIOMUSE_POSTGRES_USER} + POSTGRES_PASSWORD: ${AUDIOMUSE_POSTGRES_PASSWORD} + POSTGRES_DB: ${AUDIOMUSE_POSTGRES_DB} +# ports: +# - "5432:5432" # Expose PostgreSQL port to the host + volumes: + - postgres-data:/var/lib/postgresql/data # Persistent storage for PostgreSQL data + networks: + - audiomuse + restart: unless-stopped + + # AudioMuse-AI Flask application service + audiomuse-ai-flask: + image: ghcr.io/neptunehub/audiomuse-ai:latest # Reflects deployment.yaml + container_name: audiomuse-ai-flask-app + ports: + - "8013:8000" + env_file: + - .env + environment: + SERVICE_TYPE: "flask" # Tells the container to run the Flask 
app + MEDIASERVER_TYPE: "jellyfin" # Specify the media server type + POSTGRES_USER: ${AUDIOMUSE_POSTGRES_USER} + POSTGRES_PASSWORD: ${AUDIOMUSE_POSTGRES_PASSWORD} + POSTGRES_DB: ${AUDIOMUSE_POSTGRES_DB} + POSTGRES_PORT: "5432" + POSTGRES_HOST: "audiomuse-ai-postgres" # Service name of the postgres container + REDIS_URL: "redis://audiomuse-ai-redis:6379/0" # Connects to the 'redis' service + AI_MODEL_PROVIDER: "OPENAI" + OPENAI_API_KEY: "any-random-string" # Dummy key to enable local model usage + OPENAI_SERVER_URL: "http://172.17.0.1:12434/engines/llama.cpp/v1/chat/completions" # This is the API endpoint for local DMR model from within the Docker container. + OPENAI_MODEL_NAME: "ai/qwen3:0.6B-Q4_0" + TEMP_DIR: "/app/temp_audio" + # Use tmpfs to process audio files in memory for better performance. This reduces disk I/O but might use more RAM. + # Mounted directories are not shared between containers, so each container gets its own tmpfs instance. + # Increase tmpfs size for very large audio files as needed. + # If host RAM is limited, use a Docker volume instead of tmpfs. 
+ # For more info on tmpfs: https://docs.docker.com/engine/storage/tmpfs/ + tmpfs: + - /app/temp_audio:rw,size=1000m + depends_on: + - audiomuse-ai-redis + - audiomuse-ai-postgres + restart: unless-stopped + networks: + - audiomuse + models: + - llm # Specify that LLM models are used in this service + + # AudioMuse-AI RQ Worker service + audiomuse-ai-worker: + image: ghcr.io/neptunehub/audiomuse-ai:latest # Reflects deployment.yaml + container_name: audiomuse-ai-worker-instance + env_file: + - .env + environment: + SERVICE_TYPE: "worker" # Tells the container to run the RQ worker + MEDIASERVER_TYPE: "jellyfin" # Specify the media server type + JELLYFIN_USER_ID: "${JELLYFIN_USER_ID}" + JELLYFIN_TOKEN: "${JELLYFIN_TOKEN}" + JELLYFIN_URL: "${JELLYFIN_URL}" + # DATABASE_URL is now constructed by config.py from the following: + POSTGRES_USER: ${AUDIOMUSE_POSTGRES_USER} + POSTGRES_PASSWORD: ${AUDIOMUSE_POSTGRES_PASSWORD} + POSTGRES_DB: ${AUDIOMUSE_POSTGRES_DB} + POSTGRES_PORT: "5432" + POSTGRES_HOST: "audiomuse-ai-postgres" # Service name of the postgres container + REDIS_URL: "redis://audiomuse-ai-redis:6379/0" # Connects to the 'redis' service + AI_MODEL_PROVIDER: "OPENAI" + OPENAI_API_KEY: "any-random-string" # Dummy key to enable local model usage + OPENAI_SERVER_URL: "http://172.17.0.1:12434/engines/llama.cpp/v1/chat/completions" # This is the API endpoint for local DMR model from within the Docker container. + OPENAI_MODEL_NAME: "ai/qwen3:0.6B-Q4_0" + TEMP_DIR: "/app/temp_audio" + # Use tmpfs to process audio files in memory for better performance. This reduces disk I/O but might use more RAM. + # Mounted directories are not shared between containers, so each container gets its own tmpfs instance. + # Increase tmpfs size for very large audio files as needed. + # If host RAM is limited, use a Docker volume instead of tmpfs. 
+ # For more info on tmpfs: https://docs.docker.com/engine/storage/tmpfs/ + tmpfs: + - /app/temp_audio:rw,size=1000m + depends_on: + - audiomuse-ai-redis + - audiomuse-ai-postgres + restart: unless-stopped + networks: + - audiomuse + models: + - llm # Specify that LLM models are used in this service + +# Using Docker Model Runner (DMR) +# - Make sure your Docker Engine version supports the AI features and that the docker-model-plugin is installed. +# - Follow Docker's setup guide: https://docs.docker.com/ai/model-runner/get-started/#docker-engine +# - Once DMR is configured, you can download and run AI models locally just like Docker images — no code changes to this compose file are required. +# - For model integration with docker-compose, see: https://docs.docker.com/ai/compose/models-and-compose/ +models: + llm: + model: ai/qwen3:0.6B-Q4_0 # Lightweight local model for testing. Change as needed; if changed, ensure it matches OPENAI_MODEL_NAME. + + +# Define volumes for persistent data and temporary files +volumes: + redis-data: + postgres-data: + +networks: + audiomuse: \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml index 5431c02..d6a1842 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,5 +1,6 @@ include: - audiobookshelf/docker-compose.yml + - audiomuse/docker-compose.yml - baikal/docker-compose.yml - gitea/docker-compose.yml - immich/docker-compose.yml