├── requirements.txt ├── docker ├── mopidy │ ├── pulse-client.conf │ └── mopidy.example.conf ├── docker-compose.yml └── entrypoint.sh ├── .github └── workflows │ ├── sync_develop_branch.yml │ ├── sync-description.yml │ ├── cleanup-registries.yml │ └── build-docker.yml ├── README.md └── Dockerfile /requirements.txt: -------------------------------------------------------------------------------- 1 | # Added only the most basic pip packages and dependencies 2 | # Note: All other packages can be installed by variable PIP_PACKAGES on runtime 3 | Mopidy-Local 4 | Mopidy-MPD -------------------------------------------------------------------------------- /docker/mopidy/pulse-client.conf: -------------------------------------------------------------------------------- 1 | # Connect to the host's server using the mounted UNIX socket 2 | default-server = unix:/tmp/pulseaudio.socket 3 | 4 | # Prevent a server running in the container 5 | autospawn = no 6 | daemon-binary = /bin/true 7 | 8 | # Prevent the use of shared memory 9 | enable-shm = false 10 | -------------------------------------------------------------------------------- /docker/mopidy/mopidy.example.conf: -------------------------------------------------------------------------------- 1 | [core] 2 | data_dir = /var/lib/mopidy 3 | 4 | [audio] 5 | #output = tee name=t ! queue ! autoaudiosink t. ! queue ! udpsink host=0.0.0.0 port=5555 6 | output = audioresample ! audioconvert ! audio/x-raw,rate=48000,channels=2,format=S16LE ! wavenc ! filesink location=/tmp/snapfifo 7 | 8 | [http] 9 | hostname = 0.0.0.0 10 | default_app = iris 11 | 12 | [mpd] 13 | hostname = 0.0.0.0 14 | 15 | [spotify] 16 | # Fast startup because we use the Spotify HTTP API to load these instead 17 | # Makes playlists unavailable under Browse > Spotify. 
18 | allow_playlists = false -------------------------------------------------------------------------------- /.github/workflows/sync_develop_branch.yml: -------------------------------------------------------------------------------- 1 | # Workflow to keep the two branches main and develop in sync to follow the GitFlow methodology 2 | # https://www.atlassian.com/git/tutorials/comparing-workflows/gitflow-workflow 3 | name: Sync 4 | on: 5 | push: 6 | branches: 7 | - main 8 | 9 | jobs: 10 | sync-branches: 11 | runs-on: ubuntu-latest 12 | name: Syncing branches main and develop 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v2 16 | - name: Set up Node 17 | uses: actions/setup-node@v1 18 | with: 19 | node-version: 12 20 | - name: Opening pull request 21 | id: pull 22 | uses: tretuna/sync-branches@1.4.0 23 | with: 24 | GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} 25 | FROM_BRANCH: "main" 26 | TO_BRANCH: "develop" 27 | CONTENT_COMPARISON: true -------------------------------------------------------------------------------- /.github/workflows/sync-description.yml: -------------------------------------------------------------------------------- 1 | name: Sync description and README.md to Docker Hub 2 | 3 | on: 4 | push: 5 | branches: 6 | - 'main' 7 | paths: 8 | - README.md 9 | - .github/workflows/sync-description.yml 10 | pull_request: 11 | branches: 12 | - 'main' 13 | paths: 14 | - README.md 15 | - .github/workflows/sync-description.yml 16 | #schedule: 17 | # - cron: '0 12 * * *' # everyday at 12am 18 | 19 | # A workflow run is made up of one or more jobs that can run sequentially or in parallel 20 | jobs: 21 | # This workflow contains a single job called "sync-readme" 22 | sync-readme: 23 | # The type of runner that the job will run on 24 | runs-on: ubuntu-latest 25 | 26 | # Set environment variables 27 | env: 28 | DOCKERHUB_SLUG: ${{secrets.DOCKERHUB_USERNAME}}/mopidy 29 | 30 | # Steps represent a sequence of tasks that will be executed as part of the job 31 | 
steps: 32 | # Github action to get branch or tag information without the /ref/* prefix 33 | - name: Checkout 34 | uses: actions/checkout@v4 35 | 36 | # Github action to sync description files to docker hub 37 | - name: Docker Hub README & description sync 38 | uses: peter-evans/dockerhub-description@v4 39 | with: 40 | username: ${{ secrets.DOCKERHUB_USERNAME }} 41 | password: ${{ secrets.DOCKERHUB_TOKEN }} 42 | repository: ${{ env.DOCKERHUB_SLUG }} 43 | readme-filepath: ./README.md 44 | short-description: Mopidy music server with Iris Web-UI, Spotify support and many other extensions. 45 | 46 | -------------------------------------------------------------------------------- /.github/workflows/cleanup-registries.yml: -------------------------------------------------------------------------------- 1 | name: Cleanup Container Registries 2 | 3 | on: 4 | # Run weekly on Sunday at 3 AM UTC 5 | schedule: 6 | - cron: '0 3 * * 0' 7 | # Allow manual trigger from the Actions tab 8 | workflow_dispatch: 9 | inputs: 10 | dry_run: 11 | description: 'Dry run mode (true does not delete, only simulates)' 12 | required: true 13 | default: 'true' 14 | type: choice 15 | options: 16 | - 'true' 17 | - 'false' 18 | 19 | # 2. 
Environment variables available to all jobs in THIS workflow 20 | env: 21 | DOCKERHUB_SLUG: ${{ secrets.DOCKERHUB_USERNAME }}/mopidy 22 | GHCR_SLUG: ghcr.io/${{ github.repository_owner }}/mopidy 23 | 24 | jobs: 25 | cleanup: 26 | name: Run Registry Cleanup 27 | runs-on: ubuntu-latest 28 | permissions: 29 | packages: write # Required for the GHCR cleanup action 30 | 31 | steps: 32 | # Github action to get branch or tag information 33 | - name: Checkout 34 | uses: actions/checkout@v4 35 | 36 | - name: Clean up Docker Hub 37 | uses: lostlink/docker-cleanup@v1.0.1 38 | with: 39 | username: ${{ secrets.DOCKERHUB_USERNAME }} 40 | password: ${{ secrets.DOCKERHUB_TOKEN }} 41 | repositories: ${{ env.DOCKERHUB_SLUG }} 42 | # Custom rule to delete all tags starting with 'test_' older than 30 days 43 | custom-patterns: | 44 | { 45 | "^test_.*": 30 46 | } 47 | dry-run: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.dry_run || 'false' }} 48 | verbose: true 49 | 50 | - name: Clean up GitHub Container Registry (GHCR) 51 | uses: dataaxiom/ghcr-cleanup-action@v1 52 | with: 53 | # The repository to clean up. Optional, current will be retrieved dyn. from environment. 
54 | # repository: ${{ env.GHCR_SLUG }} 55 | # Delete all tags starting with 'test_' 56 | delete-tags: 'test_*' 57 | # But only if they are older than 30 days 58 | older-than: '30 days' 59 | keep-n-untagged: 1 60 | dry-run: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.dry_run || 'false' }} -------------------------------------------------------------------------------- /docker/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.5' 2 | services: 3 | mopidy: 4 | container_name: mopidy 5 | restart: always 6 | 7 | ## Run container as root 8 | # This is required to modify the PUID and PGID by entrypoint script 9 | user: root 10 | 11 | ## Run cointainer in privileged mode (can help with permission issues) 12 | # privileged: true 13 | 14 | ## Add container to audio group 15 | # change group name or id to your system's audio group 16 | group_add: 17 | - audio 18 | 19 | ## Add audio device 20 | # Change to your system's audio device 21 | devices: 22 | - /dev/snd 23 | 24 | volumes: 25 | # Mopidy config files 26 | - './config:/config' 27 | 28 | # Local media dir 29 | #- './media:/media:ro' 30 | 31 | # Store mopidy library and images on host (persistent) 32 | #- './local:$HOME/local' 33 | 34 | # Keep spotify credetnials (persistent) 35 | #- './.spotify:$HOME/spotify' 36 | 37 | ## Host Audio Support 38 | # --- ALSA --- 39 | # Mopidy Config: [audio] output=alsasink 40 | # Optional: If you have specific configurations or adjustments defined 41 | #- /etc/asound.conf:/etc/asound.conf 42 | #- /usr/share/alsa:/usr/share/alsa 43 | 44 | # --- PulseAudio --- 45 | # Mopidy Config: [audio] output=pulsesink 46 | # PulseAudio config (path should match with PUID) 47 | - $XDG_RUNTIME_DIR/pulse/native:/tmp/pulseaudio.socket # socket 48 | ports: 49 | - '6600:6600' # JSON-RPC API and MPD protocol 50 | - '6680:6680' # HTTP WebUI 51 | 52 | image: 'jojo141185/mopidy:latest' # ImageName:Version 53 | 54 | 
environment: 55 | # Set mopidy user and audio group id to match host's specific permissions 56 | - PUID=1000 57 | - PGID=29 58 | 59 | # PulseAudio server socket for communication (path should match with PUID) 60 | - PULSE_SERVER=unix:/tmp/pulseaudio.socket 61 | #- PULSE_COOKIE=/tmp/pulseaudio.cookie # (optional) 62 | 63 | # Install additional pip packages 64 | #- PIP_PACKAGES=Mopidy-TuneIn Mopidy-Youtube yt-dlp 65 | -------------------------------------------------------------------------------- /docker/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Function to print section headers 4 | print_section() { 5 | echo -e "\n\033[1;34m=== $1 ===\033[0m" # Blue bold header 6 | } 7 | 8 | print_section "Starting Entrypoint Script" 9 | echo "The entrypoint script is being run as user \"$(whoami)\"." 10 | 11 | # The precreated user account "mopidy" and the group "audio" has a randomly generated UID/GID by default. 12 | # If the environment variables PGID and PUID are set, this will change the UID/GID of the container's user. 13 | # This helps matching the container's UID/GID with the host's to avoid permission issues on the mounted volume. 14 | # Note: To do the user id mapping, the container needs to be run as root 15 | 16 | # Only proceed with user and group mapping if the script is run as root 17 | if [ -n "$PGID" ] || [ -n "$PUID" ]; then 18 | print_section "Mapping UID/GID" 19 | if [ "$(whoami)" = "root" ]; then 20 | echo "Mapping UID/GID of user \"$DOCKER_USER\"." 21 | 22 | # Check and change user and group if necessary 23 | if [ -n "$PGID" ] && [ -n "$PUID" ]; then 24 | echo "Requested change of UID: $PUID and GID: $PGID for running user." 25 | elif [ -n "$PGID" ]; then 26 | echo "Requested change of GID: $PGID for running group." 27 | elif [ -n "$PUID" ]; then 28 | echo "Requested change of UID: $PUID for running user." 
29 | fi 30 | 31 | # Check and change user UID if necessary 32 | DOCKER_USER_CURRENT_ID=$(id -u $DOCKER_USER) 33 | if [ -n "$PUID" ]; then 34 | if [ $DOCKER_USER_CURRENT_ID -eq $PUID ]; then 35 | echo "User $DOCKER_USER is already mapped to $DOCKER_USER_CURRENT_ID. Nice!" 36 | else 37 | DOCKER_USER_EXIST_NAME=$(getent passwd $PUID | cut -d: -f1) 38 | if [ -n "$DOCKER_USER_EXIST_NAME" ]; then 39 | echo "User ID is already taken by user: $DOCKER_USER_EXIST_NAME" 40 | else 41 | echo "Changing $DOCKER_USER user to UID $PUID" 42 | usermod --uid $PUID $DOCKER_USER 43 | fi 44 | fi 45 | fi 46 | 47 | # Check and change group GID if necessary 48 | if [ -n "$PGID" ]; then 49 | DOCKER_GROUP_CURRENT_ID=$(getent group $DOCKER_GROUP | cut -d: -f3) 50 | if [ $DOCKER_GROUP_CURRENT_ID -eq $PGID ]; then 51 | echo "Group $DOCKER_GROUP is already mapped to $DOCKER_GROUP_CURRENT_ID. Nice!" 52 | else 53 | DOCKER_GROUP_EXIST_NAME=$(getent group $PGID | cut -d: -f1) 54 | if [ -n "$DOCKER_GROUP_EXIST_NAME" ]; then 55 | echo "Group ID is already taken by group: $DOCKER_GROUP_EXIST_NAME" 56 | else 57 | echo "Changing $DOCKER_GROUP group to GID $PGID" 58 | groupmod --gid $PGID $DOCKER_GROUP 59 | fi 60 | fi 61 | fi 62 | 63 | # Change ownership of all relevant directories 64 | echo "Changing ownership of all relevant directories." 65 | chown -R $DOCKER_USER:$DOCKER_GROUP $HOME /iris /VERSION /entrypoint.sh 66 | else 67 | # If not root, skip user and group mapping 68 | echo "Skipping UID/GID mapping, because running as non-root user." 
69 | fi 70 | fi 71 | 72 | # Update PulseAudio client.conf with PULSE_SERVER 73 | if [ -n "$PULSE_SERVER" ]; then 74 | print_section "Configuring PulseAudio" 75 | export PULSE_SERVER="$PULSE_SERVER" 76 | if [ "$(whoami)" = "root" ]; then 77 | echo "Setting default PulseAudio server to \"$PULSE_SERVER\" in /etc/pulse/client.conf" 78 | # Ensure the PULSE_SERVER line exists in the file, and replace it 79 | sed -i.bak "s|^default-server = .*|default-server = $PULSE_SERVER|" /etc/pulse/client.conf 80 | else 81 | # If not root, skip user and group mapping 82 | echo "Skipping default server setting in PulseAudio client config, because running as non-root user." 83 | fi 84 | fi 85 | 86 | # Set PULSE_COOKIE_DATA only if PULSE_COOKIE_DATA is not empty 87 | if [ -n "$PULSE_COOKIE_DATA" ]; then 88 | print_section "Setting Pulse Cookie" 89 | echo "Setting PULSE_COOKIE_DATA to \"$HOME/pulse.cookie\"" 90 | echo -ne $(echo $PULSE_COOKIE_DATA | sed -e 's/../\\x&/g') > $HOME/pulse.cookie 91 | chown $DOCKER_USER:$DOCKER_GROUP $HOME/pulse.cookie 92 | export PULSE_COOKIE="$HOME/pulse.cookie" 93 | elif [ -n "$PULSE_COOKIE" ]; then 94 | export PULSE_COOKIE="$PULSE_COOKIE" 95 | fi 96 | 97 | # Install additional PIP packages 98 | if [ -n "$PIP_PACKAGES" ]; then 99 | print_section "Installing Additional PIP-Packages" 100 | echo "-- INSTALLING PIP PACKAGES: $PIP_PACKAGES --" 101 | /opt/venv/bin/python3 -m pip install --no-cache $PIP_PACKAGES 102 | fi 103 | 104 | # Execute the passed command as the specified user 105 | print_section "Executing Main Process" 106 | if [ "$(whoami)" = "root" ]; then 107 | echo "Executing main process \"$@\" as non-root user \"$DOCKER_USER\" with UID $(id -u $DOCKER_USER)" 108 | if [[ $# -gt 0 ]]; then 109 | exec sudo -u $DOCKER_USER -H "$@" 110 | else 111 | exec sudo -u $DOCKER_USER -H bash 112 | fi 113 | else 114 | echo "Executing main process \"$@\" as docker user \"$(whoami)\" with UID $(id -u $DOCKER_USER)" 115 | if [[ $# -gt 0 ]]; then 116 | exec "$@" 117 | 
else 118 | exec bash 119 | fi 120 | fi -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # mopidy-docker: A Docker image for mopidy 2 | 3 | [Mopidy](https://mopidy.com/) an extensible music server that plays music from local disk, Spotify, Tidal, Youtube, SoundCloud, TuneIn, and more. 4 | 5 | ## Links 6 | 7 | Source: [GitHub](https://github.com/jojo141185/mopidy-docker) 8 | Docker-Images: [DockerHub](https://hub.docker.com/r/jojo141185/mopidy) 9 | 10 | ## About 11 | 12 | Using Docker images built on top of this repo, mopidy with its extensions can be easily run on multiple machines with different architectures (amd64, arm). 13 | Besides the music server mopidy, the image includes the great web interface [IRIS](https://github.com/jaedb/Iris/) from jaedb. 14 | Please note that the image only includes a minimal set of the basic mopidy extensions: 15 | 16 | - Mopidy-Local 17 | - Mopidy-MPD 18 | 19 | Other useful extensions can be installed and keep updated by pip on container startup. Simply add them to the environment variable **PIP_PACKAGES** as a list of package names. 20 | 21 | ## Prerequisites 22 | 23 | You need to have Docker up and running on a Linux machine. 24 | See the official documentation to [install the Docker Engine](https://docs.docker.com/engine/install/) 25 | 26 | ## Get the image 27 | 28 | Here is the [repository](https://hub.docker.com/repository/docker/jojo141185/mopidy) on DockerHub. 
29 | 30 | Getting the image from DockerHub is as simple as typing: 31 | 32 | `docker pull jojo141185/mopidy:release` 33 | 34 | You can pull other, probably less stable image builds by the following tags 35 | TAG|DESCRIPTION 36 | ---|--- 37 | latest | Image build from main / master branches 38 | develop | Image build from develop branches (probably untested) 39 | 40 | ## Usage 41 | 42 | You can start the mopidy container by simply using the command [`docker run`](https://docs.docker.com/engine/reference/commandline/run/) or the [docker compose](https://docs.docker.com/compose/) tool where you can store your docker configuration in a seperate yaml file. 43 | 44 | In both ways you need to adapt the command or docker-compose.yaml file to your specific host environment. 45 | 46 | ### Manage Docker as a non-root user 47 | 48 | If you need to run docker as non-root user then you need to add it to the docker group. 49 | 50 | 1. Create the docker group, if it does not already exist 51 | `sudo groupadd docker` 52 | 2. Add your user to the docker group 53 | `sudo usermod -aG docker $USER` 54 | 3. Log in to the new docker group (should avoid having to log out & log in again): 55 | `newgrp docker` 56 | 4. Check if docker can be run without root 57 | `docker run hello-world` 58 | Try to reboot if you still get permission error! 59 | 60 | **Warning:** 61 | The docker group grants privileges equivalent to the root user. 
For details on how this impacts security in your system, see [Docker Daemon Attack Surface](https://docs.docker.com/engine/security/#docker-daemon-attack-surface) 62 | 63 | ### docker run 64 | 65 | Start the mopidy docker container with the docker run command: 66 | 67 | docker run -d \ 68 | --name mopidy \ 69 | --user root \ 70 | --group-add audio \ 71 | --device /dev/snd \ 72 | -v "$PWD/config:/config" \ 73 | -v "$PWD/media:/media:ro" \ 74 | -v "$PWD/local:/var/lib/mopidy/local" \ 75 | -p 6600:6600 -p 6680:6680 \ 76 | jojo141185/mopidy:release 77 | 78 | The following table describes the docker arguments and environment variables: 79 | ARGUMENT|DEFAULT|DESCRIPTION 80 | ---|---|---| 81 | --user | root | (recommended) This container should be run as root to avoid permission issues. Although the container operates under root privileges, the main process is executed as a restricted user named "mopidy" to minimize security risks. 82 | --group-add | audio | (recommended) Add host's audio group to container. 83 | --device | /dev/snd | (optional) For ALSA share the hosts sound device /dev/snd. For pulseaudio see this [guide](https://github.com/mviereck/x11docker/wiki/Container-sound:-ALSA-or-Pulseaudio) or use [snapcast](https://github.com/badaix/snapcast) for network / multiroom audio solution. 84 | -v | $PWD/config:/config | (essential) Cange $PWD/config path to the directory on host where your mopidy.conf is located. 85 | -v | $PWD/media:/media:ro | (recommended) Cange $PWD/media path to directory with local media files (ro=read only). 86 | -v | $PWD/local:/var/lib/mopidy/local | (recommended) Cange $PWD/local path to directory to store local metadata, libraries and playlists. 87 | -p | 6600:6600 | (recommended) Exposes MPD server to port 6600 on host (if you use for example ncmpcpp client). 88 | -p | 6680:6680 | (recommended) Exposes HTTP server to port 6680 on host (this is essential, if you use the WebUI as client). 
89 | -p | 5555:5555/udp | (optional) Exposes UDP streaming on port 5555 for FIFE sink (e.g. for visualizers). 90 | -e | PIP_PACKAGES= | (optional) Environment variable to inject some pip packages and mopidy extensions (i.e. Mopidy-Tidal) on upstart of container. 91 | -e | PUID= | (optional) Environment variable to define the user ID of the mopidy user to match with host's user ID. By default it is running with user mopidy (UID 102). 92 | -e | PGID= | (optional) Environment variable to define the group ID of the mopidy user to match with host's group ID. By default it is running with group audio (GID 29). 93 | -e | PULSE_SERVER= | (optional) Environment variable to define the PulseAudio socket to match with host's. This is optional and only needed if you use PulseAudio. 94 | -e | PULSE_COOKIE= | (optional) Environment variable to pass PulseAudio cookie path. This is optional and only needed if you use PulseAudio. 95 | -e | PULSE_COOKIE_DATA= | (optional) Environment variable to pass PulseAudio cookie data. This is optional and only needed if you use PulseAudio. 96 | 97 | Note: 98 | 99 | - The host user specified by PUID should have access to the local volume mounts and its group specified by PGID must be a member of the system audio group to avoid permission issues with the audio device. 100 | - Depending on the number and size of PIP_PACKAGES you have, it may take a while to start on first run. Please be patient and look at the logs. 101 | - On problems accessing the web interface, check mopidy.conf using the correct IP address. Try "hostname: 0.0.0.0" to listen to any interface and check that "allowed_origins = " has no restrictions (is empty). 102 | 103 | ### docker compose 104 | 105 | First check that Docker compose is already [installed](https://docs.docker.com/compose/install/) on your host. 106 | 107 | 1. 
Copy the [docker-compose.yml](https://github.com/jojo141185/mopidy-docker/blob/main/docker/docker-compose.yml) file from this repository to the current directory. 108 | 2. Make sure that your mopidy config file (mopidy.conf) is placed in a subfolder named "config". 109 | You can also add / modify the volume paths in the yaml file, i.e. to your local media folder or the directory where the metadata information will be stored on host (see table above). 110 | 3. Start the mopidy container with the following command 111 | Compose V1: `docker-compose up -d` 112 | Compose V2: `docker compose up -d` 113 | 114 | ## Build 115 | 116 | You can build (or rebuild) the image by opening a terminal from the root of the repository and issuing the following command: 117 | 118 | `docker build --build-arg IMG_VERSION="release" -t jojo141185/mopidy .` 119 | 120 | It will take a long time, espacialy on a Raspberry Pi. When it's finished, you can run the container following the previous instructions. 121 | Just be careful to use the tag from your own built. 122 | 123 | ## References 124 | 125 | Spotify disabled access to libspotify on May 16 2022. To be able to use Spotify as audio source, the mopidy-Spotify-Plugin was tweaked by @kingosticks. It now uses the GStreamer plugin "gst-plugins-spotify" in the background to play Spotify songs. 126 | For this reason this mopidy container should be seen as an alpha version with limited features in interaction with Spotify (i.e. no seeking support). 127 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Stage 1: Build GStreamer plugins written in Rust 3 | # 4 | # This stage uses a Rust environment to compile the custom GStreamer plugins 5 | # from source. The only output is the compiled shared library (.so file). 
6 | ################################################################################ 7 | FROM rust:slim-bullseye AS rust-builder 8 | 9 | LABEL org.opencontainers.image.authors="jojo141185" 10 | LABEL org.opencontainers.image.source="https://github.com/jojo141185/mopidy-docker/" 11 | 12 | # Automatic platform ARGs for BuildKit 13 | # This feature is only available when using the BuildKit backend. 14 | ARG TARGETPLATFORM 15 | ARG TARGETARCH 16 | ARG TARGETVARIANT 17 | # Define Image version [latest, develop, release] 18 | ARG IMG_VERSION 19 | 20 | # Print Info about current build 21 | RUN printf "I'm building for TARGETPLATFORM=${TARGETPLATFORM}" \ 22 | && printf ", TARGETARCH=${TARGETARCH}" \ 23 | && printf ", TARGETVARIANT=${TARGETVARIANT} \n" \ 24 | && printf "With uname -s : " && uname -s \ 25 | && printf "and uname -m : " && uname -mm \ 26 | && printf "\n --------------------------- \n" \ 27 | && printf "Build Image in version: ${IMG_VERSION}" 28 | 29 | # Install build dependencies for the Rust plugin 30 | RUN apt-get update && apt-get install -yq --no-install-recommends \ 31 | build-essential \ 32 | cmake \ 33 | curl \ 34 | jq \ 35 | git \ 36 | patch \ 37 | libgstreamer-plugins-base1.0-dev \ 38 | libgstreamer1.0-dev \ 39 | libcsound64-dev \ 40 | libclang-11-dev \ 41 | libpango1.0-dev \ 42 | libdav1d-dev \ 43 | # libgtk-4-dev \ Only in bookworm 44 | && rm -rf /var/lib/apt/lists/* 45 | 46 | WORKDIR /usr/src/gst-plugins-rs 47 | 48 | # --------------------------------- 49 | # --- GStreamer Plugins --- 50 | # 51 | # Get source of gst-plugins-rs 52 | # 53 | # # - Select the branch or tag to use 54 | # RUN if [ "$IMG_VERSION" = "latest" ]; then \ 55 | # GST_PLUGINS_RS_TAG=main; \ 56 | # elif [ "$IMG_VERSION" = "develop" ]; then \ 57 | # GST_PLUGINS_RS_TAG=main; \ 58 | # elif [ "$IMG_VERSION" = "release" ]; then \ 59 | # GST_PLUGINS_RS_TAG=$(curl -s https://gitlab.freedesktop.org/api/v4/projects/gstreamer%2Fgst-plugins-rs/repository/tags | jq -r '.[0].name'); \ 60 | # 
else \ 61 | # echo "Invalid version info for gst-plugins-rs: $IMG_VERSION"; \ 62 | # exit 1; \ 63 | # fi \ 64 | # && echo "Selected branch or tag for gst-plugins-rs: $GST_PLUGINS_RS_TAG" \ 65 | # # - Clone repository of gst-plugins-rs to workdir 66 | # && git clone -c advice.detachedHead=false \ 67 | # --single-branch --depth 1 \ 68 | # --branch ${GST_PLUGINS_RS_TAG} \ 69 | # https://gitlab.freedesktop.org/gstreamer/gst-plugins-rs.git ./ 70 | # 71 | # # - EXPERIMENTAL: For gstreamer-spotify set upgraded version number of dependency librespot to 0.4.2 72 | # RUN sed -i 's/librespot = { version = "0.4", default-features = false }/librespot = { version = "0.4.2", default-features = false }/g' audio/spotify/Cargo.toml 73 | 74 | # We currently require a forked version of gstreamer-spotify plugin which supports token-based login 75 | RUN GST_PLUGINS_RS_TAG="spotify-logging-librespot-ba3d501b" \ 76 | && echo "Selected branch or tag for gst-plugins-rs: $GST_PLUGINS_RS_TAG" \ 77 | # - Clone repository of gst-plugins-rs to workdir 78 | && git clone -c advice.detachedHead=false \ 79 | --single-branch --depth 1 \ 80 | --branch ${GST_PLUGINS_RS_TAG} \ 81 | https://gitlab.freedesktop.org/kingosticks/gst-plugins-rs.git ./ 82 | 83 | 84 | # Build GStreamer plugins written in Rust 85 | # 86 | # Set Cargo environment variables 87 | # Enabling cargo's sparse registry protocol is the easiest fix for 88 | # Error "Value too large for defined data type;" on arm/v7 and linux/386 89 | # https://github.com/rust-lang/cargo/issues/8719 90 | #ENV CARGO_REGISTRIES_CRATES_IO_PROTOCOL sparse 91 | ENV DEST_DIR="/target/gst-plugins-rs" 92 | ENV CARGO_PROFILE_RELEASE_DEBUG="false" 93 | # Cargo Build, with options: 94 | # --release: do a release (not dev) build 95 | # --no-default-features: disables the default features of the package (optional) 96 | # --config net.git-fetch-with-cli=true: Uses command-line git instead of built-in libgit2 to fix OOM Problem (exit code: 137) 97 | RUN export 
CSOUND_LIB_DIR="/usr/lib/$(uname -m)-linux-gnu" \ 98 | && export PLUGINS_DIR=$(pkg-config --variable=pluginsdir gstreamer-1.0) \ 99 | && export SO_SUFFIX=so \ 100 | && cargo build --release --no-default-features --config net.git-fetch-with-cli=true \ 101 | # List of packages to build 102 | --package gst-plugin-spotify \ 103 | # Use install command to create directory (-d), copy and print filenames (-v), and set attributes/permissions (-m) 104 | && install -v -d ${DEST_DIR}/${PLUGINS_DIR} \ 105 | && install -v -m 755 target/release/*.${SO_SUFFIX} ${DEST_DIR}/${PLUGINS_DIR} \ 106 | && cargo clean 107 | 108 | # --------------------------------- 109 | # 110 | ################################################################# 111 | 112 | ################################################################################ 113 | # Stage 2: Build Iris Web UI frontend 114 | # 115 | # This stage only builds the static assets (JS/CSS) for the Iris web interface. 116 | ################################################################################ 117 | FROM node:18-slim AS frontend-builder 118 | 119 | ARG IMG_VERSION 120 | 121 | # Install build dependencies 122 | RUN apt-get update && apt-get install -y --no-install-recommends \ 123 | git curl jq ca-certificates \ 124 | && rm -rf /var/lib/apt/lists/* 125 | 126 | # --- Install Iris WebUI from source --- 127 | 128 | # ADD a remote file to act as a cache invalidator. Its content is not important, 129 | # but if the remote file changes, this layer's cache will break. 130 | # We place it in /tmp so it doesn't interfere with the git clone command. 
131 | ADD https://api.github.com/repos/jaedb/Iris/git/refs/heads/master /tmp/version.json 132 | 133 | # Clone the Iris repository into a new directory named /iris 134 | RUN \ 135 | # Step 1: Determine the correct branch or tag based on IMG_VERSION 136 | if [ "$IMG_VERSION" = "latest" ]; then \ 137 | IRIS_BRANCH_OR_TAG=master; \ 138 | elif [ "$IMG_VERSION" = "develop" ]; then \ 139 | IRIS_BRANCH_OR_TAG=develop; \ 140 | elif [ "$IMG_VERSION" = "release" ]; then \ 141 | IRIS_BRANCH_OR_TAG=$(curl -s https://api.github.com/repos/jaedb/Iris/releases/latest | jq -r .tag_name); \ 142 | else \ 143 | echo "Invalid version info for Iris: $IMG_VERSION"; \ 144 | exit 1; \ 145 | fi \ 146 | && echo "Selected branch or tag for Iris: $IRIS_BRANCH_OR_TAG" \ 147 | # Step 2: Clone Iris into a new directory /iris 148 | && git clone --depth 1 --single-branch -b "$IRIS_BRANCH_OR_TAG" https://github.com/jaedb/Iris.git /iris; 149 | 150 | # Now, set the working directory to the newly created /iris folder 151 | WORKDIR /iris 152 | 153 | # Build the frontend assets and then remove build dependencies 154 | RUN npm install && npm run prod && rm -rf node_modules 155 | 156 | ################################################################################ 157 | # Stage 3: Build Python wheels 158 | # 159 | # This stage acts as a "wheel factory". It downloads and builds all Python 160 | # packages and their dependencies into a single folder of .whl files. 161 | ################################################################################ 162 | FROM python:3.13-slim-bookworm AS python-builder 163 | 164 | ARG IMG_VERSION 165 | 166 | # Install build-time dependencies needed for Python packages. 
167 | RUN apt-get update && apt-get install -y --no-install-recommends \ 168 | build-essential \ 169 | git \ 170 | curl \ 171 | jq \ 172 | graphviz-dev \ 173 | pkg-config \ 174 | gobject-introspection \ 175 | libgirepository1.0-dev \ 176 | libglib2.0-dev \ 177 | libffi-dev \ 178 | libcairo2-dev \ 179 | libasound2-dev \ 180 | libdbus-glib-1-dev \ 181 | meson \ 182 | ninja-build \ 183 | && rm -rf /var/lib/apt/lists/* 184 | 185 | # Create a directory to store all our wheels 186 | WORKDIR /wheels 187 | 188 | # --- Collect all Python sources --- 189 | 190 | # --- Mopidy source --- 191 | RUN \ 192 | # Step 1: Determine the correct branch or tag based on IMG_VERSION 193 | if [ "$IMG_VERSION" = "release" ]; then \ 194 | echo "Determining latest stable release tag from GitHub..." \ 195 | && MOPIDY_BRANCH_OR_TAG=$(curl -s https://api.github.com/repos/mopidy/mopidy/releases/latest | jq -r '.tag_name'); \ 196 | elif [ "$IMG_VERSION" = "latest" ]; then \ 197 | echo "Determining latest pre-release tag from GitHub..." \ 198 | && MOPIDY_BRANCH_OR_TAG=$(curl -s https://api.github.com/repos/mopidy/mopidy/releases | jq -r 'map(select(.draft == false)) | .[0].tag_name'); \ 199 | elif [ "$IMG_VERSION" = "develop" ]; then \ 200 | MOPIDY_BRANCH_OR_TAG=main; \ 201 | else \ 202 | echo "Invalid version info for Mopidy: $IMG_VERSION" && exit 1; \ 203 | fi \ 204 | && echo "Cloning Mopidy tag: $MOPIDY_BRANCH_OR_TAG" \ 205 | && git clone --depth 1 --single-branch -b ${MOPIDY_BRANCH_OR_TAG} https://github.com/mopidy/mopidy.git /src/mopidy 206 | 207 | # --- Mopidy-Spotify source --- 208 | RUN \ 209 | if [ "$IMG_VERSION" = "release" ]; then \ 210 | MOPSPOT_BRANCH_OR_TAG="v5.0.0a3"; \ 211 | elif [ "$IMG_VERSION" = "latest" ]; then \ 212 | echo "Determining latest pre-release tag from GitHub..." 
\ 213 | && MOPSPOT_BRANCH_OR_TAG=$(curl -s https://api.github.com/repos/mopidy/mopidy-spotify/releases | jq -r 'map(select(.draft == false)) | .[0].tag_name'); \ 214 | elif [ "$IMG_VERSION" = "develop" ]; then \ 215 | MOPSPOT_BRANCH_OR_TAG=main; \ 216 | else \ 217 | echo "Invalid version info for Mopidy-Spotify: $IMG_VERSION" && exit 1; \ 218 | fi \ 219 | && echo "Cloning Mopidy-Spotify tag: $MOPSPOT_BRANCH_OR_TAG" \ 220 | && git clone --depth 1 --single-branch -b ${MOPSPOT_BRANCH_OR_TAG} https://github.com/mopidy/mopidy-spotify.git /src/mopidy-spotify 221 | 222 | # --- Iris source --- 223 | COPY --from=frontend-builder /iris /src/iris 224 | 225 | # --- Other Python dependencies source --- 226 | COPY requirements.txt /src/requirements.txt 227 | 228 | # --- Create constraints for specific build versions --- 229 | RUN \ 230 | # We define a variable for the Mopidy source with a potential version override 231 | MOPIDY_SOURCE="/src/mopidy" \ 232 | # Bugfix pin pygobject==3.50.0 to resolve Mopidy dependency conflict with pygobject (see pyproject.toml) 233 | # This prevents pip from trying to install a newer, incompatible version. 234 | && echo "pygobject==3.50.0" > /src/constraints.txt \ 235 | # --- Build ALL packages and dependencies as wheels in a single step --- 236 | # We use 'pip wheel' to build .whl files from the local source directories 237 | # and download wheels for all other dependencies. 238 | && python3 -m pip wheel \ 239 | --no-cache-dir \ 240 | --wheel-dir=/wheels \ 241 | --constraint /src/constraints.txt \ 242 | --requirement /src/requirements.txt \ 243 | PyGObject \ 244 | $MOPIDY_SOURCE \ 245 | /src/mopidy-spotify \ 246 | /src/iris 247 | 248 | ################################################################################ 249 | # Stage 4: Final Release Image 250 | # 251 | # This is the final, optimized image. It only contains runtime dependencies 252 | # and copies pre-built artifacts from the builder stages. 
253 | ################################################################################ 254 | FROM python:3.13-slim-bookworm AS release 255 | 256 | ARG IMG_VERSION 257 | WORKDIR / 258 | 259 | # Install only essential runtime packages 260 | RUN apt-get update && apt-get install -y --no-install-recommends \ 261 | sudo \ 262 | dumb-init \ 263 | graphviz \ 264 | # For installing pip packages from git 265 | git \ 266 | # Python and GObject/GStreamer integration 267 | gir1.2-glib-2.0 \ 268 | gir1.2-gstreamer-1.0 \ 269 | gir1.2-gst-plugins-base-1.0 \ 270 | gir1.2-gst-plugins-bad-1.0 \ 271 | # Audio & GStreamer Plugins 272 | gstreamer1.0-pulseaudio \ 273 | gstreamer1.0-alsa \ 274 | gstreamer1.0-tools \ 275 | gstreamer1.0-plugins-good \ 276 | gstreamer1.0-plugins-bad \ 277 | gstreamer1.0-plugins-ugly \ 278 | gstreamer1.0-libav \ 279 | pulseaudio \ 280 | # The venv module is already included in the Python base images, but install it in case we need the system tooling: 281 | python3-venv \ 282 | && rm -rf /var/lib/apt/lists/* 283 | 284 | # --- Create a portable venv and install packages from local wheels --- 285 | ENV VENV_PATH=/opt/venv 286 | # 1. Create a fresh venv IN the final image, allowing it to access system packages. 287 | # This is crucial for pygobject/gi to find the system's GStreamer bindings. 288 | RUN python3 -m venv --system-site-packages ${VENV_PATH} 289 | 290 | # 2. Copy the pre-built wheels from our "wheel factory" 291 | COPY --from=python-builder /wheels /wheels 292 | COPY --from=python-builder /src/requirements.txt /wheels/requirements.txt 293 | 294 | # 3. Install ALL required packages from the local wheels folder, without network access.
295 | RUN ${VENV_PATH}/bin/pip install --no-index --find-links=/wheels \ 296 | -r /wheels/requirements.txt \ 297 | mopidy \ 298 | mopidy-spotify \ 299 | mopidy-iris \ 300 | && rm -rf /wheels 301 | 302 | # Copy the pre-built GStreamer plugin 303 | COPY --from=rust-builder /target/gst-plugins-rs/ / 304 | 305 | # Copy the Iris directory which contains the static web assets 306 | COPY --from=frontend-builder /iris /iris 307 | 308 | # Set the PATH to use the virtual environment 309 | ENV PATH="${VENV_PATH}/bin:$PATH" 310 | 311 | # --- Final Setup and Configuration --- 312 | 313 | # Enable container mode for Iris and copy version file 314 | RUN echo "1" >> /iris/IS_CONTAINER \ 315 | && cp /iris/VERSION / 316 | 317 | # Define user and group to run mopidy 318 | ENV DOCKER_USER=mopidy 319 | ENV DOCKER_GROUP=audio 320 | 321 | # Start helper script. 322 | COPY docker/entrypoint.sh /entrypoint.sh 323 | COPY docker/mopidy/mopidy.example.conf /mopidy/config/mopidy.conf 324 | COPY docker/mopidy/pulse-client.conf /etc/pulse/client.conf 325 | 326 | # Set environment variables for Home and local music directory 327 | ENV HOME=/var/lib/mopidy 328 | ENV XDG_MUSIC_DIR=/media 329 | 330 | # Create user, set permissions and create necessary directories 331 | RUN set -ex \ 332 | # Create the group only if it does not already exist 333 | && (getent group $DOCKER_GROUP || groupadd -r $DOCKER_GROUP) \ 334 | # Create the user 335 | && useradd -r -ms /bin/bash -g $DOCKER_GROUP -d $HOME $DOCKER_USER \ 336 | # Add user to supplementary groups 337 | && usermod -aG audio,sudo,pulse-access $DOCKER_USER \ 338 | # Create volume mount points so we can set permissions on them 339 | && mkdir -p /config /media "$HOME/local" \ 340 | # Create mopidy config directory and symlink it 341 | && mkdir -p "$HOME/.config" \ 342 | && ln -s /config "$HOME/.config/mopidy" \ 343 | # Create local music directory 344 | && mkdir -p "$HOME/local" \ 345 | # Add XDG_MUSIC_DIR to user-dirs to make it available for user 346 | 
&& echo "XDG_MUSIC_DIR=\"$XDG_MUSIC_DIR\"" >> "$HOME/.config/user-dirs.dirs" \ 347 | # Allow docker user to run system commands with sudo 348 | && echo "$DOCKER_USER ALL=NOPASSWD: /iris/mopidy_iris/system.sh" >> /etc/sudoers \ 349 | # Configure sudo to keep XDG_MUSIC_DIR 350 | && echo "Defaults env_keep += \"XDG_MUSIC_DIR\"" >> /etc/sudoers \ 351 | # Set ownership and permissions 352 | && chmod +x /entrypoint.sh \ 353 | && chown -R $DOCKER_USER:$DOCKER_GROUP $HOME /config /media \ 354 | # Set permissions that allow any user to run mopidy 355 | && chmod go+rwx -R /iris /VERSION 356 | 357 | # Switch to the non-root user 358 | USER $DOCKER_USER:$DOCKER_GROUP 359 | 360 | # Define volumes 361 | VOLUME ["/config", "/var/lib/mopidy/local", "/media"] 362 | 363 | # Port-List to expose: 364 | # 6600 - (optional) Exposes MPD server (if you use for example ncmpcpp client). 365 | # 6680 - (optional) Exposes HTTP server (if you use your browser as client). 366 | # 5555/udp - (optional) Exposes UDP streaming for FIFO sink (e.g. for visualizers). 367 | EXPOSE 6600 6680 5555/udp 368 | 369 | # Set the entrypoint to use dumb-init for proper signal handling 370 | ENTRYPOINT ["/usr/bin/dumb-init", "/entrypoint.sh"] 371 | CMD ["/opt/venv/bin/mopidy"] 372 | 373 | # 374 | ################################################################# -------------------------------------------------------------------------------- /.github/workflows/build-docker.yml: -------------------------------------------------------------------------------- 1 | name: Build multi-arch docker images and publish on DockerHub and GHCR 2 | 3 | # 1.
Controls when the action will run 4 | on: 5 | # -> Manually from the Actions tab 6 | workflow_dispatch: 7 | inputs: 8 | logLevel: 9 | type: choice 10 | description: Log level 11 | required: false 12 | default: info 13 | options: 14 | - info 15 | - debug 16 | image: 17 | type: choice 18 | description: 'Image tag to build' 19 | required: false 20 | default: all 21 | options: 22 | - all 23 | - latest 24 | - develop 25 | - release 26 | platform: 27 | type: choice 28 | description: 'Platform to build' 29 | required: false 30 | default: all 31 | options: 32 | - all 33 | - linux/amd64 34 | - linux/arm64 35 | - linux/arm/v7 36 | 37 | # -> On push to main/develop branches or version tags 38 | push: 39 | branches: 40 | - main 41 | - develop 42 | tags: 43 | - 'v*' 44 | paths-ignore: 45 | - '**.md' 46 | #- '.github/workflows/**' 47 | - 'docs/**' 48 | 49 | # -> On pull requests targeting the main branch 50 | pull_request: 51 | branches: 52 | - main 53 | paths-ignore: 54 | - '**.md' 55 | #- '.github/workflows/**' 56 | - 'docs/**' 57 | 58 | # -> On a weekly schedule 59 | schedule: 60 | - cron: '0 2 * * 0' # every Sunday at 2am UTC 61 | 62 | # 2. Environment variables available to all jobs 63 | env: 64 | DOCKERHUB_SLUG: ${{ secrets.DOCKERHUB_USERNAME }}/mopidy 65 | GHCR_SLUG: ghcr.io/${{ github.repository_owner }}/mopidy 66 | 67 | # 3. Permissions for the GITHUB_TOKEN 68 | permissions: 69 | contents: read 70 | packages: write # Required to publish packages to GHCR 71 | 72 | # 4. A workflow run is made up of one or more jobs 73 | jobs: 74 | # Job to define the build matrix for all subsequent jobs. This is the single source of truth. 
75 | prepare_build: 76 | name: Prepare Build Matrix 77 | runs-on: ubuntu-latest 78 | outputs: 79 | images: ${{ steps.set-matrix.outputs.images }} 80 | platforms: ${{ steps.set-matrix.outputs.platforms }} 81 | steps: 82 | - id: set-matrix 83 | run: | 84 | # Define the full lists of images and platforms 85 | ALL_IMAGES='["latest", "develop", "release"]' 86 | ALL_PLATFORMS='["linux/amd64", "linux/arm64", "linux/arm/v7"]' 87 | 88 | # Process the 'image' input from the manual trigger 89 | if [ -z "${{ github.event.inputs.image }}" ] || [ "${{ github.event.inputs.image }}" == "all" ]; then 90 | echo "images=${ALL_IMAGES}" >> $GITHUB_OUTPUT 91 | else 92 | SINGLE_IMAGE="[\"${{ github.event.inputs.image }}\"]" 93 | echo "images=${SINGLE_IMAGE}" >> $GITHUB_OUTPUT 94 | fi 95 | 96 | # Process the 'platform' input from the manual trigger 97 | if [ -z "${{ github.event.inputs.platform }}" ] || [ "${{ github.event.inputs.platform }}" == "all" ]; then 98 | echo "platforms=${ALL_PLATFORMS}" >> $GITHUB_OUTPUT 99 | else 100 | SINGLE_PLATFORM="[\"${{ github.event.inputs.platform }}\"]" 101 | echo "platforms=${SINGLE_PLATFORM}" >> $GITHUB_OUTPUT 102 | fi 103 | 104 | # Job to build the docker images for each platform on a dedicated runner using the matrix strategy 105 | build: 106 | name: Build 107 | needs: prepare_build 108 | # The type of runner that the job will run on 109 | runs-on: ubuntu-latest 110 | strategy: 111 | fail-fast: false 112 | # List of platforms and images on which the job will be run in parallel. 113 | # The lists are provided by the 'prepare_build' job. 
114 | matrix: 115 | platform: ${{ fromJson(needs.prepare_build.outputs.platforms) }} 116 | image: ${{ fromJson(needs.prepare_build.outputs.images) }} 117 | 118 | # Steps represent a sequence of tasks that will be executed as part of the job 119 | steps: 120 | # Store current platform from matrix as variable $PLATFORM_PAIR (with "/" replaced by "-") 121 | - name: Prepare 122 | run: | 123 | platform=${{ matrix.platform }} 124 | echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV 125 | 126 | # Github action to provide runner with OS information 127 | - name: Get GitHub Actions runner OS information 128 | uses: kenchan0130/actions-system-info@master 129 | id: system-info 130 | 131 | - name: Output System information 132 | run: | 133 | OUTPUTS=( 134 | "CPU Core: ${{ steps.system-info.outputs.cpu-core }}" 135 | "CPU Model: ${{ steps.system-info.outputs.cpu-model }}" 136 | "Hostname: ${{ steps.system-info.outputs.hostname }}" 137 | "Kernel release: ${{ steps.system-info.outputs.kernel-release }}" 138 | "Kernel version: ${{ steps.system-info.outputs.kernel-version }}" 139 | "Name: ${{ steps.system-info.outputs.name }}" 140 | "Platform: ${{ steps.system-info.outputs.platform }}" 141 | "Release: ${{ steps.system-info.outputs.release }}" 142 | "Total memory bytes: ${{ steps.system-info.outputs.totalmem }}" 143 | ) 144 | 145 | for OUTPUT in "${OUTPUTS[@]}";do 146 | echo "${OUTPUT}" 147 | done 148 | 149 | echo "Disk Space:" 150 | df -h 151 | 152 | # Free up disk space by removing unused toolchains to prevent "No space left on device" 153 | - name: Free Disk Space (Ubuntu) 154 | if: matrix.platform == 'linux/arm/v7' 155 | uses: jlumbroso/free-disk-space@main 156 | with: 157 | # this might remove tools that are actually needed, 158 | # if set to "true" but frees about 6 GB 159 | tool-cache: false 160 | 161 | # all of these default to true, but feel free to set to 162 | # "false" if necessary for your workflow 163 | android: true 164 | dotnet: true 165 | haskell: true 166 | 
large-packages: true 167 | docker-images: true 168 | 169 | # Set to false. Let the 'docker-on-tmpfs' action manage its own swapfile. 170 | # This prevents the "swapoff failed: No such file or directory" error. 171 | swap-storage: false 172 | 173 | # Github Action to get branch or tag information without the /ref/* prefix 174 | - name: Get branch names 175 | id: branch-name 176 | uses: tj-actions/branch-names@v6 177 | 178 | # Github Action to check-out your repository under $GITHUB_WORKSPACE, so your job can access it 179 | - name: Checkout 180 | uses: actions/checkout@v4.1.1 181 | 182 | # GitHub Action to install QEMU static binaries (optional for more platform options in buildx) 183 | - name: Set up QEMU 184 | uses: docker/setup-qemu-action@v3 185 | 186 | # GitHub Action to set up Docker Buildx. The "|| ''" fallback is required: without it the expression evaluates to the string "false" on non-debug runs (GitHub's && returns the falsy left operand), which buildkitd would reject as an unknown flag. 187 | - name: Set up Docker Buildx 188 | uses: docker/setup-buildx-action@v3 189 | with: 190 | buildkitd-flags: ${{ github.event.inputs.logLevel == 'debug' && '--debug' || '' }} 191 | 192 | # Hack to fix Errors on arm/v7 and linux/386 193 | # Work around for qemu bug on 32bit systems caused during rust compilation (https://github.com/JonasAlfredsson/docker-on-tmpfs?tab=readme-ov-file) 194 | # - "Value too large for defined data type;" https://github.com/crazy-max/ghaction-docker-buildx/issues/172 195 | # - "object not found - no match for id (SOME_HASH)" on git update 196 | - name: Run Docker on tmpfs 197 | if: matrix.platform == 'linux/arm/v7' || matrix.platform == 'linux/i386' 198 | uses: JonasAlfredsson/docker-on-tmpfs@v1 199 | with: 200 | tmpfs_size: 10 201 | swap_size: 10 202 | swap_location: '/mnt/swapfile' 203 | 204 | # GitHub Action to extract metadata (tags, labels) for Docker.
205 | - name: Docker meta 206 | id: docker_meta 207 | uses: docker/metadata-action@v5 208 | with: 209 | github-token: ${{ secrets.GITHUB_TOKEN }} 210 | images: | 211 | ${{ env.DOCKERHUB_SLUG }} 212 | ${{ env.GHCR_SLUG }} 213 | labels: | 214 | org.opencontainers.image.title=${{ env.DOCKERHUB_SLUG }} 215 | org.opencontainers.image.description='Mopidy music server with Iris Web-UI, Spotify support and many other extensions.' 216 | org.opencontainers.image.vendor=${{ secrets.DOCKERHUB_USERNAME }} 217 | 218 | # GitHub Action to login to DockerHub. 219 | - name: Login to DockerHub 220 | uses: docker/login-action@v3 221 | with: 222 | username: ${{ secrets.DOCKERHUB_USERNAME }} 223 | password: ${{ secrets.DOCKERHUB_TOKEN }} 224 | 225 | # GitHub Action to login to GitHub Container Registry. 226 | - name: Login to GitHub Container Registry 227 | uses: docker/login-action@v3 228 | with: 229 | registry: ghcr.io 230 | username: ${{ github.repository_owner }} 231 | password: ${{ secrets.GITHUB_TOKEN }} 232 | 233 | # --- This step is only for single item manual runs --- # 234 | # Build and push with a specific tag for manual runs that don't build everything. 235 | - name: Build and push with specific tag (for single item manual run) 236 | if: github.event_name == 'workflow_dispatch' && (github.event.inputs.platform != 'all' || github.event.inputs.image != 'all') 237 | uses: docker/build-push-action@v5 238 | with: 239 | context: . 
240 | file: ./Dockerfile 241 | platforms: ${{ matrix.platform }} 242 | provenance: false 243 | labels: ${{ steps.docker_meta.outputs.labels }} 244 | push: true 245 | tags: | 246 | ${{ env.DOCKERHUB_SLUG }}:${{ matrix.image }}-${{ env.PLATFORM_PAIR }} 247 | ${{ env.GHCR_SLUG }}:${{ matrix.image }}-${{ env.PLATFORM_PAIR }} 248 | build-args: | 249 | IMG_VERSION=${{ matrix.image }} 250 | BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ') 251 | VCS_REF=$(echo $GITHUB_SHA | cut -c1-8) 252 | 253 | # --- Next steps only for multi-arch/multi-image runs --- # 254 | # Build and push by digest for multi-item runs. NOTE(review): in both build steps of this job the $(date ...) and $(echo ... | cut ...) values under build-args are NOT shell-expanded by GitHub Actions - they reach Docker as literal strings. Compute them in an earlier step (GITHUB_ENV) and reference them via the env context instead - confirm intent. 255 | - name: Build and push by digest (for multi-item run) 256 | id: docker_build_digest 257 | if: github.event_name != 'workflow_dispatch' || (github.event.inputs.platform == 'all' && github.event.inputs.image == 'all') 258 | uses: docker/build-push-action@v5 259 | with: 260 | context: . 261 | file: ./Dockerfile 262 | platforms: ${{ matrix.platform }} 263 | provenance: false 264 | labels: ${{ steps.docker_meta.outputs.labels }} 265 | tags: | 266 | ${{ env.DOCKERHUB_SLUG }} 267 | ${{ env.GHCR_SLUG }} 268 | #cache-from: type=gha,scope=${{ github.repository }}-${{ github.ref_name }}-${{ matrix.platform }}-${{ matrix.image }} 269 | #cache-to: type=gha,mode=max,scope=${{ github.repository }}-${{ github.ref_name }}-${{ matrix.platform }}-${{ matrix.image }} 270 | outputs: type=image,name=${{ env.DOCKERHUB_SLUG }},name=${{ env.GHCR_SLUG }},push-by-digest=true,name-canonical=true,push=true 271 | build-args: | 272 | IMG_VERSION=${{ matrix.image }} 273 | BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ') 274 | VCS_REF=$(echo $GITHUB_SHA | cut -c1-8) 275 | 276 | # Export digest and store in GitHub artefact storage for later use in other jobs 277 | - name: Export digest 278 | if: github.event_name != 'workflow_dispatch' || (github.event.inputs.platform == 'all' && github.event.inputs.image == 'all') 279 | run: | 280 | mkdir -p /tmp/digests/${{ matrix.image }} 281 | digest="${{
steps.docker_build_digest.outputs.digest }}" 282 | touch "/tmp/digests/${{ matrix.image }}/${digest#sha256:}" 283 | 284 | # Upload digest artifact 285 | - name: Upload digest 286 | if: github.event_name != 'workflow_dispatch' || (github.event.inputs.platform == 'all' && github.event.inputs.image == 'all') 287 | uses: actions/upload-artifact@v4 288 | with: 289 | name: digests-${{ matrix.image }}-${{ env.PLATFORM_PAIR }} 290 | path: /tmp/digests/${{ matrix.image }}/* 291 | if-no-files-found: error 292 | retention-days: 1 293 | 294 | - name: Clear digest 295 | if: github.event_name != 'workflow_dispatch' || (github.event.inputs.platform == 'all' && github.event.inputs.image == 'all') 296 | run: | 297 | rm -rf /tmp/digests/${{ matrix.image }} 298 | 299 | # Job to merge the multi-arch images into a single manifest 300 | merge: 301 | # Only run this job for full multi-arch and multi-image builds 302 | if: | 303 | github.event_name != 'pull_request' && 304 | ( 305 | github.event_name != 'workflow_dispatch' || 306 | (github.event.inputs.platform == 'all' && github.event.inputs.image == 'all') 307 | ) 308 | name: Merge Docker manifests 309 | needs: [build, prepare_build] 310 | runs-on: ubuntu-latest 311 | strategy: 312 | fail-fast: false 313 | matrix: 314 | image: ${{ fromJson(needs.prepare_build.outputs.images) }} 315 | steps: 316 | # Download all digest artifacts 317 | - name: Download digests 318 | uses: actions/download-artifact@v4 319 | with: 320 | path: /tmp/digests/${{ matrix.image }} 321 | pattern: digests-${{ matrix.image }}-* 322 | merge-multiple: true 323 | 324 | # Set up Docker Buildx 325 | - name: Set up Docker Buildx 326 | uses: docker/setup-buildx-action@v3 327 | 328 | # GitHub Action to extract metadata (final tags) for Docker. 
329 | - name: Docker meta 330 | id: meta 331 | uses: docker/metadata-action@v5 332 | with: 333 | images: | 334 | ${{ env.DOCKERHUB_SLUG }} 335 | ${{ env.GHCR_SLUG }} 336 | tags: | 337 | type=raw,value=${{ matrix.image }},enable=${{ github.ref_name == github.event.repository.default_branch }} 338 | type=raw,value=dev_${{ matrix.image }},enable=${{ github.ref == format('refs/heads/{0}', 'develop') }} 339 | type=raw,value=test_${{ matrix.image }},enable=${{ github.ref_name != github.event.repository.default_branch && github.ref != format('refs/heads/{0}', 'develop') }} 340 | 341 | # Login to Docker Hub 342 | - name: Login to Docker Hub 343 | uses: docker/login-action@v3 344 | with: 345 | username: ${{ secrets.DOCKERHUB_USERNAME }} 346 | password: ${{ secrets.DOCKERHUB_TOKEN }} 347 | 348 | # Login to GitHub Container Registry 349 | - name: Login to GitHub Container Registry 350 | uses: docker/login-action@v3 351 | with: 352 | registry: ghcr.io 353 | username: ${{ github.repository_owner }} 354 | password: ${{ secrets.GITHUB_TOKEN }} 355 | 356 | # Create and push the multi-arch manifest 357 | - name: Create manifest list and push 358 | working-directory: /tmp/digests/${{ matrix.image }} 359 | run: | 360 | docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) 
| join(" ")' <<< '${{ steps.meta.outputs.json }}') \ 361 | $(printf '${{ env.DOCKERHUB_SLUG }}@sha256:%s ' *) 362 | 363 | # Inspect the final image on Docker Hub and GHCR 364 | - name: Inspect final images 365 | run: | 366 | docker buildx imagetools inspect ${{ fromJson(steps.meta.outputs.json).tags[0] }} 367 | docker buildx imagetools inspect ${{ fromJson(steps.meta.outputs.json).tags[1] }} 368 | 369 | # Job to clean up single-arch tags from Docker Hub 370 | cleanup_dockerhub_tags: 371 | name: Cleanup Docker Hub Tags 372 | needs: [merge, prepare_build] 373 | runs-on: ubuntu-latest 374 | # Only run this job after a successful full build 375 | if: success() && (github.event_name != 'workflow_dispatch' || (github.event.inputs.platform == 'all' && github.event.inputs.image == 'all')) 376 | steps: 377 | # This step installs dependencies (jq), downloads hub-tool, and logs in ONCE. 378 | - name: Install dependencies and Login to Docker Hub 379 | run: | 380 | sudo apt-get update && sudo apt-get install -y jq 381 | VERSION=v0.4.6 382 | curl -sL "https://github.com/docker/hub-tool/releases/download/${VERSION}/hub-tool-linux-amd64.tar.gz" -o hub-tool.tar.gz 383 | tar --strip-components=1 -xzf ./hub-tool.tar.gz && ./hub-tool --version 384 | HUB_TOKEN=$(curl -s -H "Content-Type: application/json" -X POST -d '{"username": "${{ secrets.DOCKERHUB_USERNAME }}", "password": "${{ secrets.DOCKERHUB_TOKEN }}"}' https://hub.docker.com/v2/users/login/ | jq -r .token) 385 | USERNAME_B64="$(printf '%s:' "${{ secrets.DOCKERHUB_USERNAME }}" | base64 -w0)" 386 | USER_PASS_B64="$(printf '%s:%s' "${{ secrets.DOCKERHUB_USERNAME }}" "${{ secrets.DOCKERHUB_TOKEN }}" | base64 -w0)" 387 | mkdir -p ~/.docker/ && printf '{"auths": {"hub-tool": {"auth": "%s"}, "hub-tool-refresh-token": {"auth": "%s"}, "hub-tool-token": { "auth": "%s", "identitytoken": "%s"}}}' "$USER_PASS_B64" "$USERNAME_B64" "$USERNAME_B64" "$HUB_TOKEN" > ~/.docker/config.json 388 | 389 | # This step uses the lists from 
'prepare_build' to loop through all tags and remove them from Docker Hub. 390 | - name: Remove all tags from Docker Hub 391 | run: | 392 | mapfile -t images < <(echo '${{ needs.prepare_build.outputs.images }}' | jq -r '.[]') 393 | mapfile -t platforms < <(echo '${{ needs.prepare_build.outputs.platforms }}' | jq -r '.[]') 394 | for image in "${images[@]}"; do 395 | for platform in "${platforms[@]}"; do 396 | platform_slug=${platform//\//-} 397 | tag_to_delete="${image}-${platform_slug}" 398 | echo "Attempting to delete tag from Docker Hub: ${{ env.DOCKERHUB_SLUG }}:${tag_to_delete}" 399 | ./hub-tool tag rm --force ${{ env.DOCKERHUB_SLUG }}:${tag_to_delete} || true 400 | done 401 | done 402 | 403 | # Always run this step to ensure credentials are cleaned up. 404 | - name: Logout from Docker Hub 405 | if: always() 406 | run: rm -f ~/.docker/config.json 407 | 408 | # Job to clean up single-arch tags from GitHub Container Registry 409 | cleanup_ghcr_tags: 410 | name: Cleanup GHCR Tags 411 | needs: [merge, prepare_build] 412 | runs-on: ubuntu-latest 413 | # Only run this job after a successful full build 414 | if: success() && (github.event_name != 'workflow_dispatch' || (github.event.inputs.platform == 'all' && github.event.inputs.image == 'all')) 415 | strategy: 416 | fail-fast: false 417 | # This job-level matrix is the correct way to run the delete action for each combination 418 | matrix: 419 | image: ${{ fromJson(needs.prepare_build.outputs.images) }} 420 | platform: ${{ fromJson(needs.prepare_build.outputs.platforms) }} 421 | steps: 422 | # This step creates the tag name using shell substitution, which is reliable and avoids linter issues. 
423 | - name: Construct tag to delete 424 | id: construct 425 | run: | 426 | platform=${{ matrix.platform }} 427 | platform_slug=${platform//\//-} 428 | echo "tag_to_delete=${{ matrix.image }}-${platform_slug}" >> $GITHUB_OUTPUT 429 | 430 | - name: Remove tag from GitHub Container Registry 431 | uses: bots-house/ghcr-delete-image-action@v1.1.0 432 | with: 433 | owner: ${{ github.repository_owner }} 434 | name: mopidy 435 | token: ${{ secrets.WORKFLOW_PAT }} 436 | tag: ${{ steps.construct.outputs.tag_to_delete }} 437 | continue-on-error: true --------------------------------------------------------------------------------