├── .github
│   ├── .github_liveness.txt
│   ├── FUNDING.yml
│   └── workflows
│       ├── docker.yml
│       └── keepalive.yml
├── .gitmodules
├── Dockerfile
├── LICENSE
├── README.md
├── docker-compose.yml
├── docker-entrypoint.sh
├── empty.sh
├── env.sample
├── healthcheck.sh
├── hooks
│   └── build
├── ls.sh
└── trap.sh

/.github/.github_liveness.txt:
--------------------------------------------------------------------------------
# Last GitHub activity at: 2025-04-27T02:49:02+00:00
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
# These are supported funding model platforms

github:
  - efrecon
#patreon: # Replace with a single Patreon username
#open_collective: # Replace with a single Open Collective username
#ko_fi: # Replace with a single Ko-fi username
#tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
#community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
#liberapay: # Replace with a single Liberapay username
#issuehunt: # Replace with a single IssueHunt username
#lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
#polar: # Replace with a single Polar username
#buy_me_a_coffee: # Replace with a single Buy Me a Coffee username
#custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
--------------------------------------------------------------------------------
/.github/workflows/docker.yml:
--------------------------------------------------------------------------------
name: Docker Images

on:
  workflow_dispatch:
  push:
    branches:
      - main
      - master
  schedule:
    - cron: "0 0 * * *"

jobs:
  ghcr:
    runs-on: ubuntu-latest
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
        with:
          submodules: true
      -
        name: Login to GHCR
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
        with:
          driver-opts: image=moby/buildkit:master
      -
        name: Build GHCR images
        env:
          DOCKER_REPO: ghcr.io/${{ github.actor }}/s3fs
          SOURCE_COMMIT: ${{ github.sha }}
        run: ./hooks/build
  hub:
    runs-on: ubuntu-latest
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
        with:
          submodules: true
      -
        name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          registry: docker.io
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
        with:
          driver-opts: image=moby/buildkit:master
      -
        name: Build Docker Hub images
        env:
          DOCKER_REPO: docker.io/efrecon/s3fs
          SOURCE_COMMIT: ${{ github.sha }}
        run: ./hooks/build
      -
        # Note: this uses the password, not the token, as the action would
        # otherwise not work.
        name: Update repo description at Docker Hub
        uses: peter-evans/dockerhub-description@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}
          repository: efrecon/s3fs
          short-description: Mount S3 buckets from within a container and expose them to host/containers
--------------------------------------------------------------------------------
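
The workflow above declares a `workflow_dispatch` trigger, so builds can also
be started by hand. A minimal sketch using the GitHub CLI, assuming `gh` is
installed and authenticated against your fork of this repository:

```Shell
# Manually trigger the image build workflow on the main branch.
gh workflow run docker.yml --ref main
```
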
/.github/workflows/keepalive.yml:
--------------------------------------------------------------------------------
name: keepalive

on:
  schedule:
    # Run every Sunday at 1:27 UTC
    - cron: '27 1 * * SUN'

jobs:
  keepalive:
    runs-on: ubuntu-latest
    steps:
      - name: keepalive
        uses: efrecon/gh-action-keepalive@main
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
[submodule "hooks/reg-tags"]
    path = hooks/reg-tags
    url = https://github.com/efrecon/reg-tags.git
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
ARG ALPINE_VERSION=3.20.3
FROM alpine:$ALPINE_VERSION AS build

ARG S3FS_VERSION=v1.91

RUN apk --no-cache add \
      ca-certificates \
      build-base \
      git \
      alpine-sdk \
      libcurl \
      automake \
      autoconf \
      libxml2-dev \
      mailcap \
      fuse-dev \
      curl-dev && \
    git clone https://github.com/s3fs-fuse/s3fs-fuse.git && \
    cd s3fs-fuse && \
    git checkout tags/${S3FS_VERSION} && \
    ./autogen.sh && \
    ./configure --prefix=/usr && \
    make -j && \
    make install

FROM alpine:$ALPINE_VERSION

# Metadata
LABEL MAINTAINER=efrecon+github@gmail.com
LABEL org.opencontainers.image.title="efrecon/s3fs"
LABEL org.opencontainers.image.description="Mount S3 buckets from within a container and expose them to host/containers"
LABEL org.opencontainers.image.authors="Emmanuel Frécon <efrecon+github@gmail.com>"
LABEL org.opencontainers.image.url="https://github.com/efrecon/docker-s3fs-client"
LABEL org.opencontainers.image.documentation="https://github.com/efrecon/docker-s3fs-client/README.md"
LABEL org.opencontainers.image.source="https://github.com/efrecon/docker-s3fs-client/Dockerfile"

COPY --from=build /usr/bin/s3fs /usr/bin/s3fs

# Specify URL and secrets. When using AWS_S3_SECRET_ACCESS_KEY_FILE, the secret
# key will be read from that file, which makes it possible to pass secrets
# using Docker secrets. You can either specify the path to an authorisation
# file, or set environment variables with the key and the secret.
ENV AWS_S3_URL=https://s3.amazonaws.com
ENV AWS_S3_ACCESS_KEY_ID=
ENV AWS_S3_ACCESS_KEY_ID_FILE=
ENV AWS_S3_SECRET_ACCESS_KEY=
ENV AWS_S3_SECRET_ACCESS_KEY_FILE=
ENV AWS_S3_AUTHFILE=
ENV AWS_S3_BUCKET=

# User and group ID of the S3 mount owner
ENV RUN_AS=
ENV UID=0
ENV GID=0

# Location of the directory where the bucket will be mounted inside the
# container.
ENV AWS_S3_MOUNT=/opt/s3fs/bucket

# s3fs tuning
ENV S3FS_DEBUG=0
ENV S3FS_ARGS=

RUN mkdir /opt/s3fs && \
    apk --no-cache add \
      ca-certificates \
      mailcap \
      fuse \
      libxml2 \
      libcurl \
      libgcc \
      libstdc++ \
      tini && \
    if id -u xfs >/dev/null 2>&1; then deluser xfs; fi && \
    s3fs --version

# Allow access to the volume by a different user, to enable UIDs other than
# root when using volumes.
RUN echo user_allow_other >> /etc/fuse.conf

COPY *.sh /usr/local/bin/

WORKDIR /opt/s3fs

# The following should match the AWS_S3_MOUNT environment variable.
VOLUME [ "/opt/s3fs/bucket" ]

HEALTHCHECK \
    --interval=15s \
    --timeout=5s \
    --start-period=15s \
    --retries=2 \
    CMD [ "/usr/local/bin/healthcheck.sh" ]

# The default is to perform all system-level mounting as part of the entrypoint
# and then have a command that keeps listing the files under the main share.
# Listing the files keeps the share active and prevents the remote server from
# closing the connection.
ENTRYPOINT [ "tini", "-g", "--", "docker-entrypoint.sh" ]
CMD [ "empty.sh" ]
--------------------------------------------------------------------------------
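
The `ARG` instructions at the top of the Dockerfile make both the s3fs release
and the Alpine base selectable at build time. A minimal sketch of a local build
pinning both (the values shown simply repeat the defaults; any released `v*`
tag of s3fs-fuse should work):

```Shell
# Build the image locally for a specific s3fs release and Alpine base.
docker build \
    --build-arg S3FS_VERSION=v1.91 \
    --build-arg ALPINE_VERSION=3.20.3 \
    -t s3fs:v1.91 .
```
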
/LICENSE:
--------------------------------------------------------------------------------
BSD 3-Clause License

Copyright (c) 2019, Emmanuel Frecon
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of the copyright holder nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Dockerised s3fs Client

This Docker [image] (and associated github [project]) facilitates mounting
remote S3 bucket resources into containers. Mounting is performed through the
fuse [s3fs] implementation. The image basically implements a Docker [volume] on
the cheap: used with the proper creation options (see below), you should be
able to bind-mount back the remote bucket onto a host directory. This directory
will make the content of the bucket available to processes on the host, but
also to all other containers on the host. The image automatically unmounts the
remote bucket on container termination.

[image]: https://hub.docker.com/r/efrecon/s3fs
[project]: https://github.com/efrecon/docker-s3fs-client
[s3fs]: https://github.com/s3fs-fuse/s3fs-fuse
[volume]: https://docs.docker.com/storage/

The image [tags] follow the versions from the [s3fs] implementation; there will
be **no** `latest`, only tags matching released versions of [s3fs]. New
versions of [s3fs] are automatically picked up when [rebuilding]. [s3fs] is
compiled from the tagged git versions of the main repository. The image is
automatically built using a github [workflow] and pushed to both the Docker
[Hub][image] and to the [GHCR]. Detection of new releases happens once every
day.

[tags]: https://cloud.docker.com/repository/docker/efrecon/s3fs/tags
[rebuilding]: ./hooks/build
[workflow]: ./.github/workflows/docker.yml
[GHCR]: https://github.com/efrecon/docker-s3fs-client/pkgs/container/s3fs

## Example

Provided the existence of a directory called `/mnt/tmp` on the host, the
following command would mount a remote S3 bucket and bind-mount the remote
resource onto the host's `/mnt/tmp` in a way that makes the remote files
accessible to processes and/or other containers running on the same host.

```Shell
docker run -it --rm \
    --device /dev/fuse \
    --cap-add SYS_ADMIN \
    --security-opt "apparmor=unconfined" \
    --env "AWS_S3_BUCKET=<bucketname>" \
    --env "AWS_S3_ACCESS_KEY_ID=<key>" \
    --env "AWS_S3_SECRET_ACCESS_KEY=<secret>" \
    --env UID=$(id -u) \
    --env GID=$(id -g) \
    -v /mnt/tmp:/opt/s3fs/bucket:rshared \
    efrecon/s3fs
```

The `--device`, `--cap-add` and `--security-opt` options and their values make
sure that the container is able to make the S3 bucket available through FUSE.
`rshared` is what ensures that bind mounting makes the files and directories
available back to the host and recursively to other containers.

Note that there are reports of mount propagation not working with the compose
plugin. See [#42](https://github.com/efrecon/docker-s3fs-client/issues/42).
Reverting to standalone `docker-compose` seems to work.

## Container Options

A series of environment variables, most of them prefixed with `AWS_S3_`, can be
used to parametrise the container:

* `AWS_S3_BUCKET` should be the name of the bucket; this is mandatory.
* `AWS_S3_AUTHFILE` is the path to an authorisation file compatible with the
  format specified by [s3fs]. This can be empty, in which case data will be
  taken from the other authorisation-related environment variables.
* `AWS_S3_ACCESS_KEY_ID` is the access key to the S3 bucket; this is only used
  whenever `AWS_S3_AUTHFILE` is empty. Note however that the variable
  `AWS_S3_ACCESS_KEY_ID_FILE` has precedence over this one.
* `AWS_S3_ACCESS_KEY_ID_FILE` points instead to a file that contains the
  access key ID to the S3 bucket. When this is present, the access key ID will
  be taken from the file instead of from the `AWS_S3_ACCESS_KEY_ID` variable.
  If that variable exists, it will be disregarded. This makes it easy to pass
  credentials using Docker [secrets]. This is only ever used whenever
  `AWS_S3_AUTHFILE` is empty.
* `AWS_S3_SECRET_ACCESS_KEY` is the secret access key to the S3 bucket; this is
  only used whenever `AWS_S3_AUTHFILE` is empty. Note however that the variable
  `AWS_S3_SECRET_ACCESS_KEY_FILE` has precedence over this one.
* `AWS_S3_SECRET_ACCESS_KEY_FILE` points instead to a file that contains the
  secret access key to the S3 bucket. When this is present, the secret will be
  taken from the file instead of from the `AWS_S3_SECRET_ACCESS_KEY` variable.
  If that variable exists, it will be disregarded. This makes it easy to pass
  credentials using Docker [secrets]. This is only ever used whenever
  `AWS_S3_AUTHFILE` is empty.
* `AWS_S3_URL` is the URL to the Amazon service. This can be used to mount
  external services that implement a compatible API.
* `AWS_S3_MOUNT` is the location within the container where to mount the S3
  bucket. This defaults to `/opt/s3fs/bucket` and is not really meant to be
  changed.
* `AWS_S3_ENVFILE` is the location of a `.env` file, within the container, from
  where to read the content of environment variables. Only lines starting with
  `AWS_S3_` or `S3FS_` will be recognised. Content will be expanded by the
  shell. Usually, you will want to bind-mount that file from the host in
  read-only mode (see the sketch after this list).
* `UID` is the user ID for the owner of the share inside the container.
* `GID` is the group ID for the owner of the share inside the container.
* `S3FS_DEBUG` can be set to `1` to get some debugging information from [s3fs].
* `S3FS_ARGS` can contain additional options to be blindly passed to [s3fs].
  Options must be given comma-separated, e.g.
  `use_path_request_style,allow_other,default_acl=public-read`.

[secrets]: https://docs.docker.com/engine/swarm/secrets/
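
As an illustration of `AWS_S3_ENVFILE`, here is a minimal sketch of an
environment file and of a container using it. The file name `s3fs.env`, its
in-container path `/run/s3fs.env` and the placeholder values are illustrative
only:

```Shell
# s3fs.env -- only lines starting with AWS_S3_ or S3FS_ are recognised.
AWS_S3_BUCKET=<bucketname>
AWS_S3_ACCESS_KEY_ID=<key>
AWS_S3_SECRET_ACCESS_KEY=<secret>
S3FS_ARGS=allow_other
```

```Shell
docker run -it --rm \
    --device /dev/fuse \
    --cap-add SYS_ADMIN \
    --security-opt "apparmor=unconfined" \
    --env "AWS_S3_ENVFILE=/run/s3fs.env" \
    -v "$(pwd)/s3fs.env:/run/s3fs.env:ro" \
    -v /mnt/tmp:/opt/s3fs/bucket:rshared \
    efrecon/s3fs
```
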

## Commands

By default, this container will be silent, running `empty.sh` as its command.
If you want to check for liveness, you can pass the command `ls.sh` instead,
which will keep listing the content of the mounted directory at regular
intervals, as in the sketch below. Both these commands ensure that the remote
bucket is unmounted from the mountpoint at termination, so you should really
pick one or the other to allow for proper operation. If the mountpoint was not
unmounted, your mount table will be left in an inconsistent state, as it will
contain a stale entry.

Automatic unmounting is achieved through a combination of a `trap` in the
command being executed and [tini]. [tini] is made available directly in this
image to make it possible to run in [Swarm][swarm] or [Kubernetes][kubernetes]
environments.

[tini]: https://github.com/krallin/tini
[swarm]: https://docs.docker.com/engine/swarm/
[kubernetes]: https://kubernetes.io/
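
For instance, reusing the creation options from the example earlier in this
README, the following sketch runs the container with `ls.sh` so that the
container logs periodically show the content of the bucket:

```Shell
docker run -it --rm \
    --device /dev/fuse \
    --cap-add SYS_ADMIN \
    --security-opt "apparmor=unconfined" \
    --env "AWS_S3_BUCKET=<bucketname>" \
    --env "AWS_S3_ACCESS_KEY_ID=<key>" \
    --env "AWS_S3_SECRET_ACCESS_KEY=<secret>" \
    -v /mnt/tmp:/opt/s3fs/bucket:rshared \
    efrecon/s3fs \
    ls.sh
```
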

## Versions and Tags

The docker [image] has [tags] that automatically match the list of official
[versions] of [s3fs]. This is achieved by using the github API to discover the
list of tags starting with `v` and building a separate image for each of them.
The image itself builds upon [alpine], picking the latest Alpine with major
version number `3` at the time of the build. There is no release for version
1.87, as it contains a regression that was [fixed] after the release.

[versions]: https://github.com/s3fs-fuse/s3fs-fuse/tags
[alpine]: https://hub.docker.com/_/alpine
[fixed]: https://github.com/s3fs-fuse/s3fs-fuse/pull/1365
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.8'
services:
  s3fs:
    container_name: s3fs
    image: efrecon/s3fs:1.91
    restart: unless-stopped
    cap_add:
      - SYS_ADMIN
    security_opt:
      - 'apparmor:unconfined'
    devices:
      - /dev/fuse
    volumes:
      - './bucket:/opt/s3fs/bucket:rshared'
    environment:
      AWS_S3_BUCKET: '${AWS_S3_BUCKET}'
      AWS_S3_ACCESS_KEY_ID: '${AWS_S3_ACCESS_KEY_ID}'
      AWS_S3_SECRET_ACCESS_KEY: '${AWS_S3_SECRET_ACCESS_KEY}'
      AWS_S3_AUTHFILE: '${AWS_S3_AUTHFILE}'
      AWS_S3_ACCESS_KEY_ID_FILE: '${AWS_S3_ACCESS_KEY_ID_FILE}'
      AWS_S3_SECRET_ACCESS_KEY_FILE: '${AWS_S3_SECRET_ACCESS_KEY_FILE}'
      AWS_S3_URL: '${AWS_S3_URL}'
      AWS_S3_MOUNT: '/opt/s3fs/bucket'
      S3FS_ARGS: ''
      S3FS_DEBUG: 0
      UID: 1000
      GID: 1000
--------------------------------------------------------------------------------
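
A sketch of bringing the above compose file up. It reads most of its values
from the environment, so these are assumed to be set, for instance through an
`.env` file next to `docker-compose.yml`, and the `./bucket` host directory
must exist. Given the mount-propagation caveat with the compose plugin
mentioned in the README, standalone `docker-compose` is used here:

```Shell
# Create the host-side mount point, then start the service in the background.
mkdir -p ./bucket
docker-compose up -d
# Follow the logs to verify that the bucket was mounted.
docker-compose logs -f s3fs
```
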
The image itself builds upon [alpine] and it will pick the latest Alpine 130 | with major version number `3` at the time of the build. There is no release for 131 | version 1.87 as it contains a regression that was [fixed] after the release. 132 | 133 | [image]: https://cloud.docker.com/repository/docker/efrecon/s3fs 134 | [tags]: https://cloud.docker.com/repository/docker/efrecon/s3fs/tags 135 | [versions]: https://github.com/s3fs-fuse/s3fs-fuse/tags 136 | [alpine]: https://hub.docker.com/_/alpine 137 | [fixed]: https://github.com/s3fs-fuse/s3fs-fuse/pull/1365 138 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | services: 3 | s3fs: 4 | container_name: s3fs 5 | image: efrecon/s3fs:1.91 6 | restart: unless-stopped 7 | cap_add: 8 | - SYS_ADMIN 9 | security_opt: 10 | - 'apparmor:unconfined' 11 | devices: 12 | - /dev/fuse 13 | volumes: 14 | - './bucket:/opt/s3fs/bucket:rshared' 15 | environment: 16 | AWS_S3_BUCKET: '${AWS_S3_BUCKET}' 17 | AWS_S3_ACCESS_KEY_ID: '${AWS_S3_ACCESS_KEY_ID}' 18 | AWS_S3_SECRET_ACCESS_KEY: '${AWS_S3_SECRET_ACCESS_KEY}' 19 | AWS_S3_AUTHFILE: '${AWS_S3_AUTHFILE}' 20 | AWS_S3_ACCESS_KEY_ID_FILE: '${AWS_S3_ACCESS_KEY_ID_FILE}' 21 | AWS_S3_SECRET_ACCESS_KEY_FILE: '${AWS_S3_SECRET_ACCESS_KEY_FILE}' 22 | AWS_S3_URL: '${AWS_S3_URL}' 23 | AWS_S3_MOUNT: '/opt/s3fs/bucket' 24 | S3FS_ARGS: '' 25 | S3FS_DEBUG: 0 26 | UID: 1000 27 | GID: 1000 28 | -------------------------------------------------------------------------------- /docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Failsafe: Stop on errors and unset variables. 4 | set -eu 5 | 6 | # Debug 7 | S3FS_DEBUG=${S3FS_DEBUG:-"0"} 8 | 9 | # Env file 10 | AWS_S3_ENVFILE=${AWS_S3_ENVFILE:-""} 11 | 12 | _verbose() { 13 | if [ "$S3FS_DEBUG" = "1" ]; then 14 | printf %s\\n "$1" >&2 15 | fi 16 | } 17 | 18 | _error() { 19 | printf %s\\n "$1" >&2 20 | exit 1 21 | } 22 | 23 | # Read the content of the environment file, i.e. a file used to set the value of 24 | # all/some variables. 25 | if [ -n "$AWS_S3_ENVFILE" ]; then 26 | # Read and export lines that set variables in all-caps and starting with 27 | # S3FS_ or AWS_ from the configuration file. This is a security measure to 28 | # crudly protect against evaluating some evil code (but it will still 29 | # evaluate code as part of the value, so use it with care!) 30 | _verbose "Reading configuration from $AWS_S3_ENVFILE" 31 | while IFS= read -r line; do 32 | eval export "$line" 33 | done <