├── .gitignore
├── modules
├── echo
│ ├── build-deps
│ ├── source
│ └── prebuild
├── Dockerfile.alpine
├── Dockerfile
└── README.md
├── .test
├── tests
│ ├── workers
│ │ ├── expected-std-out.txt
│ │ ├── server.conf.template
│ │ └── run.sh
│ ├── ipv6
│ │ ├── expected-std-out.txt
│ │ └── run.sh
│ ├── templates
│ │ ├── server.conf.template
│ │ └── run.sh
│ ├── templates-resolver
│ │ ├── server.conf.template
│ │ └── run.sh
│ ├── modules
│ │ ├── nginx.conf.sme
│ │ └── run.sh
│ └── static
│ │ └── run.sh
└── config.sh
├── entrypoint
├── 15-local-resolvers.envsh
├── docker-entrypoint.sh
├── 10-listen-on-ipv6-by-default.sh
├── 20-envsubst-on-templates.sh
└── 30-tune-worker-processes.sh
├── stable
├── debian
│ ├── 15-local-resolvers.envsh
│ ├── docker-entrypoint.sh
│ ├── 10-listen-on-ipv6-by-default.sh
│ ├── 20-envsubst-on-templates.sh
│ ├── 30-tune-worker-processes.sh
│ └── Dockerfile
├── alpine-slim
│ ├── 15-local-resolvers.envsh
│ ├── docker-entrypoint.sh
│ ├── 10-listen-on-ipv6-by-default.sh
│ ├── 20-envsubst-on-templates.sh
│ ├── 30-tune-worker-processes.sh
│ └── Dockerfile
├── alpine-perl
│ └── Dockerfile
├── alpine
│ └── Dockerfile
└── debian-perl
│ └── Dockerfile
├── mainline
├── debian
│ ├── 15-local-resolvers.envsh
│ ├── docker-entrypoint.sh
│ ├── 10-listen-on-ipv6-by-default.sh
│ ├── 20-envsubst-on-templates.sh
│ ├── 30-tune-worker-processes.sh
│ └── Dockerfile
├── alpine-slim
│ ├── 15-local-resolvers.envsh
│ ├── docker-entrypoint.sh
│ ├── 10-listen-on-ipv6-by-default.sh
│ ├── 20-envsubst-on-templates.sh
│ ├── 30-tune-worker-processes.sh
│ └── Dockerfile
├── alpine-perl
│ └── Dockerfile
├── alpine
│ └── Dockerfile
└── debian-perl
│ └── Dockerfile
├── .github
├── ISSUE_TEMPLATE
│ ├── feature_request.md
│ └── bug_report.md
├── pull_request_template.md
└── workflows
│ └── ci.yml
├── SECURITY.md
├── LICENSE
├── SUPPORT.md
├── CONTRIBUTING.md
├── CODE_OF_CONDUCT.md
├── Dockerfile-alpine-perl.template
├── Dockerfile-alpine.template
├── generate-stackbrew-library.sh
├── sync-awsecr.sh
├── Dockerfile-debian-perl.template
├── README.md
├── Dockerfile-alpine-slim.template
├── Dockerfile-debian.template
└── update.sh
/.gitignore:
--------------------------------------------------------------------------------
1 | *.bak
2 |
--------------------------------------------------------------------------------
/modules/echo/build-deps:
--------------------------------------------------------------------------------
1 | make gcc
2 |
--------------------------------------------------------------------------------
/modules/echo/source:
--------------------------------------------------------------------------------
1 | https://github.com/openresty/echo-nginx-module/archive/v0.62.tar.gz
2 |
--------------------------------------------------------------------------------
/.test/tests/workers/expected-std-out.txt:
--------------------------------------------------------------------------------
1 | example.com - OK
2 | # Commented out by 30-tune-worker-processes.sh
3 |
--------------------------------------------------------------------------------
/.test/tests/ipv6/expected-std-out.txt:
--------------------------------------------------------------------------------
1 | <title>Welcome to nginx!</title>
2 | 10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf
3 |
--------------------------------------------------------------------------------
/.test/config.sh:
--------------------------------------------------------------------------------
1 | imageTests+=(
2 | [nginx]='
3 | ipv6
4 | static
5 | templates
6 | templates-resolver
7 | workers
8 | modules
9 | '
10 | )
11 |
--------------------------------------------------------------------------------
/.test/tests/workers/server.conf.template:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80;
3 | server_name ${NGINX_MY_SERVER_NAME};
4 | default_type text/plain;
5 | location = / { return 200 'OK\n'; }
6 | location / { return 200 "${NGINX_MY_SERVER_NAME} - OK\n"; }
7 | }
8 |
--------------------------------------------------------------------------------
/.test/tests/templates/server.conf.template:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80;
3 | server_name ${NGINX_MY_SERVER_NAME};
4 | default_type text/plain;
5 | location = / { return 200 'OK\n'; }
6 | location / { return 200 "${NGINX_MY_SERVER_NAME} - OK\n"; }
7 | }
8 |
--------------------------------------------------------------------------------
/.test/tests/templates-resolver/server.conf.template:
--------------------------------------------------------------------------------
1 | resolver ${NGINX_LOCAL_RESOLVERS};
2 |
3 | server {
4 | listen 80;
5 | server_name ${NGINX_MY_SERVER_NAME};
6 | default_type text/plain;
7 | location = / { return 200 'OK\n'; }
8 | location / { return 200 "${NGINX_MY_SERVER_NAME} - OK\n"; }
9 | }
10 |
--------------------------------------------------------------------------------
/entrypoint/15-local-resolvers.envsh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=2:ts=2:sts=2:et
3 |
4 | set -eu
5 |
6 | LC_ALL=C
7 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
8 |
9 | [ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0
10 |
11 | NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {print $2}' /etc/resolv.conf)
12 | export NGINX_LOCAL_RESOLVERS
13 |
--------------------------------------------------------------------------------
/stable/debian/15-local-resolvers.envsh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=2:ts=2:sts=2:et
3 |
4 | set -eu
5 |
6 | LC_ALL=C
7 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
8 |
9 | [ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0
10 |
11 | NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {print $2}' /etc/resolv.conf)
12 | export NGINX_LOCAL_RESOLVERS
13 |
--------------------------------------------------------------------------------
/mainline/debian/15-local-resolvers.envsh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=2:ts=2:sts=2:et
3 |
4 | set -eu
5 |
6 | LC_ALL=C
7 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
8 |
9 | [ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0
10 |
11 | NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {print $2}' /etc/resolv.conf)
12 | export NGINX_LOCAL_RESOLVERS
13 |
--------------------------------------------------------------------------------
/stable/alpine-slim/15-local-resolvers.envsh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=2:ts=2:sts=2:et
3 |
4 | set -eu
5 |
6 | LC_ALL=C
7 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
8 |
9 | [ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0
10 |
11 | NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {print $2}' /etc/resolv.conf)
12 | export NGINX_LOCAL_RESOLVERS
13 |
--------------------------------------------------------------------------------
/mainline/alpine-slim/15-local-resolvers.envsh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=2:ts=2:sts=2:et
3 |
4 | set -eu
5 |
6 | LC_ALL=C
7 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
8 |
9 | [ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0
10 |
11 | NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {print $2}' /etc/resolv.conf)
12 | export NGINX_LOCAL_RESOLVERS
13 |
--------------------------------------------------------------------------------
/modules/echo/prebuild:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | # if a module has a build dependency that is not in debian/alpine
4 | # use this script to fetch/build/install them
5 | #
6 | # note that shared libraries produced as a result of this script will
7 | # not be copied from the builder image to the resulting one, so you need to
8 | # build them statically
9 |
10 | echo "No prebuild stage required - all dependencies are satisfied already!"
11 |
12 | exit 0
13 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ""
5 | labels: ""
6 | assignees: ""
7 | ---
8 |
9 | ### Is your feature request related to a problem? Please describe
10 |
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when ...
12 |
13 | ### Describe the solution you'd like
14 |
15 | A clear and concise description of what you want to happen.
16 |
17 | ### Describe alternatives you've considered
18 |
19 | A clear and concise description of any alternative solutions or features you've considered.
20 |
21 | ### Additional context
22 |
23 | Add any other context or screenshots about the feature request here.
24 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ""
5 | labels: ""
6 | assignees: ""
7 | ---
8 |
9 | ### Describe the bug
10 |
11 | A clear and concise description of what the bug is.
12 |
13 | ### To reproduce
14 |
15 | Steps to reproduce the behavior:
16 |
17 | 1. Deploy the NGINX Docker image using ...
18 | 2. View output/logs/configuration on ...
19 | 3. See error
20 |
21 | ### Expected behavior
22 |
23 | A clear and concise description of what you expected to happen.
24 |
25 | ### Your environment
26 |
27 | - Version/release of Docker and method of installation (e.g. Docker Desktop / Docker Server)
28 | - Version/tag of the NGINX Docker image (e.g. `nginx:alpine`)
29 | - Target deployment platform (e.g. OpenShift / Kubernetes / Docker Compose / etc...)
30 |
31 | ### Additional context
32 |
33 | Add any other context about the problem here.
34 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | ## Latest Versions
4 |
5 | We advise users to run or update to the most recent release of the NGINX Docker image. Older versions of the NGINX Docker image may not have all enhancements and/or bug fixes applied to them.
6 |
7 | ## Reporting a Vulnerability
8 |
9 | The F5 Security Incident Response Team (F5 SIRT) has an email alias that makes it easy to report potential security vulnerabilities.
10 |
11 | - If you’re an F5 customer with an active support contract, please contact [F5 Technical Support](https://www.f5.com/services/support).
12 | - If you aren’t an F5 customer, please report any potential or current instances of security vulnerabilities with any F5 product to the F5 Security Incident Response Team at <F5SIRT@f5.com>.
13 |
14 | For more information visit [https://www.f5.com/services/support/report-a-vulnerability](https://www.f5.com/services/support/report-a-vulnerability)
15 |
--------------------------------------------------------------------------------
/.test/tests/modules/nginx.conf.sme:
--------------------------------------------------------------------------------
1 | user nginx;
2 | worker_processes auto;
3 |
4 | load_module modules/ndk_http_module.so;
5 | load_module modules/ngx_http_echo_module.so;
6 | load_module modules/ngx_http_set_misc_module.so;
7 |
8 | error_log /var/log/nginx/error.log notice;
9 | pid /var/run/nginx.pid;
10 |
11 | events {
12 | worker_connections 1024;
13 | }
14 |
15 | http {
16 | include /etc/nginx/mime.types;
17 | default_type application/octet-stream;
18 |
19 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
20 | '$status $body_bytes_sent "$http_referer" '
21 | '"$http_user_agent" "$http_x_forwarded_for"';
22 |
23 | access_log /var/log/nginx/access.log main;
24 |
25 | server {
26 | listen 80 default_server;
27 | location /hello {
28 | set $raw "hello";
29 | set_sha1 $digest $raw;
30 |
31 | echo $digest;
32 | }
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | ### Proposed changes
2 |
3 | Describe the use case and detail of the change. If this PR addresses an issue on GitHub, make sure to include a link to that issue using one of the [supported keywords](https://docs.github.com/en/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue) here in this description (not in the title of the PR).
4 |
5 | ### Checklist
6 |
7 | Before creating a PR, run through this checklist and mark each as complete:
8 | - [ ] I have read the [`CONTRIBUTING`](https://github.com/nginxinc/docker-nginx/blob/master/CONTRIBUTING.md) document
9 | - [ ] I have run `./update.sh` and ensured all entrypoint/Dockerfile template changes have been applied to the relevant image entrypoint scripts & Dockerfiles
10 | - [ ] If applicable, I have added tests that prove my fix is effective or that my feature works
11 | - [ ] If applicable, I have checked that any relevant tests pass after adding my changes
12 | - [ ] I have updated any relevant documentation
13 |
--------------------------------------------------------------------------------
/.test/tests/static/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | [ "$DEBUG" ] && set -x
4 | 
5 | set -eo pipefail
6 | 
7 | dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
8 | 
9 | image="$1"
10 | 
11 | clientImage='buildpack-deps:buster-curl'
12 | # ensure the clientImage is ready and available
13 | if ! docker image inspect "$clientImage" &> /dev/null; then
14 | docker pull "$clientImage" > /dev/null
15 | fi
16 | 
17 | # Create an instance of the container-under-test
18 | cid="$(docker run -d "$image")"
19 | trap "docker rm -vf $cid > /dev/null" EXIT
20 | 
21 | # _request METHOD PROTO URL [extra curl args...]
22 | # Issues an HTTP request against the container-under-test via a linked
23 | # curl client container; fails fast if the server container has exited.
24 | _request() {
25 | local method="$1"
26 | shift
27 | 
28 | local proto="$1"
29 | shift
30 | 
31 | local url="${1#/}"
32 | shift
33 | 
34 | if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
35 | echo >&2 "$image stopped unexpectedly!"
36 | ( set -x && docker logs "$cid" ) >&2 || true
37 | false
38 | fi
39 | 
40 | docker run --rm \
41 | --link "$cid":nginx \
42 | "$clientImage" \
43 | curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
44 | }
45 | 
46 | . "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
47 | 
48 | # Check that we can request / (restored: the <title> markup was lost to tag-stripping)
49 | _request GET http '/index.html' | grep '<title>Welcome to nginx!</title>'
50 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (C) 2011-2023 F5, Inc.
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions
6 | are met:
7 | 1. Redistributions of source code must retain the above copyright
8 | notice, this list of conditions and the following disclaimer.
9 | 2. Redistributions in binary form must reproduce the above copyright
10 | notice, this list of conditions and the following disclaimer in the
11 | documentation and/or other materials provided with the distribution.
12 |
13 | THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 | ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 | OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 | HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 | OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 | SUCH DAMAGE.
24 |
--------------------------------------------------------------------------------
/.test/tests/ipv6/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | [ "$DEBUG" ] && set -x
4 | 
5 | set -eo pipefail
6 | 
7 | # check if we have ipv6 available; skip the test cleanly otherwise
8 | if [ ! -f "/proc/net/if_inet6" ]; then
9 | exit 0
10 | fi
11 | 
12 | dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
13 | 
14 | image="$1"
15 | 
16 | clientImage='buildpack-deps:buster-curl'
17 | # ensure the clientImage is ready and available
18 | if ! docker image inspect "$clientImage" &> /dev/null; then
19 | docker pull "$clientImage" > /dev/null
20 | fi
21 | 
22 | cid="$(docker run -d "$image")"
23 | trap "docker rm -vf $cid > /dev/null" EXIT
24 | 
25 | # _request METHOD PROTO URL [extra curl args...]
26 | # Issues an HTTP request against the container-under-test via a linked
27 | # curl client container; fails fast if the server container has exited.
28 | _request() {
29 | local method="$1"
30 | shift
31 | 
32 | local proto="$1"
33 | shift
34 | 
35 | local url="${1#/}"
36 | shift
37 | 
38 | if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
39 | echo >&2 "$image stopped unexpectedly!"
40 | ( set -x && docker logs "$cid" ) >&2 || true
41 | false
42 | fi
43 | 
44 | docker run --rm \
45 | --link "$cid":nginx \
46 | "$clientImage" \
47 | curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
48 | }
49 | 
50 | . "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
51 | 
52 | # Check that we can request / (restored: the <title> markup was lost to tag-stripping)
53 | _request GET http '/index.html' | grep '<title>Welcome to nginx!</title>'
54 | 
55 | # quote $cid to be safe under word-splitting (consistent with uses above)
56 | docker logs "$cid" 2>&1 | grep "Enabled listen on IPv6"
57 | 
--------------------------------------------------------------------------------
/.test/tests/templates/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | [ "$DEBUG" ] && set -x
4 |
5 | set -eo pipefail
6 |
7 | dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
8 |
9 | image="$1"
10 |
11 | clientImage='buildpack-deps:buster-curl'
12 | # ensure the clientImage is ready and available
13 | if ! docker image inspect "$clientImage" &> /dev/null; then
14 | docker pull "$clientImage" > /dev/null
15 | fi
16 |
17 | # Create an instance of the container-under-test
18 | serverImage="$("$HOME/oi/test/tests/image-name.sh" librarytest/nginx-template "$image")"
19 | "$HOME/oi/test/tests/docker-build.sh" "$dir" "$serverImage" < /dev/null" EXIT
25 |
26 | _request() {
27 | local method="$1"
28 | shift
29 |
30 | local proto="$1"
31 | shift
32 |
33 | local url="${1#/}"
34 | shift
35 |
36 | if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
37 | echo >&2 "$image stopped unexpectedly!"
38 | ( set -x && docker logs "$cid" ) >&2 || true
39 | false
40 | fi
41 |
42 | docker run --rm \
43 | --link "$cid":nginx \
44 | "$clientImage" \
45 | curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
46 | }
47 |
48 | . "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
49 |
50 | # Check that we can request /
51 | _request GET http '/templates' | grep 'example.com - OK'
52 |
--------------------------------------------------------------------------------
/.test/tests/templates-resolver/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | [ "$DEBUG" ] && set -x
4 |
5 | set -eo pipefail
6 |
7 | dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
8 |
9 | image="$1"
10 |
11 | clientImage='buildpack-deps:buster-curl'
12 | # ensure the clientImage is ready and available
13 | if ! docker image inspect "$clientImage" &> /dev/null; then
14 | docker pull "$clientImage" > /dev/null
15 | fi
16 |
17 | # Create an instance of the container-under-test
18 | serverImage="$("$HOME/oi/test/tests/image-name.sh" librarytest/nginx-template "$image")"
19 | "$HOME/oi/test/tests/docker-build.sh" "$dir" "$serverImage" < /dev/null" EXIT
25 |
26 | _request() {
27 | local method="$1"
28 | shift
29 |
30 | local proto="$1"
31 | shift
32 |
33 | local url="${1#/}"
34 | shift
35 |
36 | if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
37 | echo >&2 "$image stopped unexpectedly!"
38 | ( set -x && docker logs "$cid" ) >&2 || true
39 | false
40 | fi
41 |
42 | docker run --rm \
43 | --link "$cid":nginx \
44 | "$clientImage" \
45 | curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
46 | }
47 |
48 | . "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
49 |
50 | # Check that we can request /
51 | _request GET http '/resolver-templates' | grep 'example.com - OK'
52 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: GitHub CI
2 |
3 | on:
4 | pull_request:
5 | push:
6 | schedule:
7 | - cron: 0 10 * * Mon
8 |
9 | defaults:
10 | run:
11 | shell: 'bash -Eeuo pipefail -x {0}'
12 |
13 | jobs:
14 |
15 | generate-jobs:
16 | name: Generate Jobs
17 | runs-on: ubuntu-latest
18 | outputs:
19 | strategy: ${{ steps.generate-jobs.outputs.strategy }}
20 | steps:
21 | - uses: actions/checkout@v3
22 | - uses: docker-library/bashbrew@v0.1.8
23 | - id: generate-jobs
24 | name: Generate Jobs
25 | run: |
26 | strategy="$(GITHUB_REPOSITORY=nginx "$BASHBREW_SCRIPTS/github-actions/generate.sh")"
27 | strategy="$(GITHUB_REPOSITORY=nginx "$BASHBREW_SCRIPTS/github-actions/munge-i386.sh" -c <<<"$strategy")"
28 | echo "strategy=$strategy" >> "$GITHUB_OUTPUT"
29 | jq . <<<"$strategy" # sanity check / debugging aid
30 |
31 | test:
32 | needs: generate-jobs
33 | strategy: ${{ fromJson(needs.generate-jobs.outputs.strategy) }}
34 | name: ${{ matrix.name }}
35 | runs-on: ${{ matrix.os }}
36 | steps:
37 | - uses: actions/checkout@v3
38 | - name: Prepare Environment
39 | run: ${{ matrix.runs.prepare }}
40 | - name: Pull Dependencies
41 | run: ${{ matrix.runs.pull }}
42 | - name: Build ${{ matrix.name }}
43 | run: ${{ matrix.runs.build }}
44 | - name: History ${{ matrix.name }}
45 | run: ${{ matrix.runs.history }}
46 | - name: Test ${{ matrix.name }}
47 | run: ${{ matrix.runs.test }}
48 | - name: '"docker images"'
49 | run: ${{ matrix.runs.images }}
50 |
--------------------------------------------------------------------------------
/.test/tests/workers/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | [ "$DEBUG" ] && set -x
4 |
5 | set -eo pipefail
6 |
7 | dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
8 |
9 | image="$1"
10 |
11 | clientImage='buildpack-deps:buster-curl'
12 | # ensure the clientImage is ready and available
13 | if ! docker image inspect "$clientImage" &> /dev/null; then
14 | docker pull "$clientImage" > /dev/null
15 | fi
16 |
17 | # Create an instance of the container-under-test
18 | serverImage="$("$HOME/oi/test/tests/image-name.sh" librarytest/nginx-template "$image")"
19 | "$HOME/oi/test/tests/docker-build.sh" "$dir" "$serverImage" < /dev/null" EXIT
25 |
26 | _request() {
27 | local method="$1"
28 | shift
29 |
30 | local proto="$1"
31 | shift
32 |
33 | local url="${1#/}"
34 | shift
35 |
36 | if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
37 | echo >&2 "$image stopped unexpectedly!"
38 | ( set -x && docker logs "$cid" ) >&2 || true
39 | false
40 | fi
41 |
42 | docker run --rm \
43 | --link "$cid":nginx \
44 | "$clientImage" \
45 | curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
46 | }
47 |
48 | . "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
49 |
50 | # Check that we can request /
51 | _request GET http '/worker-templates' | grep 'example.com - OK'
52 |
53 | result="$(docker exec $cid grep "Commented out by" /etc/nginx/nginx.conf)"
54 |
55 | echo "$result" | cut -d\ -f 1-5
56 |
--------------------------------------------------------------------------------
/SUPPORT.md:
--------------------------------------------------------------------------------
1 | # Support
2 |
3 | ## Ask a Question
4 |
5 | We use GitHub for tracking bugs and feature requests related to all the Docker NGINX images (including all variants and container registries).
6 |
7 | Don't know how something in this project works? Curious if this project can achieve your desired functionality? Please open an issue on GitHub with the label `question`.
8 |
9 | ## NGINX Specific Questions and/or Issues
10 |
11 | This isn't the right place to get support for NGINX specific questions, but the following resources are available below. Thanks for your understanding!
12 |
13 | ### Community Slack
14 |
15 | We have a community [Slack](https://nginxcommunity.slack.com/)!
16 |
17 | If you are not a member, click [here](https://community.nginx.org/joinslack) to sign up (and let us know if the link does not seem to be working!)
18 |
19 | Once you join, check out the `#beginner-questions` and `#nginx-users` channels :)
20 |
21 | ### Documentation
22 |
23 | For a comprehensive list of all NGINX directives, check out <https://nginx.org/en/docs/dirindex.html>.
24 |
25 | For a comprehensive list of admin and deployment guides for all NGINX products, check out <https://docs.nginx.com>.
26 |
27 | ### Mailing List
28 |
29 | Want to get in touch with the NGINX development team directly? Try using the relevant mailing list found at <https://mailman.nginx.org/mailman/listinfo>!
30 |
31 | ## Contributing
32 |
33 | Please see the [contributing guide](https://github.com/nginxinc/docker-nginx/blob/master/CONTRIBUTING.md) for guidelines on how to best contribute to this project.
34 |
35 | ## Commercial Support
36 |
37 | Commercial support for this project may be available. Please get in touch with [NGINX sales](https://www.nginx.com/contact-sales/) or check your contract details for more info!
38 |
--------------------------------------------------------------------------------
/entrypoint/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=4:ts=4:et
3 |
4 | set -e
5 |
6 | entrypoint_log() {
7 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
8 | echo "$@"
9 | fi
10 | }
11 |
12 | if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then
13 | if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
14 | entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"
15 |
16 | entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
17 | find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
18 | case "$f" in
19 | *.envsh)
20 | if [ -x "$f" ]; then
21 | entrypoint_log "$0: Sourcing $f";
22 | . "$f"
23 | else
24 | # warn on shell scripts without exec bit
25 | entrypoint_log "$0: Ignoring $f, not executable";
26 | fi
27 | ;;
28 | *.sh)
29 | if [ -x "$f" ]; then
30 | entrypoint_log "$0: Launching $f";
31 | "$f"
32 | else
33 | # warn on shell scripts without exec bit
34 | entrypoint_log "$0: Ignoring $f, not executable";
35 | fi
36 | ;;
37 | *) entrypoint_log "$0: Ignoring $f";;
38 | esac
39 | done
40 |
41 | entrypoint_log "$0: Configuration complete; ready for start up"
42 | else
43 | entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
44 | fi
45 | fi
46 |
47 | exec "$@"
48 |
--------------------------------------------------------------------------------
/mainline/debian/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=4:ts=4:et
3 |
4 | set -e
5 |
6 | entrypoint_log() {
7 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
8 | echo "$@"
9 | fi
10 | }
11 |
12 | if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then
13 | if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
14 | entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"
15 |
16 | entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
17 | find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
18 | case "$f" in
19 | *.envsh)
20 | if [ -x "$f" ]; then
21 | entrypoint_log "$0: Sourcing $f";
22 | . "$f"
23 | else
24 | # warn on shell scripts without exec bit
25 | entrypoint_log "$0: Ignoring $f, not executable";
26 | fi
27 | ;;
28 | *.sh)
29 | if [ -x "$f" ]; then
30 | entrypoint_log "$0: Launching $f";
31 | "$f"
32 | else
33 | # warn on shell scripts without exec bit
34 | entrypoint_log "$0: Ignoring $f, not executable";
35 | fi
36 | ;;
37 | *) entrypoint_log "$0: Ignoring $f";;
38 | esac
39 | done
40 |
41 | entrypoint_log "$0: Configuration complete; ready for start up"
42 | else
43 | entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
44 | fi
45 | fi
46 |
47 | exec "$@"
48 |
--------------------------------------------------------------------------------
/stable/debian/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=4:ts=4:et
3 |
4 | set -e
5 |
6 | entrypoint_log() {
7 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
8 | echo "$@"
9 | fi
10 | }
11 |
12 | if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then
13 | if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
14 | entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"
15 |
16 | entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
17 | find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
18 | case "$f" in
19 | *.envsh)
20 | if [ -x "$f" ]; then
21 | entrypoint_log "$0: Sourcing $f";
22 | . "$f"
23 | else
24 | # warn on shell scripts without exec bit
25 | entrypoint_log "$0: Ignoring $f, not executable";
26 | fi
27 | ;;
28 | *.sh)
29 | if [ -x "$f" ]; then
30 | entrypoint_log "$0: Launching $f";
31 | "$f"
32 | else
33 | # warn on shell scripts without exec bit
34 | entrypoint_log "$0: Ignoring $f, not executable";
35 | fi
36 | ;;
37 | *) entrypoint_log "$0: Ignoring $f";;
38 | esac
39 | done
40 |
41 | entrypoint_log "$0: Configuration complete; ready for start up"
42 | else
43 | entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
44 | fi
45 | fi
46 |
47 | exec "$@"
48 |
--------------------------------------------------------------------------------
/mainline/alpine-slim/docker-entrypoint.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# vim:sw=4:ts=4:et

set -e

# Print a message unless NGINX_ENTRYPOINT_QUIET_LOGS is set to a non-empty
# value, which lets users silence the entrypoint's informational output.
entrypoint_log() {
    if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
        echo "$@"
    fi
}

# Run the /docker-entrypoint.d/ machinery only when the container command is
# nginx itself; any other command (e.g. an interactive shell) is exec'd as-is.
if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then
    # "find ... -print -quit | read v" succeeds only when find printed at
    # least one entry, i.e. the directory contains at least one regular file.
    if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
        entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"

        entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
        # sort -V orders scripts by their numeric prefix (10-, 15-, 20-, ...).
        # NOTE(review): in most /bin/sh implementations this while loop runs in
        # a pipeline subshell, so variables set by sourced *.envsh files are
        # visible to later scripts in this loop but presumably not to the
        # exec'd "$@" below — confirm against the image's shell.
        find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
            case "$f" in
                *.envsh)
                    if [ -x "$f" ]; then
                        entrypoint_log "$0: Sourcing $f";
                        # sourced (not executed) so it can set environment variables
                        . "$f"
                    else
                        # warn on shell scripts without exec bit
                        entrypoint_log "$0: Ignoring $f, not executable";
                    fi
                    ;;
                *.sh)
                    if [ -x "$f" ]; then
                        entrypoint_log "$0: Launching $f";
                        "$f"
                    else
                        # warn on shell scripts without exec bit
                        entrypoint_log "$0: Ignoring $f, not executable";
                    fi
                    ;;
                *) entrypoint_log "$0: Ignoring $f";;
            esac
        done

        entrypoint_log "$0: Configuration complete; ready for start up"
    else
        entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
    fi
fi

# Replace this shell with the requested command (nginx by default), so the
# command becomes PID 1 and receives signals directly.
exec "$@"
--------------------------------------------------------------------------------
/stable/alpine-slim/docker-entrypoint.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# vim:sw=4:ts=4:et

set -e

# Print a message unless NGINX_ENTRYPOINT_QUIET_LOGS is set to a non-empty
# value, which lets users silence the entrypoint's informational output.
entrypoint_log() {
    if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
        echo "$@"
    fi
}

# Run the /docker-entrypoint.d/ machinery only when the container command is
# nginx itself; any other command (e.g. an interactive shell) is exec'd as-is.
if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then
    # "find ... -print -quit | read v" succeeds only when find printed at
    # least one entry, i.e. the directory contains at least one regular file.
    if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
        entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"

        entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
        # sort -V orders scripts by their numeric prefix (10-, 15-, 20-, ...).
        # NOTE(review): in most /bin/sh implementations this while loop runs in
        # a pipeline subshell, so variables set by sourced *.envsh files are
        # visible to later scripts in this loop but presumably not to the
        # exec'd "$@" below — confirm against the image's shell.
        find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
            case "$f" in
                *.envsh)
                    if [ -x "$f" ]; then
                        entrypoint_log "$0: Sourcing $f";
                        # sourced (not executed) so it can set environment variables
                        . "$f"
                    else
                        # warn on shell scripts without exec bit
                        entrypoint_log "$0: Ignoring $f, not executable";
                    fi
                    ;;
                *.sh)
                    if [ -x "$f" ]; then
                        entrypoint_log "$0: Launching $f";
                        "$f"
                    else
                        # warn on shell scripts without exec bit
                        entrypoint_log "$0: Ignoring $f, not executable";
                    fi
                    ;;
                *) entrypoint_log "$0: Ignoring $f";;
            esac
        done

        entrypoint_log "$0: Configuration complete; ready for start up"
    else
        entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
    fi
fi

# Replace this shell with the requested command (nginx by default), so the
# command becomes PID 1 and receives signals directly.
exec "$@"
--------------------------------------------------------------------------------
/.test/tests/modules/run.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Integration test: builds an nginx image with dynamic modules enabled
# (ndk, set-misc, echo) and verifies a module-backed endpoint responds.

set -eo pipefail

# Directory containing this script (and its server fixture files).
dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"

echo $dir

image="$1"

# Only run against the *-perl leaf images; skip everything else.
case "$image" in
	*-perl)
		;;
	*)
		echo >&2 "skipping non-leaf image: $image"
		exit
		;;
esac

# Pick the matching modules Dockerfile (Dockerfile vs Dockerfile.alpine).
dockerfile="Dockerfile"
case "$image" in
	*alpine*)
		dockerfile="$dockerfile.alpine"
		;;
esac

clientImage='buildpack-deps:buster-curl'
# ensure the clientImage is ready and available
if ! docker image inspect "$clientImage" &> /dev/null; then
	docker pull "$clientImage" > /dev/null
fi

# Create an instance of the container-under-test
# NOTE(review): the image-name helper is called with "nginx-template" —
# possibly copied from the templates test; confirm this is intended.
modulesImage="$("$HOME/oi/test/tests/image-name.sh" librarytest/nginx-template "$image")"
DOCKER_BUILDKIT=0 docker build --build-arg NGINX_FROM_IMAGE="$image" --build-arg ENABLED_MODULES="ndk set-misc echo" -t "$modulesImage" -f "modules/$dockerfile" "$GITHUB_WORKSPACE/modules"

serverImage="${modulesImage}-sme"
# NOTE(review): this line looks garbled/truncated — the trailing '" EXIT'
# suggests a `trap "..." EXIT` and the `docker run` that defines $cid (used
# below) were lost here; restore the missing lines from the original script.
"$HOME/oi/test/tests/docker-build.sh" "$dir" "$serverImage" < /dev/null" EXIT

# Issue an HTTP request against the running container via the curl client
# image. Usage: _request METHOD PROTO URL [extra curl args...]
_request() {
	local method="$1"
	shift

	local proto="$1"
	shift

	# strip a leading slash; it is re-added when the URL is assembled below
	local url="${1#/}"
	shift

	# Fail loudly (dumping container logs) if the server died.
	if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
		echo >&2 "$image stopped unexpectedly!"
		( set -x && docker logs "$cid" ) >&2 || true
		false
	fi

	docker run --rm \
		--link "$cid":nginx \
		"$clientImage" \
		curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
}

# Retry until the server accepts connections (curl exit code 7 means
# "connection refused").
. "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'

# Check that we can request /
# expected value is sha1("hello") — presumably produced by the set-misc/echo
# server config; TODO confirm against the fixture nginx config.
_request GET http '/hello' | grep 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'
--------------------------------------------------------------------------------
/entrypoint/10-listen-on-ipv6-by-default.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# vim:sw=4:ts=4:et
#
# Enable listening on IPv6 ([::]:80) in the packaged default.conf, but only
# when it is safe: IPv6 must be available, the file must exist, be writable,
# and still match the distribution's packaged checksum (i.e. the user has not
# customized it). Exits 0 in every case so the entrypoint never aborts.

set -e

# Print a message unless NGINX_ENTRYPOINT_QUIET_LOGS is set to a non-empty value.
entrypoint_log() {
    if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
        echo "$@"
    fi
}

ME=$(basename "$0")
# Stored without the leading slash because the package manifests consulted
# below (dpkg/apk) list conffiles relative to /.
DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf"

# check if we have ipv6 available
if [ ! -f "/proc/net/if_inet6" ]; then
    entrypoint_log "$ME: info: ipv6 not available"
    exit 0
fi

if [ ! -f "/$DEFAULT_CONF_FILE" ]; then
    entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist"
    exit 0
fi

# check if the file can be modified, e.g. not on a r/o filesystem
# (expansion quoted to be robust against word-splitting/globbing)
touch "/$DEFAULT_CONF_FILE" 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; }

# check if the file is already modified, e.g. on a container restart
grep -q "listen \[::]\:80;" "/$DEFAULT_CONF_FILE" && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; }

# /etc/os-release defines $ID (debian, alpine, ...), used to pick the
# package manager's checksum database below.
if [ -f "/etc/os-release" ]; then
    . /etc/os-release
else
    entrypoint_log "$ME: info: can not guess the operating system"
    exit 0
fi

entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE"

# Bail out (successfully) when the file differs from the packaged version so
# a user-customized configuration is never rewritten.
case "$ID" in
    "debian")
        # dpkg records conffiles with MD5 checksums
        CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep "$DEFAULT_CONF_FILE" | cut -d' ' -f 3)
        echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || {
            entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
            exit 0
        }
        ;;
    "alpine")
        # apk manifest prints "sha1:<checksum>  <path>" entries
        CHECKSUM=$(apk manifest nginx 2>/dev/null | grep "$DEFAULT_CONF_FILE" | cut -d' ' -f 1 | cut -d ':' -f 2)
        echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || {
            entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
            exit 0
        }
        ;;
    *)
        entrypoint_log "$ME: info: Unsupported distribution"
        exit 0
        ;;
esac

# enable ipv6 on default.conf listen sockets
sed -i -E 's,listen 80;,listen 80;\n listen [::]:80;,' "/$DEFAULT_CONF_FILE"

entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE"

exit 0
--------------------------------------------------------------------------------
/mainline/debian/10-listen-on-ipv6-by-default.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# vim:sw=4:ts=4:et
#
# Enable listening on IPv6 ([::]:80) in the packaged default.conf, but only
# when it is safe: IPv6 must be available, the file must exist, be writable,
# and still match the distribution's packaged checksum (i.e. the user has not
# customized it). Exits 0 in every case so the entrypoint never aborts.

set -e

# Print a message unless NGINX_ENTRYPOINT_QUIET_LOGS is set to a non-empty value.
entrypoint_log() {
    if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
        echo "$@"
    fi
}

ME=$(basename "$0")
# Stored without the leading slash because the package manifests consulted
# below (dpkg/apk) list conffiles relative to /.
DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf"

# check if we have ipv6 available
if [ ! -f "/proc/net/if_inet6" ]; then
    entrypoint_log "$ME: info: ipv6 not available"
    exit 0
fi

if [ ! -f "/$DEFAULT_CONF_FILE" ]; then
    entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist"
    exit 0
fi

# check if the file can be modified, e.g. not on a r/o filesystem
# (expansion quoted to be robust against word-splitting/globbing)
touch "/$DEFAULT_CONF_FILE" 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; }

# check if the file is already modified, e.g. on a container restart
grep -q "listen \[::]\:80;" "/$DEFAULT_CONF_FILE" && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; }

# /etc/os-release defines $ID (debian, alpine, ...), used to pick the
# package manager's checksum database below.
if [ -f "/etc/os-release" ]; then
    . /etc/os-release
else
    entrypoint_log "$ME: info: can not guess the operating system"
    exit 0
fi

entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE"

# Bail out (successfully) when the file differs from the packaged version so
# a user-customized configuration is never rewritten.
case "$ID" in
    "debian")
        # dpkg records conffiles with MD5 checksums
        CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep "$DEFAULT_CONF_FILE" | cut -d' ' -f 3)
        echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || {
            entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
            exit 0
        }
        ;;
    "alpine")
        # apk manifest prints "sha1:<checksum>  <path>" entries
        CHECKSUM=$(apk manifest nginx 2>/dev/null | grep "$DEFAULT_CONF_FILE" | cut -d' ' -f 1 | cut -d ':' -f 2)
        echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || {
            entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
            exit 0
        }
        ;;
    *)
        entrypoint_log "$ME: info: Unsupported distribution"
        exit 0
        ;;
esac

# enable ipv6 on default.conf listen sockets
sed -i -E 's,listen 80;,listen 80;\n listen [::]:80;,' "/$DEFAULT_CONF_FILE"

entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE"

exit 0
--------------------------------------------------------------------------------
/stable/debian/10-listen-on-ipv6-by-default.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# vim:sw=4:ts=4:et
#
# Enable listening on IPv6 ([::]:80) in the packaged default.conf, but only
# when it is safe: IPv6 must be available, the file must exist, be writable,
# and still match the distribution's packaged checksum (i.e. the user has not
# customized it). Exits 0 in every case so the entrypoint never aborts.

set -e

# Print a message unless NGINX_ENTRYPOINT_QUIET_LOGS is set to a non-empty value.
entrypoint_log() {
    if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
        echo "$@"
    fi
}

ME=$(basename "$0")
# Stored without the leading slash because the package manifests consulted
# below (dpkg/apk) list conffiles relative to /.
DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf"

# check if we have ipv6 available
if [ ! -f "/proc/net/if_inet6" ]; then
    entrypoint_log "$ME: info: ipv6 not available"
    exit 0
fi

if [ ! -f "/$DEFAULT_CONF_FILE" ]; then
    entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist"
    exit 0
fi

# check if the file can be modified, e.g. not on a r/o filesystem
# (expansion quoted to be robust against word-splitting/globbing)
touch "/$DEFAULT_CONF_FILE" 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; }

# check if the file is already modified, e.g. on a container restart
grep -q "listen \[::]\:80;" "/$DEFAULT_CONF_FILE" && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; }

# /etc/os-release defines $ID (debian, alpine, ...), used to pick the
# package manager's checksum database below.
if [ -f "/etc/os-release" ]; then
    . /etc/os-release
else
    entrypoint_log "$ME: info: can not guess the operating system"
    exit 0
fi

entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE"

# Bail out (successfully) when the file differs from the packaged version so
# a user-customized configuration is never rewritten.
case "$ID" in
    "debian")
        # dpkg records conffiles with MD5 checksums
        CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep "$DEFAULT_CONF_FILE" | cut -d' ' -f 3)
        echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || {
            entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
            exit 0
        }
        ;;
    "alpine")
        # apk manifest prints "sha1:<checksum>  <path>" entries
        CHECKSUM=$(apk manifest nginx 2>/dev/null | grep "$DEFAULT_CONF_FILE" | cut -d' ' -f 1 | cut -d ':' -f 2)
        echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || {
            entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
            exit 0
        }
        ;;
    *)
        entrypoint_log "$ME: info: Unsupported distribution"
        exit 0
        ;;
esac

# enable ipv6 on default.conf listen sockets
sed -i -E 's,listen 80;,listen 80;\n listen [::]:80;,' "/$DEFAULT_CONF_FILE"

entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE"

exit 0
--------------------------------------------------------------------------------
/mainline/alpine-slim/10-listen-on-ipv6-by-default.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# vim:sw=4:ts=4:et
#
# Enable listening on IPv6 ([::]:80) in the packaged default.conf, but only
# when it is safe: IPv6 must be available, the file must exist, be writable,
# and still match the distribution's packaged checksum (i.e. the user has not
# customized it). Exits 0 in every case so the entrypoint never aborts.

set -e

# Print a message unless NGINX_ENTRYPOINT_QUIET_LOGS is set to a non-empty value.
entrypoint_log() {
    if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
        echo "$@"
    fi
}

ME=$(basename "$0")
# Stored without the leading slash because the package manifests consulted
# below (dpkg/apk) list conffiles relative to /.
DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf"

# check if we have ipv6 available
if [ ! -f "/proc/net/if_inet6" ]; then
    entrypoint_log "$ME: info: ipv6 not available"
    exit 0
fi

if [ ! -f "/$DEFAULT_CONF_FILE" ]; then
    entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist"
    exit 0
fi

# check if the file can be modified, e.g. not on a r/o filesystem
# (expansion quoted to be robust against word-splitting/globbing)
touch "/$DEFAULT_CONF_FILE" 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; }

# check if the file is already modified, e.g. on a container restart
grep -q "listen \[::]\:80;" "/$DEFAULT_CONF_FILE" && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; }

# /etc/os-release defines $ID (debian, alpine, ...), used to pick the
# package manager's checksum database below.
if [ -f "/etc/os-release" ]; then
    . /etc/os-release
else
    entrypoint_log "$ME: info: can not guess the operating system"
    exit 0
fi

entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE"

# Bail out (successfully) when the file differs from the packaged version so
# a user-customized configuration is never rewritten.
case "$ID" in
    "debian")
        # dpkg records conffiles with MD5 checksums
        CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep "$DEFAULT_CONF_FILE" | cut -d' ' -f 3)
        echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || {
            entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
            exit 0
        }
        ;;
    "alpine")
        # apk manifest prints "sha1:<checksum>  <path>" entries
        CHECKSUM=$(apk manifest nginx 2>/dev/null | grep "$DEFAULT_CONF_FILE" | cut -d' ' -f 1 | cut -d ':' -f 2)
        echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || {
            entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
            exit 0
        }
        ;;
    *)
        entrypoint_log "$ME: info: Unsupported distribution"
        exit 0
        ;;
esac

# enable ipv6 on default.conf listen sockets
sed -i -E 's,listen 80;,listen 80;\n listen [::]:80;,' "/$DEFAULT_CONF_FILE"

entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE"

exit 0
--------------------------------------------------------------------------------
/stable/alpine-slim/10-listen-on-ipv6-by-default.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# vim:sw=4:ts=4:et
#
# Enable listening on IPv6 ([::]:80) in the packaged default.conf, but only
# when it is safe: IPv6 must be available, the file must exist, be writable,
# and still match the distribution's packaged checksum (i.e. the user has not
# customized it). Exits 0 in every case so the entrypoint never aborts.

set -e

# Print a message unless NGINX_ENTRYPOINT_QUIET_LOGS is set to a non-empty value.
entrypoint_log() {
    if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
        echo "$@"
    fi
}

ME=$(basename "$0")
# Stored without the leading slash because the package manifests consulted
# below (dpkg/apk) list conffiles relative to /.
DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf"

# check if we have ipv6 available
if [ ! -f "/proc/net/if_inet6" ]; then
    entrypoint_log "$ME: info: ipv6 not available"
    exit 0
fi

if [ ! -f "/$DEFAULT_CONF_FILE" ]; then
    entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist"
    exit 0
fi

# check if the file can be modified, e.g. not on a r/o filesystem
# (expansion quoted to be robust against word-splitting/globbing)
touch "/$DEFAULT_CONF_FILE" 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; }

# check if the file is already modified, e.g. on a container restart
grep -q "listen \[::]\:80;" "/$DEFAULT_CONF_FILE" && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; }

# /etc/os-release defines $ID (debian, alpine, ...), used to pick the
# package manager's checksum database below.
if [ -f "/etc/os-release" ]; then
    . /etc/os-release
else
    entrypoint_log "$ME: info: can not guess the operating system"
    exit 0
fi

entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE"

# Bail out (successfully) when the file differs from the packaged version so
# a user-customized configuration is never rewritten.
case "$ID" in
    "debian")
        # dpkg records conffiles with MD5 checksums
        CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep "$DEFAULT_CONF_FILE" | cut -d' ' -f 3)
        echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || {
            entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
            exit 0
        }
        ;;
    "alpine")
        # apk manifest prints "sha1:<checksum>  <path>" entries
        CHECKSUM=$(apk manifest nginx 2>/dev/null | grep "$DEFAULT_CONF_FILE" | cut -d' ' -f 1 | cut -d ':' -f 2)
        echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || {
            entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
            exit 0
        }
        ;;
    *)
        entrypoint_log "$ME: info: Unsupported distribution"
        exit 0
        ;;
esac

# enable ipv6 on default.conf listen sockets
sed -i -E 's,listen 80;,listen 80;\n listen [::]:80;,' "/$DEFAULT_CONF_FILE"

entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE"

exit 0
--------------------------------------------------------------------------------
/entrypoint/20-envsubst-on-templates.sh:
--------------------------------------------------------------------------------
#!/bin/sh
#
# Render nginx configuration templates with envsubst:
#   *$suffix        -> $output_dir        (default: /etc/nginx/templates/*.template -> /etc/nginx/conf.d)
#   *$stream_suffix -> $stream_output_dir (default: *.stream-template -> /etc/nginx/stream-conf.d)
# Only environment variables whose names match NGINX_ENVSUBST_FILTER (all
# variables when the filter is empty) are substituted.

set -e

ME=$(basename "$0")

# Print a message unless NGINX_ENTRYPOINT_QUIET_LOGS is set to a non-empty value.
entrypoint_log() {
    if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
        echo "$@"
    fi
}

# Append a stream{} block including $stream_output_dir/*.conf to nginx.conf
# unless a stream block already exists. Uses $stream_output_dir from the
# calling function's scope.
add_stream_block() {
    local conffile="/etc/nginx/nginx.conf"

    if grep -q -E "\s*stream\s*\{" "$conffile"; then
        entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates"
    else
        # check if the file can be modified, e.g. not on a r/o filesystem
        touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; }
        entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf"
        cat << END >> "$conffile"
# added by "$ME" on "$(date)"
stream {
    include $stream_output_dir/*.conf;
}
END
    fi
}

auto_envsubst() {
    local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
    local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
    local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
    local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}"
    local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}"
    local filter="${NGINX_ENVSUBST_FILTER:-}"

    local template defined_envs relative_path output_path subdir
    # Build the list of variable references envsubst is allowed to expand:
    # every environment variable whose name matches $filter.
    defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
    [ -d "$template_dir" ] || return 0
    if [ ! -w "$output_dir" ]; then
        entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
        return 0
    fi
    find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
        relative_path="${template#"$template_dir/"}"
        output_path="$output_dir/${relative_path%"$suffix"}"
        subdir=$(dirname "$relative_path")
        # create a subdirectory where the template file exists
        mkdir -p "$output_dir/$subdir"
        entrypoint_log "$ME: Running envsubst on $template to $output_path"
        envsubst "$defined_envs" < "$template" > "$output_path"
    done

    # Print the first file with the stream suffix, this will be false if there are none
    # (-follow -type f matches the predicates of the processing loop below, so
    # templates reachable only through symlinks are detected too)
    if test -n "$(find "$template_dir" -follow -type f -name "*$stream_suffix" -print -quit)"; then
        mkdir -p "$stream_output_dir"
        if [ ! -w "$stream_output_dir" ]; then
            entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable"
            return 0
        fi
        add_stream_block
        find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do
            relative_path="${template#"$template_dir/"}"
            output_path="$stream_output_dir/${relative_path%"$stream_suffix"}"
            subdir=$(dirname "$relative_path")
            # create a subdirectory where the template file exists
            mkdir -p "$stream_output_dir/$subdir"
            entrypoint_log "$ME: Running envsubst on $template to $output_path"
            envsubst "$defined_envs" < "$template" > "$output_path"
        done
    fi
}

auto_envsubst

exit 0
--------------------------------------------------------------------------------
/mainline/debian/20-envsubst-on-templates.sh:
--------------------------------------------------------------------------------
#!/bin/sh
#
# Render nginx configuration templates with envsubst:
#   *$suffix        -> $output_dir        (default: /etc/nginx/templates/*.template -> /etc/nginx/conf.d)
#   *$stream_suffix -> $stream_output_dir (default: *.stream-template -> /etc/nginx/stream-conf.d)
# Only environment variables whose names match NGINX_ENVSUBST_FILTER (all
# variables when the filter is empty) are substituted.

set -e

ME=$(basename "$0")

# Print a message unless NGINX_ENTRYPOINT_QUIET_LOGS is set to a non-empty value.
entrypoint_log() {
    if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
        echo "$@"
    fi
}

# Append a stream{} block including $stream_output_dir/*.conf to nginx.conf
# unless a stream block already exists. Uses $stream_output_dir from the
# calling function's scope.
add_stream_block() {
    local conffile="/etc/nginx/nginx.conf"

    if grep -q -E "\s*stream\s*\{" "$conffile"; then
        entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates"
    else
        # check if the file can be modified, e.g. not on a r/o filesystem
        touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; }
        entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf"
        cat << END >> "$conffile"
# added by "$ME" on "$(date)"
stream {
    include $stream_output_dir/*.conf;
}
END
    fi
}

auto_envsubst() {
    local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
    local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
    local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
    local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}"
    local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}"
    local filter="${NGINX_ENVSUBST_FILTER:-}"

    local template defined_envs relative_path output_path subdir
    # Build the list of variable references envsubst is allowed to expand:
    # every environment variable whose name matches $filter.
    defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
    [ -d "$template_dir" ] || return 0
    if [ ! -w "$output_dir" ]; then
        entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
        return 0
    fi
    find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
        relative_path="${template#"$template_dir/"}"
        output_path="$output_dir/${relative_path%"$suffix"}"
        subdir=$(dirname "$relative_path")
        # create a subdirectory where the template file exists
        mkdir -p "$output_dir/$subdir"
        entrypoint_log "$ME: Running envsubst on $template to $output_path"
        envsubst "$defined_envs" < "$template" > "$output_path"
    done

    # Print the first file with the stream suffix, this will be false if there are none
    # (-follow -type f matches the predicates of the processing loop below, so
    # templates reachable only through symlinks are detected too)
    if test -n "$(find "$template_dir" -follow -type f -name "*$stream_suffix" -print -quit)"; then
        mkdir -p "$stream_output_dir"
        if [ ! -w "$stream_output_dir" ]; then
            entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable"
            return 0
        fi
        add_stream_block
        find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do
            relative_path="${template#"$template_dir/"}"
            output_path="$stream_output_dir/${relative_path%"$stream_suffix"}"
            subdir=$(dirname "$relative_path")
            # create a subdirectory where the template file exists
            mkdir -p "$stream_output_dir/$subdir"
            entrypoint_log "$ME: Running envsubst on $template to $output_path"
            envsubst "$defined_envs" < "$template" > "$output_path"
        done
    fi
}

auto_envsubst

exit 0
--------------------------------------------------------------------------------
/stable/debian/20-envsubst-on-templates.sh:
--------------------------------------------------------------------------------
#!/bin/sh
#
# Render nginx configuration templates with envsubst:
#   *$suffix        -> $output_dir        (default: /etc/nginx/templates/*.template -> /etc/nginx/conf.d)
#   *$stream_suffix -> $stream_output_dir (default: *.stream-template -> /etc/nginx/stream-conf.d)
# Only environment variables whose names match NGINX_ENVSUBST_FILTER (all
# variables when the filter is empty) are substituted.

set -e

ME=$(basename "$0")

# Print a message unless NGINX_ENTRYPOINT_QUIET_LOGS is set to a non-empty value.
entrypoint_log() {
    if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
        echo "$@"
    fi
}

# Append a stream{} block including $stream_output_dir/*.conf to nginx.conf
# unless a stream block already exists. Uses $stream_output_dir from the
# calling function's scope.
add_stream_block() {
    local conffile="/etc/nginx/nginx.conf"

    if grep -q -E "\s*stream\s*\{" "$conffile"; then
        entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates"
    else
        # check if the file can be modified, e.g. not on a r/o filesystem
        touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; }
        entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf"
        cat << END >> "$conffile"
# added by "$ME" on "$(date)"
stream {
    include $stream_output_dir/*.conf;
}
END
    fi
}

auto_envsubst() {
    local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
    local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
    local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
    local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}"
    local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}"
    local filter="${NGINX_ENVSUBST_FILTER:-}"

    local template defined_envs relative_path output_path subdir
    # Build the list of variable references envsubst is allowed to expand:
    # every environment variable whose name matches $filter.
    defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
    [ -d "$template_dir" ] || return 0
    if [ ! -w "$output_dir" ]; then
        entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
        return 0
    fi
    find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
        relative_path="${template#"$template_dir/"}"
        output_path="$output_dir/${relative_path%"$suffix"}"
        subdir=$(dirname "$relative_path")
        # create a subdirectory where the template file exists
        mkdir -p "$output_dir/$subdir"
        entrypoint_log "$ME: Running envsubst on $template to $output_path"
        envsubst "$defined_envs" < "$template" > "$output_path"
    done

    # Print the first file with the stream suffix, this will be false if there are none
    # (-follow -type f matches the predicates of the processing loop below, so
    # templates reachable only through symlinks are detected too)
    if test -n "$(find "$template_dir" -follow -type f -name "*$stream_suffix" -print -quit)"; then
        mkdir -p "$stream_output_dir"
        if [ ! -w "$stream_output_dir" ]; then
            entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable"
            return 0
        fi
        add_stream_block
        find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do
            relative_path="${template#"$template_dir/"}"
            output_path="$stream_output_dir/${relative_path%"$stream_suffix"}"
            subdir=$(dirname "$relative_path")
            # create a subdirectory where the template file exists
            mkdir -p "$stream_output_dir/$subdir"
            entrypoint_log "$ME: Running envsubst on $template to $output_path"
            envsubst "$defined_envs" < "$template" > "$output_path"
        done
    fi
}

auto_envsubst

exit 0
--------------------------------------------------------------------------------
/mainline/alpine-slim/20-envsubst-on-templates.sh:
--------------------------------------------------------------------------------
#!/bin/sh
#
# Render nginx configuration templates with envsubst:
#   *$suffix        -> $output_dir        (default: /etc/nginx/templates/*.template -> /etc/nginx/conf.d)
#   *$stream_suffix -> $stream_output_dir (default: *.stream-template -> /etc/nginx/stream-conf.d)
# Only environment variables whose names match NGINX_ENVSUBST_FILTER (all
# variables when the filter is empty) are substituted.

set -e

ME=$(basename "$0")

# Print a message unless NGINX_ENTRYPOINT_QUIET_LOGS is set to a non-empty value.
entrypoint_log() {
    if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
        echo "$@"
    fi
}

# Append a stream{} block including $stream_output_dir/*.conf to nginx.conf
# unless a stream block already exists. Uses $stream_output_dir from the
# calling function's scope.
add_stream_block() {
    local conffile="/etc/nginx/nginx.conf"

    if grep -q -E "\s*stream\s*\{" "$conffile"; then
        entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates"
    else
        # check if the file can be modified, e.g. not on a r/o filesystem
        touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; }
        entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf"
        cat << END >> "$conffile"
# added by "$ME" on "$(date)"
stream {
    include $stream_output_dir/*.conf;
}
END
    fi
}

auto_envsubst() {
    local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
    local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
    local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
    local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}"
    local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}"
    local filter="${NGINX_ENVSUBST_FILTER:-}"

    local template defined_envs relative_path output_path subdir
    # Build the list of variable references envsubst is allowed to expand:
    # every environment variable whose name matches $filter.
    defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
    [ -d "$template_dir" ] || return 0
    if [ ! -w "$output_dir" ]; then
        entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
        return 0
    fi
    find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
        relative_path="${template#"$template_dir/"}"
        output_path="$output_dir/${relative_path%"$suffix"}"
        subdir=$(dirname "$relative_path")
        # create a subdirectory where the template file exists
        mkdir -p "$output_dir/$subdir"
        entrypoint_log "$ME: Running envsubst on $template to $output_path"
        envsubst "$defined_envs" < "$template" > "$output_path"
    done

    # Print the first file with the stream suffix, this will be false if there are none
    # (-follow -type f matches the predicates of the processing loop below, so
    # templates reachable only through symlinks are detected too)
    if test -n "$(find "$template_dir" -follow -type f -name "*$stream_suffix" -print -quit)"; then
        mkdir -p "$stream_output_dir"
        if [ ! -w "$stream_output_dir" ]; then
            entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable"
            return 0
        fi
        add_stream_block
        find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do
            relative_path="${template#"$template_dir/"}"
            output_path="$stream_output_dir/${relative_path%"$stream_suffix"}"
            subdir=$(dirname "$relative_path")
            # create a subdirectory where the template file exists
            mkdir -p "$stream_output_dir/$subdir"
            entrypoint_log "$ME: Running envsubst on $template to $output_path"
            envsubst "$defined_envs" < "$template" > "$output_path"
        done
    fi
}

auto_envsubst

exit 0
--------------------------------------------------------------------------------
/stable/alpine-slim/20-envsubst-on-templates.sh:
--------------------------------------------------------------------------------
#!/bin/sh
#
# Render nginx configuration templates with envsubst:
#   *$suffix        -> $output_dir        (default: /etc/nginx/templates/*.template -> /etc/nginx/conf.d)
#   *$stream_suffix -> $stream_output_dir (default: *.stream-template -> /etc/nginx/stream-conf.d)
# Only environment variables whose names match NGINX_ENVSUBST_FILTER (all
# variables when the filter is empty) are substituted.

set -e

ME=$(basename "$0")

# Print a message unless NGINX_ENTRYPOINT_QUIET_LOGS is set to a non-empty value.
entrypoint_log() {
    if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
        echo "$@"
    fi
}

# Append a stream{} block including $stream_output_dir/*.conf to nginx.conf
# unless a stream block already exists. Uses $stream_output_dir from the
# calling function's scope.
add_stream_block() {
    local conffile="/etc/nginx/nginx.conf"

    if grep -q -E "\s*stream\s*\{" "$conffile"; then
        entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates"
    else
        # check if the file can be modified, e.g. not on a r/o filesystem
        touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; }
        entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf"
        cat << END >> "$conffile"
# added by "$ME" on "$(date)"
stream {
    include $stream_output_dir/*.conf;
}
END
    fi
}

auto_envsubst() {
    local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
    local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
    local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
    local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}"
    local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}"
    local filter="${NGINX_ENVSUBST_FILTER:-}"

    local template defined_envs relative_path output_path subdir
    # Build the list of variable references envsubst is allowed to expand:
    # every environment variable whose name matches $filter.
    defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
    [ -d "$template_dir" ] || return 0
    if [ ! -w "$output_dir" ]; then
        entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
        return 0
    fi
    find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
        relative_path="${template#"$template_dir/"}"
        output_path="$output_dir/${relative_path%"$suffix"}"
        subdir=$(dirname "$relative_path")
        # create a subdirectory where the template file exists
        mkdir -p "$output_dir/$subdir"
        entrypoint_log "$ME: Running envsubst on $template to $output_path"
        envsubst "$defined_envs" < "$template" > "$output_path"
    done

    # Print the first file with the stream suffix, this will be false if there are none
    # (-follow -type f matches the predicates of the processing loop below, so
    # templates reachable only through symlinks are detected too)
    if test -n "$(find "$template_dir" -follow -type f -name "*$stream_suffix" -print -quit)"; then
        mkdir -p "$stream_output_dir"
        if [ ! -w "$stream_output_dir" ]; then
            entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable"
            return 0
        fi
        add_stream_block
        find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do
            relative_path="${template#"$template_dir/"}"
            output_path="$stream_output_dir/${relative_path%"$stream_suffix"}"
            subdir=$(dirname "$relative_path")
            # create a subdirectory where the template file exists
            mkdir -p "$stream_output_dir/$subdir"
            entrypoint_log "$ME: Running envsubst on $template to $output_path"
            envsubst "$defined_envs" < "$template" > "$output_path"
        done
    fi
}

auto_envsubst

exit 0
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing Guidelines
2 |
3 | The following is a set of guidelines for contributing to the Docker NGINX image. We really appreciate that you are considering contributing!
4 |
5 | #### Table Of Contents
6 |
7 | [Getting Started](#getting-started)
8 |
9 | [Contributing](#contributing)
10 |
11 | [Code Guidelines](#code-guidelines)
12 |
13 | [Code of Conduct](https://github.com/nginxinc/docker-nginx/blob/master/CODE_OF_CONDUCT.md)
14 |
15 | ## Getting Started
16 |
17 | Follow our [how to use this image guide](https://hub.docker.com/_/nginx/) to get the Docker NGINX image up and running.
18 |
19 | ## Contributing
20 |
21 | ### Report a Bug
22 |
23 | To report a bug, open an issue on GitHub with the label `bug` using the available bug report issue template. Please ensure the bug has not already been reported. **If the bug is a potential security vulnerability, please report it using our [security policy](https://github.com/nginxinc/docker-nginx/blob/master/SECURITY.md).**
24 |
25 | ### Suggest a Feature or Enhancement
26 |
To suggest a feature or enhancement, please create an issue on GitHub with the label `enhancement` using the available [feature request template](https://github.com/nginxinc/docker-nginx/blob/master/.github/ISSUE_TEMPLATE/feature_request.md). Please ensure the feature or enhancement has not already been suggested.
28 |
29 | ### Open a Pull Request
30 |
31 | - Fork the repo, create a branch, implement your changes, add any relevant tests, submit a PR when your changes are **tested** and ready for review.
32 | - Fill in [our pull request template](https://github.com/nginxinc/docker-nginx/blob/master/.github/pull_request_template.md).
33 |
Note: if you'd like to implement a new feature, please consider creating a [feature request issue](https://github.com/nginxinc/docker-nginx/blob/master/.github/ISSUE_TEMPLATE/feature_request.md) first to start a discussion about the feature.
35 |
36 | ## Code Guidelines
37 |
38 | ### Git Guidelines
39 |
40 | - Keep a clean, concise and meaningful git commit history on your branch (within reason), rebasing locally and squashing before submitting a PR.
- If possible and/or relevant, use the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) format when writing a commit message, so that changelogs can be automatically generated.
- Follow the guidelines of writing a good commit message as described [here](https://chris.beams.io/posts/git-commit/) and summarised in the next few points:
43 | - In the subject line, use the present tense ("Add feature" not "Added feature").
44 | - In the subject line, use the imperative mood ("Move cursor to..." not "Moves cursor to...").
45 | - Limit the subject line to 72 characters or less.
46 | - Reference issues and pull requests liberally after the subject line.
47 | - Add more detailed description in the body of the git message (`git commit -a` to give you more space and time in your text editor to write a good message instead of `git commit -am`).
48 |
49 | ### Docker Guidelines
50 |
- Update any entrypoint scripts via the scripts contained in the `/entrypoint` directory.
52 | - Update any Dockerfiles via the Dockerfile templates in the root directory (e.g. `Dockerfile-alpine.template`).
53 | - Run the `./update.sh` script to apply all entrypoint/Dockerfile template changes to the relevant image entrypoints & Dockerfiles.
54 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | In the interest of fostering an open and welcoming environment, we as
6 | contributors and maintainers pledge to making participation in our project and
7 | our community a harassment-free experience for everyone, regardless of age, body
8 | size, disability, ethnicity, sex characteristics, gender identity and expression,
9 | level of experience, education, socio-economic status, nationality, personal
10 | appearance, race, religion, or sexual identity and orientation.
11 |
12 | ## Our Standards
13 |
14 | Examples of behavior that contributes to creating a positive environment
15 | include:
16 |
17 | - Using welcoming and inclusive language
18 | - Being respectful of differing viewpoints and experiences
19 | - Gracefully accepting constructive criticism
20 | - Focusing on what is best for the community
21 | - Showing empathy towards other community members
22 |
23 | Examples of unacceptable behavior by participants include:
24 |
25 | - The use of sexualized language or imagery and unwelcome sexual attention or
26 | advances
27 | - Trolling, insulting/derogatory comments, and personal or political attacks
28 | - Public or private harassment
29 | - Publishing others' private information, such as a physical or electronic
30 | address, without explicit permission
31 | - Other conduct which could reasonably be considered inappropriate in a
32 | professional setting
33 |
34 | ## Our Responsibilities
35 |
36 | Project maintainers are responsible for clarifying the standards of acceptable
37 | behavior and are expected to take appropriate and fair corrective action in
38 | response to any instances of unacceptable behavior.
39 |
40 | Project maintainers have the right and responsibility to remove, edit, or
41 | reject comments, commits, code, wiki edits, issues, and other contributions
42 | that are not aligned to this Code of Conduct, or to ban temporarily or
43 | permanently any contributor for other behaviors that they deem inappropriate,
44 | threatening, offensive, or harmful.
45 |
46 | ## Scope
47 |
48 | This Code of Conduct applies both within project spaces and in public spaces
49 | when an individual is representing the project or its community. Examples of
50 | representing a project or community include using an official project e-mail
51 | address, posting via an official social media account, or acting as an appointed
52 | representative at an online or offline event. Representation of a project may be
53 | further defined and clarified by project maintainers.
54 |
55 | ## Enforcement
56 |
57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the moderation team. All
59 | complaints will be reviewed and investigated and will result in a response that
60 | is deemed necessary and appropriate to the circumstances. The project team is
61 | obligated to maintain confidentiality with regard to the reporter of an incident.
62 | Further details of specific enforcement policies may be posted separately.
63 |
64 | Project maintainers who do not follow or enforce the Code of Conduct in good
65 | faith may face temporary or permanent repercussions as determined by other
66 | members of the project's leadership.
67 |
68 | ## Attribution
69 |
70 | This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 1.4,
available at <https://www.contributor-covenant.org/version/1/4/code-of-conduct.html>
72 |
For answers to common questions about this code of conduct, see
<https://www.contributor-covenant.org/faq>
74 |
75 |
--------------------------------------------------------------------------------
/modules/Dockerfile.alpine:
--------------------------------------------------------------------------------
# Build user-requested third-party dynamic modules in a builder stage, then
# install the resulting .apk packages into a clean copy of the base image.
ARG NGINX_FROM_IMAGE=nginx:mainline-alpine
FROM ${NGINX_FROM_IMAGE} as builder

# Space-separated list of module names to build; must be non-empty.
ARG ENABLED_MODULES

RUN set -ex \
    && if [ "$ENABLED_MODULES" = "" ]; then \
        echo "No additional modules enabled, exiting"; \
        exit 1; \
    fi

COPY ./ /modules/

# For each requested module, build either from user-supplied sources under
# /modules/<name> or from the official pkg-oss packaging sources.
# NGINX_VERSION and PKG_RELEASE come from the base nginx image environment.
RUN set -ex \
    && apk update \
    && apk add linux-headers openssl-dev pcre2-dev zlib-dev openssl abuild \
               musl-dev libxslt libxml2-utils make mercurial gcc unzip git \
               xz g++ coreutils \
    # allow abuild as a root user \
    && printf "#!/bin/sh\\nSETFATTR=true /usr/bin/abuild -F \"\$@\"\\n" > /usr/local/bin/abuild \
    && chmod +x /usr/local/bin/abuild \
    && hg clone -r ${NGINX_VERSION}-${PKG_RELEASE} https://hg.nginx.org/pkg-oss/ \
    && cd pkg-oss \
    && mkdir /tmp/packages \
    && for module in $ENABLED_MODULES; do \
        echo "Building $module for nginx-$NGINX_VERSION"; \
        if [ -d /modules/$module ]; then \
            echo "Building $module from user-supplied sources"; \
            # check if module sources file is there and not empty
            if [ ! -s /modules/$module/source ]; then \
                echo "No source file for $module in modules/$module/source, exiting"; \
                exit 1; \
            fi; \
            # some modules require build dependencies
            if [ -f /modules/$module/build-deps ]; then \
                echo "Installing $module build dependencies"; \
                apk update && apk add $(cat /modules/$module/build-deps | xargs); \
            fi; \
            # if a module has a build dependency that is not in a distro, provide a
            # shell script to fetch/build/install those
            # note that shared libraries produced as a result of this script will
            # not be copied from the builder image to the main one so build static
            if [ -x /modules/$module/prebuild ]; then \
                echo "Running prebuild script for $module"; \
                /modules/$module/prebuild; \
            fi; \
            /pkg-oss/build_module.sh -v $NGINX_VERSION -f -y -o /tmp/packages -n $module $(cat /modules/$module/source); \
            # normalize the module name to lowercase alphanumerics for the package name
            BUILT_MODULES="$BUILT_MODULES $(echo $module | tr '[A-Z]' '[a-z]' | tr -d '[/_\-\.\t ]')"; \
        elif make -C /pkg-oss/alpine list | grep -E "^$module\s+\d+" > /dev/null; then \
            echo "Building $module from pkg-oss sources"; \
            cd /pkg-oss/alpine; \
            make abuild-module-$module BASE_VERSION=$NGINX_VERSION NGINX_VERSION=$NGINX_VERSION; \
            apk add $(. ./abuild-module-$module/APKBUILD; echo $makedepends;); \
            make module-$module BASE_VERSION=$NGINX_VERSION NGINX_VERSION=$NGINX_VERSION; \
            find ~/packages -type f -name "*.apk" -exec mv -v {} /tmp/packages/ \;; \
            BUILT_MODULES="$BUILT_MODULES $module"; \
        else \
            echo "Don't know how to build $module module, exiting"; \
            exit 1; \
        fi; \
    done \
    # record the built module list for the final stage to consume
    && echo "BUILT_MODULES=\"$BUILT_MODULES\"" > /tmp/packages/modules.env

# Final stage: install the freshly built module packages into a clean image.
FROM ${NGINX_FROM_IMAGE}
COPY --from=builder /tmp/packages /tmp/packages
RUN set -ex \
    && . /tmp/packages/modules.env \
    && for module in $BUILT_MODULES; do \
        apk add --no-cache --allow-untrusted /tmp/packages/nginx-module-${module}-${NGINX_VERSION}*.apk; \
    done \
    && rm -rf /tmp/packages
--------------------------------------------------------------------------------
/Dockerfile-alpine-perl.template:
--------------------------------------------------------------------------------
# Template for the alpine-perl image variant; the %%...%% placeholders
# (version, packages, repo, revision, checksum, build target) are substituted
# by update.sh when generating the concrete per-version Dockerfiles.
FROM nginx:%%NGINX_VERSION%%-alpine

RUN set -x \
    && apkArch="$(cat /etc/apk/arch)" \
    && nginxPackages="%%PACKAGES%%
    " \
# install prerequisites for public key and pkg-oss checks
    && apk add --no-cache --virtual .checksum-deps \
        openssl \
    && case "$apkArch" in \
        x86_64|aarch64) \
# arches officially built by upstream
            set -x \
            && KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \
            && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \
            && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \
                echo "key verification succeeded!"; \
                mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \
            else \
                echo "key verification failed!"; \
                exit 1; \
            fi \
            && apk add -X "%%PACKAGEREPO%%v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
            ;; \
        *) \
# we're on an architecture upstream doesn't officially build for
# let's build binaries from the published packaging sources
            set -x \
            && tempDir="$(mktemp -d)" \
            && chown nobody:nobody $tempDir \
            && apk add --no-cache --virtual .build-deps \
                gcc \
                libc-dev \
                make \
                openssl-dev \
                pcre2-dev \
                zlib-dev \
                linux-headers \
                perl-dev \
                bash \
                alpine-sdk \
                findutils \
# fetch, verify, and build the packages as the unprivileged "nobody" user
            && su nobody -s /bin/sh -c " \
                export HOME=${tempDir} \
                && cd ${tempDir} \
                && curl -f -O https://hg.nginx.org/pkg-oss/archive/%%REVISION%%.tar.gz \
                && PKGOSSCHECKSUM=\"%%PKGOSSCHECKSUM%% *%%REVISION%%.tar.gz\" \
                && if [ \"\$(openssl sha512 -r %%REVISION%%.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
                    echo \"pkg-oss tarball checksum verification succeeded!\"; \
                else \
                    echo \"pkg-oss tarball checksum verification failed!\"; \
                    exit 1; \
                fi \
                && tar xzvf %%REVISION%%.tar.gz \
                && cd pkg-oss-%%REVISION%% \
                && cd alpine \
                && make %%BUILDTARGET%% \
                && apk index -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
                && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
            " \
            && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
            && apk del --no-network .build-deps \
            && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
            ;; \
    esac \
# remove checksum deps
    && apk del --no-network .checksum-deps \
# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
    && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
    && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
    && if [ -f "/etc/apk/keys/nginx_signing.rsa.pub" ]; then rm -f /etc/apk/keys/nginx_signing.rsa.pub; fi
--------------------------------------------------------------------------------
/Dockerfile-alpine.template:
--------------------------------------------------------------------------------
# Template for the alpine image variant; the %%...%% placeholders
# (version, packages, repo, revision, checksum, build target) are substituted
# by update.sh when generating the concrete per-version Dockerfiles.
FROM nginx:%%NGINX_VERSION%%-alpine-slim

ENV NJS_VERSION %%NJS_VERSION%%

RUN set -x \
    && apkArch="$(cat /etc/apk/arch)" \
    && nginxPackages="%%PACKAGES%%
    " \
# install prerequisites for public key and pkg-oss checks
    && apk add --no-cache --virtual .checksum-deps \
        openssl \
    && case "$apkArch" in \
        x86_64|aarch64) \
# arches officially built by upstream
            set -x \
            && KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \
            && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \
            && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \
                echo "key verification succeeded!"; \
                mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \
            else \
                echo "key verification failed!"; \
                exit 1; \
            fi \
            && apk add -X "%%PACKAGEREPO%%v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
            ;; \
        *) \
# we're on an architecture upstream doesn't officially build for
# let's build binaries from the published packaging sources
            set -x \
            && tempDir="$(mktemp -d)" \
            && chown nobody:nobody $tempDir \
            && apk add --no-cache --virtual .build-deps \
                gcc \
                libc-dev \
                make \
                openssl-dev \
                pcre2-dev \
                zlib-dev \
                linux-headers \
                libxslt-dev \
                gd-dev \
                geoip-dev \
                libedit-dev \
                bash \
                alpine-sdk \
                findutils \
# fetch, verify, and build the packages as the unprivileged "nobody" user
            && su nobody -s /bin/sh -c " \
                export HOME=${tempDir} \
                && cd ${tempDir} \
                && curl -f -O https://hg.nginx.org/pkg-oss/archive/%%REVISION%%.tar.gz \
                && PKGOSSCHECKSUM=\"%%PKGOSSCHECKSUM%% *%%REVISION%%.tar.gz\" \
                && if [ \"\$(openssl sha512 -r %%REVISION%%.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
                    echo \"pkg-oss tarball checksum verification succeeded!\"; \
                else \
                    echo \"pkg-oss tarball checksum verification failed!\"; \
                    exit 1; \
                fi \
                && tar xzvf %%REVISION%%.tar.gz \
                && cd pkg-oss-%%REVISION%% \
                && cd alpine \
                && make %%BUILDTARGET%% \
                && apk index -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
                && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
            " \
            && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
            && apk del --no-network .build-deps \
            && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
            ;; \
    esac \
# remove checksum deps
    && apk del --no-network .checksum-deps \
# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
    && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
    && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
    && if [ -f "/etc/apk/keys/nginx_signing.rsa.pub" ]; then rm -f /etc/apk/keys/nginx_signing.rsa.pub; fi \
# Bring in curl and ca-certificates to make registering on DNS SD easier
    && apk add --no-cache curl ca-certificates
--------------------------------------------------------------------------------
/generate-stackbrew-library.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Generate the docker-library "stackbrew" manifest for the official nginx image.
set -eu

# Extra alias tags published for each release channel.
declare -A aliases
aliases=(
	[mainline]='1 1.25 latest'
	[stable]='1.24'
)

self="$(basename "$BASH_SOURCE")"
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
# debian is the default (unsuffixed) variant.
base=debian

versions=( mainline stable )
15 |
# print the hash of the newest commit that touched any of the given paths
fileCommit() {
	git log --max-count=1 --format='format:%H' HEAD -- "$@"
}
20 |
# get the most recent commit which modified "$1/Dockerfile" or any file COPY'd from "$1/Dockerfile"
dirCommit() {
	local dir="$1"; shift
	(
		cd "$dir"
		fileCommit \
			Dockerfile \
			$(git show HEAD:./Dockerfile | awk '
				# collect COPY source arguments; i < NF skips the destination
				toupper($1) == "COPY" {
					for (i = 2; i < NF; i++) {
						print $i
					}
				}
			')
	)
}
37 |
# emit the stackbrew library header (maintainer and repo metadata)
cat <<-EOH
	# this file is generated via https://github.com/nginxinc/docker-nginx/blob/$(fileCommit "$self")/$self

	Maintainers: NGINX Docker Maintainers (@nginxinc)
	GitRepo: https://github.com/nginxinc/docker-nginx.git
	EOH
44 |
# prints "$2$1$3$1...$N"
join() {
	# Prefix every remaining argument with the separator, concatenate them
	# all, then strip the one leading separator from the result.
	local separator="$1"; shift
	local piece result=""
	for piece in "$@"; do
		result="${result}${separator}${piece}"
	done
	echo "${result#$separator}"
}
51 |
# For each release channel, emit one Tags/Architectures/GitCommit/Directory
# stanza per variant (debian base, debian-perl, alpine, alpine-perl, alpine-slim).
for version in "${versions[@]}"; do
	commit="$(dirCommit "$version/$base")"

	# full upstream version (e.g. 1.25.3) parsed from the generated Dockerfile
	fullVersion="$(git show "$commit":"$version/$base/Dockerfile" | awk '$1 == "ENV" && $2 == "NGINX_VERSION" { print $3; exit }')"

	versionAliases=( $fullVersion )
	if [ "$version" != "$fullVersion" ]; then
		versionAliases+=( $version )
	fi
	versionAliases+=( ${aliases[$version]:-} )

	# debian suite name (e.g. bookworm) parsed from the FROM line
	debianVersion="$(git show "$commit":"$version/$base/Dockerfile" | awk -F"[-:]" '$1 == "FROM debian" { print $2; exit }')"
	debianAliases=( ${versionAliases[@]/%/-$debianVersion} )
	debianAliases=( "${debianAliases[@]//latest-/}" )

	echo
	cat <<-EOE
		Tags: $(join ', ' "${versionAliases[@]}"), $(join ', ' "${debianAliases[@]}")
		Architectures: amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, ppc64le, s390x
		GitCommit: $commit
		Directory: $version/$base
	EOE

	for variant in debian-perl; do
		commit="$(dirCommit "$version/$variant")"

		variantAliases=( "${versionAliases[@]/%/-perl}" )
		variantAliases+=( "${versionAliases[@]/%/-${variant/debian/$debianVersion}}" )
		variantAliases=( "${variantAliases[@]//latest-/}" )

		echo
		cat <<-EOE
			Tags: $(join ', ' "${variantAliases[@]}")
			Architectures: amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, ppc64le, s390x
			GitCommit: $commit
			Directory: $version/$variant
		EOE
	done

	# NOTE(review): $commit here still holds the last debian-perl commit from
	# the loop above, not an alpine-slim commit — confirm this is intentional.
	alpineVersion="$(git show "$commit":"$version/alpine-slim/Dockerfile" | awk -F: '$1 == "FROM alpine" { print $2; exit }')"

	for variant in alpine alpine-perl; do
		commit="$(dirCommit "$version/$variant")"

		variantAliases=( "${versionAliases[@]/%/-$variant}" )
		variantAliases+=( "${versionAliases[@]/%/-${variant/alpine/alpine$alpineVersion}}" )
		variantAliases=( "${variantAliases[@]//latest-/}" )

		echo
		cat <<-EOE
			Tags: $(join ', ' "${variantAliases[@]}")
			Architectures: arm64v8, arm32v6, arm32v7, ppc64le, s390x, i386, amd64
			GitCommit: $commit
			Directory: $version/$variant
		EOE
	done

	for variant in alpine-slim; do
		commit="$(dirCommit "$version/$variant")"

		variantAliases=( "${versionAliases[@]/%/-$variant}" )
		variantAliases=( "${versionAliases[@]/%/-${variant/alpine/alpine$alpineVersion}}" )
		variantAliases=( "${variantAliases[@]//latest-/}" )

		echo
		cat <<-EOE
			Tags: $(join ', ' "${variantAliases[@]}")
			Architectures: arm64v8, arm32v6, arm32v7, ppc64le, s390x, i386, amd64
			GitCommit: $commit
			Directory: $version/$variant
		EOE
	done

done
126 |
--------------------------------------------------------------------------------
/stable/alpine-perl/Dockerfile:
--------------------------------------------------------------------------------
1 | #
2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
3 | #
4 | # PLEASE DO NOT EDIT IT DIRECTLY.
5 | #
6 | FROM nginx:1.24.0-alpine
7 |
8 | RUN set -x \
9 | && apkArch="$(cat /etc/apk/arch)" \
10 | && nginxPackages=" \
11 | nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
12 | nginx-module-xslt=${NGINX_VERSION}-r${PKG_RELEASE} \
13 | nginx-module-geoip=${NGINX_VERSION}-r${PKG_RELEASE} \
14 | nginx-module-image-filter=${NGINX_VERSION}-r${PKG_RELEASE} \
15 | nginx-module-perl=${NGINX_VERSION}-r${PKG_RELEASE} \
16 | nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${PKG_RELEASE} \
17 | " \
18 | # install prerequisites for public key and pkg-oss checks
19 | && apk add --no-cache --virtual .checksum-deps \
20 | openssl \
21 | && case "$apkArch" in \
22 | x86_64|aarch64) \
23 | # arches officially built by upstream
24 | set -x \
25 | && KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \
26 | && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \
27 | && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \
28 | echo "key verification succeeded!"; \
29 | mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \
30 | else \
31 | echo "key verification failed!"; \
32 | exit 1; \
33 | fi \
34 | && apk add -X "https://nginx.org/packages/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
35 | ;; \
36 | *) \
37 | # we're on an architecture upstream doesn't officially build for
38 | # let's build binaries from the published packaging sources
39 | set -x \
40 | && tempDir="$(mktemp -d)" \
41 | && chown nobody:nobody $tempDir \
42 | && apk add --no-cache --virtual .build-deps \
43 | gcc \
44 | libc-dev \
45 | make \
46 | openssl-dev \
47 | pcre2-dev \
48 | zlib-dev \
49 | linux-headers \
50 | perl-dev \
51 | bash \
52 | alpine-sdk \
53 | findutils \
54 | && su nobody -s /bin/sh -c " \
55 | export HOME=${tempDir} \
56 | && cd ${tempDir} \
57 | && curl -f -O https://hg.nginx.org/pkg-oss/archive/e5d85b3424bb.tar.gz \
58 | && PKGOSSCHECKSUM=\"4f33347bf05e7d7dd42a52b6e7af7ec21e3ed71df05a8ec16dd1228425f04e4318d88b1340370ccb6ad02cde590fc102094ddffbb1fc86d2085295a43f02f67b *e5d85b3424bb.tar.gz\" \
59 | && if [ \"\$(openssl sha512 -r e5d85b3424bb.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
60 | echo \"pkg-oss tarball checksum verification succeeded!\"; \
61 | else \
62 | echo \"pkg-oss tarball checksum verification failed!\"; \
63 | exit 1; \
64 | fi \
65 | && tar xzvf e5d85b3424bb.tar.gz \
66 | && cd pkg-oss-e5d85b3424bb \
67 | && cd alpine \
68 | && make module-perl \
69 | && apk index -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
70 | && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
71 | " \
72 | && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
73 | && apk del --no-network .build-deps \
74 | && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
75 | ;; \
76 | esac \
77 | # remove checksum deps
78 | && apk del --no-network .checksum-deps \
79 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
80 | && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
81 | && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
82 | && if [ -f "/etc/apk/keys/nginx_signing.rsa.pub" ]; then rm -f /etc/apk/keys/nginx_signing.rsa.pub; fi
83 |
--------------------------------------------------------------------------------
/sync-awsecr.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Emit a shell script that mirrors the official nginx images to AWS public ECR.
set -eu

# Source image name and target public ECR registry.
image="nginx"
registry="public.ecr.aws/z9d2n7e1"

# Extra alias tags published for each release channel.
declare -A aliases
aliases=(
	[mainline]='1 1.25 latest'
	[stable]='1.24'
)

# Architectures mirrored to ECR.
architectures=( amd64 arm64v8 )

self="$(basename "$BASH_SOURCE")"
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
# debian is the default (unsuffixed) variant.
base=debian

versions=( mainline stable )

# Canonical images to pull, and alias-tag -> canonical-image map.
pulllist=()
declare -A taglist
taglist=()
24 |
# print the hash of the newest commit that touched any of the given paths
fileCommit() {
	git log --max-count=1 --format='format:%H' HEAD -- "$@"
}
29 |
# get the most recent commit which modified "$1/Dockerfile" or any file COPY'd from "$1/Dockerfile"
dirCommit() {
	local dir="$1"; shift
	(
		cd "$dir"
		fileCommit \
			Dockerfile \
			$(git show HEAD:./Dockerfile | awk '
				# collect COPY source arguments; i < NF skips the destination
				toupper($1) == "COPY" {
					for (i = 2; i < NF; i++) {
						print $i
					}
				}
			')
	)
}
46 |
# prints "$2$1$3$1...$N"
join() {
	# Prefix every remaining argument with the separator, concatenate them
	# all, then strip the one leading separator from the result.
	local separator="$1"; shift
	local piece result=""
	for piece in "$@"; do
		result="${result}${separator}${piece}"
	done
	echo "${result#$separator}"
}
53 |
# First pass: collect the canonical version-tagged images to pull from Docker Hub.
for version in "${versions[@]}"; do
	commit="$(dirCommit "$version/$base")"
	fullVersion="$(git show "$commit":"$version/$base/Dockerfile" | awk '$1 == "ENV" && $2 == "NGINX_VERSION" { print $3; exit }')"
	pulllist+=( "$image:$fullVersion" )
	for variant in perl alpine alpine-perl alpine-slim; do
		pulllist+=( "$image:$fullVersion-$variant" )
	done
done

# Second pass: map every alias tag to the canonical image it should point at.
for version in "${versions[@]}"; do
	commit="$(dirCommit "$version/$base")"

	fullVersion="$(git show "$commit":"$version/$base/Dockerfile" | awk '$1 == "ENV" && $2 == "NGINX_VERSION" { print $3; exit }')"

	versionAliases=( $fullVersion )
	if [ "$version" != "$fullVersion" ]; then
		versionAliases+=( $version )
	fi
	versionAliases+=( ${aliases[$version]:-} )

	# every alias except the full version itself points at the full version
	for tag in ${versionAliases[@]:1}; do
		taglist["$image:$tag"]="$image:$fullVersion"
	done

	for variant in debian-perl; do
		variantAliases=( "${versionAliases[@]/%/-perl}" )
		variantAliases=( "${variantAliases[@]//latest-/}" )

		for tag in ${variantAliases[@]}; do
			if [ "$tag" != "${fullVersion}-perl" ]; then
				taglist["$image:$tag"]="$image:$fullVersion-perl"
			fi
		done
	done

	for variant in alpine alpine-perl alpine-slim; do
		commit="$(dirCommit "$version/$variant")"

		variantAliases=( "${versionAliases[@]/%/-$variant}" )
		variantAliases=( "${variantAliases[@]//latest-/}" )

		for tag in ${variantAliases[@]}; do
			if [ "$tag" != "${fullVersion}-$variant" ]; then
				taglist["$image:$tag"]="$image:${fullVersion}-$variant"
			fi
		done
	done

done

# Emit the mirroring script on stdout: pull, tag, push, then manifest.
echo "#!/bin/sh"
echo "set -ex"
echo
echo "export DOCKER_CLI_EXPERIMENTAL=enabled"
echo
echo "# pulling stuff"
for arch in ${architectures[@]}; do
	for tag in ${pulllist[@]}; do
		echo "docker pull $arch/$tag";
	done
done

echo

echo "# tagging stuff"

for arch in ${architectures[@]}; do
	for tag in ${pulllist[@]}; do
		echo "docker tag $arch/$tag $registry/$tag-$arch"
	done
	for tag in ${!taglist[@]}; do
		echo "docker tag $arch/${taglist[$tag]} $registry/$tag-$arch"
	done
done

echo "# pushing stuff"

for arch in ${architectures[@]}; do
	for tag in ${pulllist[@]}; do
		echo "docker push $registry/$tag-$arch"
	done
	for tag in ${!taglist[@]}; do
		echo "docker push $registry/$tag-$arch"
	done
done

echo
echo "# manifesting stuff"
for tag in ${pulllist[@]} ${!taglist[@]}; do
	string="docker manifest create --amend $registry/$tag"
	for arch in ${architectures[@]}; do
		string+=" $registry/$tag-$arch"
	done
	echo $string
done

echo
echo "# pushing manifests"
for tag in ${pulllist[@]} ${!taglist[@]}; do
	echo "docker manifest push --purge $registry/$tag"
done
155 |
--------------------------------------------------------------------------------
/mainline/alpine-perl/Dockerfile:
--------------------------------------------------------------------------------
1 | #
2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
3 | #
4 | # PLEASE DO NOT EDIT IT DIRECTLY.
5 | #
6 | FROM nginx:1.25.3-alpine
7 |
8 | RUN set -x \
9 | && apkArch="$(cat /etc/apk/arch)" \
10 | && nginxPackages=" \
11 | nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
12 | nginx-module-xslt=${NGINX_VERSION}-r${PKG_RELEASE} \
13 | nginx-module-geoip=${NGINX_VERSION}-r${PKG_RELEASE} \
14 | nginx-module-image-filter=${NGINX_VERSION}-r${PKG_RELEASE} \
15 | nginx-module-perl=${NGINX_VERSION}-r${PKG_RELEASE} \
16 | nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${PKG_RELEASE} \
17 | " \
18 | # install prerequisites for public key and pkg-oss checks
19 | && apk add --no-cache --virtual .checksum-deps \
20 | openssl \
21 | && case "$apkArch" in \
22 | x86_64|aarch64) \
23 | # arches officially built by upstream
24 | set -x \
25 | && KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \
26 | && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \
27 | && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \
28 | echo "key verification succeeded!"; \
29 | mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \
30 | else \
31 | echo "key verification failed!"; \
32 | exit 1; \
33 | fi \
34 | && apk add -X "https://nginx.org/packages/mainline/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
35 | ;; \
36 | *) \
37 | # we're on an architecture upstream doesn't officially build for
38 | # let's build binaries from the published packaging sources
39 | set -x \
40 | && tempDir="$(mktemp -d)" \
41 | && chown nobody:nobody $tempDir \
42 | && apk add --no-cache --virtual .build-deps \
43 | gcc \
44 | libc-dev \
45 | make \
46 | openssl-dev \
47 | pcre2-dev \
48 | zlib-dev \
49 | linux-headers \
50 | perl-dev \
51 | bash \
52 | alpine-sdk \
53 | findutils \
54 | && su nobody -s /bin/sh -c " \
55 | export HOME=${tempDir} \
56 | && cd ${tempDir} \
57 | && curl -f -O https://hg.nginx.org/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
58 | && PKGOSSCHECKSUM=\"00b217979265cc9d66c991c9c89427558936dbaa568d175ca45780589171d94f1866217be09a83438d95494cf38baaa6788320f6d8d23f2fb29c03117391ff88 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \
59 | && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
60 | echo \"pkg-oss tarball checksum verification succeeded!\"; \
61 | else \
62 | echo \"pkg-oss tarball checksum verification failed!\"; \
63 | exit 1; \
64 | fi \
65 | && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
66 | && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \
67 | && cd alpine \
68 | && make module-perl \
69 | && apk index -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
70 | && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
71 | " \
72 | && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
73 | && apk del --no-network .build-deps \
74 | && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
75 | ;; \
76 | esac \
77 | # remove checksum deps
78 | && apk del --no-network .checksum-deps \
79 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
80 | && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
81 | && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
82 | && if [ -f "/etc/apk/keys/nginx_signing.rsa.pub" ]; then rm -f /etc/apk/keys/nginx_signing.rsa.pub; fi
83 |
--------------------------------------------------------------------------------
/modules/Dockerfile:
--------------------------------------------------------------------------------
# Two-stage build: the "builder" stage compiles the requested third-party nginx
# dynamic modules into .deb packages; the final stage installs those packages
# onto a clean copy of the same base image.
1 | ARG NGINX_FROM_IMAGE=nginx:mainline
2 | FROM ${NGINX_FROM_IMAGE} as builder
3 |
4 | ARG ENABLED_MODULES
5 |
# Fail fast when the build was invoked without any modules requested.
6 | RUN set -ex \
7 | && if [ "$ENABLED_MODULES" = "" ]; then \
8 | echo "No additional modules enabled, exiting"; \
9 | exit 1; \
10 | fi
11 |
12 | COPY ./ /modules/
13 |
# Install the Debian packaging toolchain, fetch the pinned xslscript helper,
# clone the matching pkg-oss packaging sources, and build each module.
14 | RUN set -ex \
15 | && apt update \
16 | && apt install -y --no-install-suggests --no-install-recommends \
17 | patch make wget mercurial devscripts debhelper dpkg-dev \
18 | quilt lsb-release build-essential libxml2-utils xsltproc \
19 | equivs git g++ libparse-recdescent-perl \
# xslscript.pl is fetched from a pinned revision and verified against a
# hard-coded SHA-512 before being installed.
20 | && XSLSCRIPT_SHA512="f7194c5198daeab9b3b0c3aebf006922c7df1d345d454bd8474489ff2eb6b4bf8e2ffe442489a45d1aab80da6ecebe0097759a1e12cc26b5f0613d05b7c09ffa *stdin" \
21 | && wget -O /tmp/xslscript.pl https://hg.nginx.org/xslscript/raw-file/01dc9ba12e1b/xslscript.pl \
22 | && if [ "$(cat /tmp/xslscript.pl | openssl sha512 -r)" = "$XSLSCRIPT_SHA512" ]; then \
23 | echo "XSLScript checksum verification succeeded!"; \
24 | chmod +x /tmp/xslscript.pl; \
25 | mv /tmp/xslscript.pl /usr/local/bin/; \
26 | else \
27 | echo "XSLScript checksum verification failed!"; \
28 | exit 1; \
29 | fi \
# pkg-oss is checked out at the tag matching this image's nginx package release
# (PKG_RELEASE with any "~distro" suffix stripped).
30 | && hg clone -r ${NGINX_VERSION}-${PKG_RELEASE%%~*} https://hg.nginx.org/pkg-oss/ \
31 | && cd pkg-oss \
32 | && mkdir /tmp/packages \
# Each requested module is built either from user-supplied sources under
# /modules/<name>/ or, failing that, from the pkg-oss packaging recipes.
33 | && for module in $ENABLED_MODULES; do \
34 | echo "Building $module for nginx-$NGINX_VERSION"; \
35 | if [ -d /modules/$module ]; then \
36 | echo "Building $module from user-supplied sources"; \
37 | # check if module sources file is there and not empty
38 | if [ ! -s /modules/$module/source ]; then \
39 | echo "No source file for $module in modules/$module/source, exiting"; \
40 | exit 1; \
41 | fi; \
42 | # some modules require build dependencies
43 | if [ -f /modules/$module/build-deps ]; then \
44 | echo "Installing $module build dependencies"; \
45 | apt update && apt install -y --no-install-suggests --no-install-recommends $(cat /modules/$module/build-deps | xargs); \
46 | fi; \
47 | # if a module has a build dependency that is not in a distro, provide a
48 | # shell script to fetch/build/install those
49 | # note that shared libraries produced as a result of this script will
50 | # not be copied from the builder image to the main one so build static
51 | if [ -x /modules/$module/prebuild ]; then \
52 | echo "Running prebuild script for $module"; \
53 | /modules/$module/prebuild; \
54 | fi; \
55 | /pkg-oss/build_module.sh -v $NGINX_VERSION -f -y -o /tmp/packages -n $module $(cat /modules/$module/source); \
# normalize the recorded name (lowercase, separator characters stripped) so it
# matches the nginx-module-<name> package filename installed by the final stage
56 | BUILT_MODULES="$BUILT_MODULES $(echo $module | tr '[A-Z]' '[a-z]' | tr -d '[/_\-\.\t ]')"; \
57 | elif make -C /pkg-oss/debian list | grep -P "^$module\s+\d" > /dev/null; then \
58 | echo "Building $module from pkg-oss sources"; \
59 | cd /pkg-oss/debian; \
60 | make rules-module-$module BASE_VERSION=$NGINX_VERSION NGINX_VERSION=$NGINX_VERSION; \
61 | mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" debuild-module-$module/nginx-$NGINX_VERSION/debian/control; \
62 | make module-$module BASE_VERSION=$NGINX_VERSION NGINX_VERSION=$NGINX_VERSION; \
63 | find ../../ -maxdepth 1 -mindepth 1 -type f -name "*.deb" -exec mv -v {} /tmp/packages/ \;; \
64 | BUILT_MODULES="$BUILT_MODULES $module"; \
65 | else \
66 | echo "Don't know how to build $module module, exiting"; \
67 | exit 1; \
68 | fi; \
69 | done \
# The list of successfully built modules is handed to the final stage via an
# env file alongside the packages.
70 | && echo "BUILT_MODULES=\"$BUILT_MODULES\"" > /tmp/packages/modules.env
71 |
# Final image: the same base, plus only the freshly built module packages.
72 | FROM ${NGINX_FROM_IMAGE}
73 | COPY --from=builder /tmp/packages /tmp/packages
74 | RUN set -ex \
75 | && apt update \
76 | && . /tmp/packages/modules.env \
77 | && for module in $BUILT_MODULES; do \
78 | apt install --no-install-suggests --no-install-recommends -y /tmp/packages/nginx-module-${module}_${NGINX_VERSION}*.deb; \
79 | done \
80 | && rm -rf /tmp/packages \
81 | && rm -rf /var/lib/apt/lists/
82 |
--------------------------------------------------------------------------------
/Dockerfile-debian-perl.template:
--------------------------------------------------------------------------------
# Template for the debian-based perl-module image. The %%...%% placeholders
# (NGINX_VERSION, PACKAGES, PACKAGEREPO, DEBIAN_VERSION, BUILDTARGET) are
# substituted when the per-version Dockerfiles are generated — presumably by
# update.sh, per the "GENERATED VIA update.sh" headers in the generated files.
1 | FROM nginx:%%NGINX_VERSION%%
2 |
3 | RUN set -x \
4 | && apt-get update \
5 | && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates \
# Fetch the pinned nginx signing key from a keyserver (with fallback) and
# export it to a keyring file that the APT source entries below reference.
6 | && \
7 | NGINX_GPGKEY=573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62; \
8 | NGINX_GPGKEY_PATH=/usr/share/keyrings/nginx-archive-keyring.gpg; \
9 | export GNUPGHOME="$(mktemp -d)"; \
10 | found=''; \
11 | for server in \
12 | hkp://keyserver.ubuntu.com:80 \
13 | pgp.mit.edu \
14 | ; do \
15 | echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
16 | gpg1 --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
17 | done; \
18 | test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
19 | gpg1 --export "$NGINX_GPGKEY" > "$NGINX_GPGKEY_PATH" ; \
20 | rm -rf "$GNUPGHOME"; \
21 | apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \
22 | && dpkgArch="$(dpkg --print-architecture)" \
23 | && nginxPackages="%%PACKAGES%%
24 | " \
25 | && case "$dpkgArch" in \
26 | amd64|arm64) \
27 | # arches officially built by upstream
28 | echo "deb [signed-by=$NGINX_GPGKEY_PATH] %%PACKAGEREPO%% %%DEBIAN_VERSION%% nginx" >> /etc/apt/sources.list.d/nginx.list \
29 | && apt-get update \
30 | ;; \
31 | *) \
32 | # we're on an architecture upstream doesn't officially build for
33 | # let's build binaries from the published source packages
34 | echo "deb-src [signed-by=$NGINX_GPGKEY_PATH] %%PACKAGEREPO%% %%DEBIAN_VERSION%% nginx" >> /etc/apt/sources.list.d/nginx.list \
35 | \
36 | # new directory for storing sources and .deb files
37 | && tempDir="$(mktemp -d)" \
38 | && chmod 777 "$tempDir" \
39 | # (777 to ensure APT's "_apt" user can access it too)
40 | \
41 | # save list of currently-installed packages so build dependencies can be cleanly removed later
42 | && savedAptMark="$(apt-mark showmanual)" \
43 | \
44 | # build .deb files from upstream's source packages (which are verified by apt-get)
45 | && apt-get update \
46 | && apt-get build-dep -y %%BUILDTARGET%% \
47 | && ( \
48 | cd "$tempDir" \
49 | && DEB_BUILD_OPTIONS="nocheck parallel=$(nproc)" \
50 | apt-get source --compile %%BUILDTARGET%% \
51 | ) \
52 | # we don't remove APT lists here because they get re-downloaded and removed later
53 | \
54 | # reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
55 | # (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
56 | && apt-mark showmanual | xargs apt-mark auto > /dev/null \
57 | && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
58 | \
59 | # create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
60 | && ls -lAFh "$tempDir" \
61 | && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
62 | && grep '^Package: ' "$tempDir/Packages" \
63 | && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
64 | # work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
65 | # Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
66 | # ...
67 | # E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
68 | && apt-get -o Acquire::GzipIndexes=false update \
69 | ;; \
70 | esac \
71 | \
# Install the nginx packages plus envsubst (gettext-base) and curl, then drop
# APT lists and the upstream source entry.
72 | && apt-get install --no-install-recommends --no-install-suggests -y \
73 | $nginxPackages \
74 | gettext-base \
75 | curl \
76 | && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
77 | \
78 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
79 | && if [ -n "$tempDir" ]; then \
80 | apt-get purge -y --auto-remove \
81 | && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
82 | fi
83 |
--------------------------------------------------------------------------------
/stable/alpine/Dockerfile:
--------------------------------------------------------------------------------
1 | #
2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
3 | #
4 | # PLEASE DO NOT EDIT IT DIRECTLY.
5 | #
6 | FROM nginx:1.24.0-alpine-slim
7 |
8 | ENV NJS_VERSION 0.8.0
9 |
10 | RUN set -x \
11 | && apkArch="$(cat /etc/apk/arch)" \
12 | && nginxPackages=" \
13 | nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
14 | nginx-module-xslt=${NGINX_VERSION}-r${PKG_RELEASE} \
15 | nginx-module-geoip=${NGINX_VERSION}-r${PKG_RELEASE} \
16 | nginx-module-image-filter=${NGINX_VERSION}-r${PKG_RELEASE} \
17 | nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${PKG_RELEASE} \
18 | " \
19 | # install prerequisites for public key and pkg-oss checks
20 | && apk add --no-cache --virtual .checksum-deps \
21 | openssl \
22 | && case "$apkArch" in \
23 | x86_64|aarch64) \
24 | # arches officially built by upstream
25 | set -x \
26 | && KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \
27 | && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \
28 | && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \
29 | echo "key verification succeeded!"; \
30 | mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \
31 | else \
32 | echo "key verification failed!"; \
33 | exit 1; \
34 | fi \
35 | && apk add -X "https://nginx.org/packages/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
36 | ;; \
37 | *) \
38 | # we're on an architecture upstream doesn't officially build for
39 | # let's build binaries from the published packaging sources
40 | set -x \
41 | && tempDir="$(mktemp -d)" \
42 | && chown nobody:nobody $tempDir \
43 | && apk add --no-cache --virtual .build-deps \
44 | gcc \
45 | libc-dev \
46 | make \
47 | openssl-dev \
48 | pcre2-dev \
49 | zlib-dev \
50 | linux-headers \
51 | libxslt-dev \
52 | gd-dev \
53 | geoip-dev \
54 | libedit-dev \
55 | bash \
56 | alpine-sdk \
57 | findutils \
58 | && su nobody -s /bin/sh -c " \
59 | export HOME=${tempDir} \
60 | && cd ${tempDir} \
61 | && curl -f -O https://hg.nginx.org/pkg-oss/archive/e5d85b3424bb.tar.gz \
62 | && PKGOSSCHECKSUM=\"4f33347bf05e7d7dd42a52b6e7af7ec21e3ed71df05a8ec16dd1228425f04e4318d88b1340370ccb6ad02cde590fc102094ddffbb1fc86d2085295a43f02f67b *e5d85b3424bb.tar.gz\" \
63 | && if [ \"\$(openssl sha512 -r e5d85b3424bb.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
64 | echo \"pkg-oss tarball checksum verification succeeded!\"; \
65 | else \
66 | echo \"pkg-oss tarball checksum verification failed!\"; \
67 | exit 1; \
68 | fi \
69 | && tar xzvf e5d85b3424bb.tar.gz \
70 | && cd pkg-oss-e5d85b3424bb \
71 | && cd alpine \
72 | && make module-geoip module-image-filter module-njs module-xslt \
73 | && apk index -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
74 | && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
75 | " \
76 | && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
77 | && apk del --no-network .build-deps \
78 | && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
79 | ;; \
80 | esac \
81 | # remove checksum deps
82 | && apk del --no-network .checksum-deps \
83 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
84 | && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
85 | && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
86 | && if [ -f "/etc/apk/keys/nginx_signing.rsa.pub" ]; then rm -f /etc/apk/keys/nginx_signing.rsa.pub; fi \
87 | # Bring in curl and ca-certificates to make registering on DNS SD easier
88 | && apk add --no-cache curl ca-certificates
89 |
--------------------------------------------------------------------------------
/mainline/alpine/Dockerfile:
--------------------------------------------------------------------------------
1 | #
2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
3 | #
4 | # PLEASE DO NOT EDIT IT DIRECTLY.
5 | #
6 | FROM nginx:1.25.3-alpine-slim
7 |
8 | ENV NJS_VERSION 0.8.2
9 |
10 | RUN set -x \
11 | && apkArch="$(cat /etc/apk/arch)" \
12 | && nginxPackages=" \
13 | nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
14 | nginx-module-xslt=${NGINX_VERSION}-r${PKG_RELEASE} \
15 | nginx-module-geoip=${NGINX_VERSION}-r${PKG_RELEASE} \
16 | nginx-module-image-filter=${NGINX_VERSION}-r${PKG_RELEASE} \
17 | nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${PKG_RELEASE} \
18 | " \
19 | # install prerequisites for public key and pkg-oss checks
20 | && apk add --no-cache --virtual .checksum-deps \
21 | openssl \
22 | && case "$apkArch" in \
23 | x86_64|aarch64) \
24 | # arches officially built by upstream
25 | set -x \
26 | && KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \
27 | && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \
28 | && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \
29 | echo "key verification succeeded!"; \
30 | mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \
31 | else \
32 | echo "key verification failed!"; \
33 | exit 1; \
34 | fi \
35 | && apk add -X "https://nginx.org/packages/mainline/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
36 | ;; \
37 | *) \
38 | # we're on an architecture upstream doesn't officially build for
39 | # let's build binaries from the published packaging sources
40 | set -x \
41 | && tempDir="$(mktemp -d)" \
42 | && chown nobody:nobody $tempDir \
43 | && apk add --no-cache --virtual .build-deps \
44 | gcc \
45 | libc-dev \
46 | make \
47 | openssl-dev \
48 | pcre2-dev \
49 | zlib-dev \
50 | linux-headers \
51 | libxslt-dev \
52 | gd-dev \
53 | geoip-dev \
54 | libedit-dev \
55 | bash \
56 | alpine-sdk \
57 | findutils \
58 | && su nobody -s /bin/sh -c " \
59 | export HOME=${tempDir} \
60 | && cd ${tempDir} \
61 | && curl -f -O https://hg.nginx.org/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
62 | && PKGOSSCHECKSUM=\"00b217979265cc9d66c991c9c89427558936dbaa568d175ca45780589171d94f1866217be09a83438d95494cf38baaa6788320f6d8d23f2fb29c03117391ff88 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \
63 | && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
64 | echo \"pkg-oss tarball checksum verification succeeded!\"; \
65 | else \
66 | echo \"pkg-oss tarball checksum verification failed!\"; \
67 | exit 1; \
68 | fi \
69 | && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
70 | && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \
71 | && cd alpine \
72 | && make module-geoip module-image-filter module-njs module-xslt \
73 | && apk index -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
74 | && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
75 | " \
76 | && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
77 | && apk del --no-network .build-deps \
78 | && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
79 | ;; \
80 | esac \
81 | # remove checksum deps
82 | && apk del --no-network .checksum-deps \
83 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
84 | && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
85 | && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
86 | && if [ -f "/etc/apk/keys/nginx_signing.rsa.pub" ]; then rm -f /etc/apk/keys/nginx_signing.rsa.pub; fi \
87 | # Bring in curl and ca-certificates to make registering on DNS SD easier
88 | && apk add --no-cache curl ca-certificates
89 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://www.repostatus.org/#active)
2 | [](https://github.com/nginxinc/docker-nginx/blob/master/SUPPORT.md)
3 |
4 | # About this Repo
5 |
6 | ## Maintained by: [the NGINX Docker Maintainers](https://github.com/nginxinc/docker-nginx)
7 |
8 | This is the Git repo of the [Docker "Official Image"](https://github.com/docker-library/official-images#what-are-official-images) for [`nginx`](https://hub.docker.com/_/nginx/). See [the Docker Hub page](https://hub.docker.com/_/nginx/) for the full readme on how to use this Docker image and for information regarding contributing and issues.
9 |
10 | The [full image description on Docker Hub](https://hub.docker.com/_/nginx/) is generated/maintained over in [the docker-library/docs repository](https://github.com/docker-library/docs), specifically in [the `nginx` directory](https://github.com/docker-library/docs/tree/master/nginx).
11 |
12 | The changelog for NGINX releases is available at [nginx.org changes page](https://nginx.org/en/CHANGES).
13 |
14 | ## See a change merged here that doesn't show up on Docker Hub yet?
15 |
16 | For more information about the full official images change lifecycle, see [the "An image's source changed in Git, now what?" FAQ entry](https://github.com/docker-library/faq#an-images-source-changed-in-git-now-what).
17 |
18 | For outstanding `nginx` image PRs, check [PRs with the "library/nginx" label on the official-images repository](https://github.com/docker-library/official-images/labels/library%2Fnginx). For the current "source of truth" for [`nginx`](https://hub.docker.com/_/nginx/), see [the `library/nginx` file in the official-images repository](https://github.com/docker-library/official-images/blob/master/library/nginx).
19 |
20 | ## Contributing
21 |
22 | Please see the [contributing guide](https://github.com/nginxinc/docker-nginx/blob/master/CONTRIBUTING.md) for guidelines on how to best contribute to this project.
23 |
24 | ## License
25 |
26 | [BSD 2-Clause](https://github.com/nginxinc/docker-nginx/blob/master/LICENSE)
27 |
28 | © [F5, Inc.](https://www.f5.com/) 2023
29 |
30 | ---
31 |
32 | - [](https://github.com/nginxinc/docker-nginx/actions?query=workflow%3A%22GitHub+CI%22+branch%3Amaster)
33 |
34 | | Build | Status | Badges | (per-arch) |
35 | |:-:|:-:|:-:|:-:|
36 | | [](https://doi-janky.infosiftr.net/job/multiarch/job/amd64/job/nginx/) | [](https://doi-janky.infosiftr.net/job/multiarch/job/arm32v5/job/nginx/) | [](https://doi-janky.infosiftr.net/job/multiarch/job/arm32v6/job/nginx/) | [](https://doi-janky.infosiftr.net/job/multiarch/job/arm32v7/job/nginx/) |
37 | | [](https://doi-janky.infosiftr.net/job/multiarch/job/arm64v8/job/nginx/) | [](https://doi-janky.infosiftr.net/job/multiarch/job/i386/job/nginx/) | [](https://doi-janky.infosiftr.net/job/multiarch/job/mips64le/job/nginx/) | [](https://doi-janky.infosiftr.net/job/multiarch/job/ppc64le/job/nginx/) |
38 | | [](https://doi-janky.infosiftr.net/job/multiarch/job/s390x/job/nginx/) | [](https://doi-janky.infosiftr.net/job/put-shared/job/light/job/nginx/) |
39 |
--------------------------------------------------------------------------------
/stable/debian-perl/Dockerfile:
--------------------------------------------------------------------------------
1 | #
2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
3 | #
4 | # PLEASE DO NOT EDIT IT DIRECTLY.
5 | #
6 | FROM nginx:1.24.0
7 |
8 | RUN set -x \
9 | && apt-get update \
10 | && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates \
11 | && \
12 | NGINX_GPGKEY=573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62; \
13 | NGINX_GPGKEY_PATH=/usr/share/keyrings/nginx-archive-keyring.gpg; \
14 | export GNUPGHOME="$(mktemp -d)"; \
15 | found=''; \
16 | for server in \
17 | hkp://keyserver.ubuntu.com:80 \
18 | pgp.mit.edu \
19 | ; do \
20 | echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
21 | gpg1 --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
22 | done; \
23 | test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
24 | gpg1 --export "$NGINX_GPGKEY" > "$NGINX_GPGKEY_PATH" ; \
25 | rm -rf "$GNUPGHOME"; \
26 | apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \
27 | && dpkgArch="$(dpkg --print-architecture)" \
28 | && nginxPackages=" \
29 | nginx=${NGINX_VERSION}-${PKG_RELEASE} \
30 | nginx-module-xslt=${NGINX_VERSION}-${PKG_RELEASE} \
31 | nginx-module-geoip=${NGINX_VERSION}-${PKG_RELEASE} \
32 | nginx-module-image-filter=${NGINX_VERSION}-${PKG_RELEASE} \
33 | nginx-module-perl=${NGINX_VERSION}-${PKG_RELEASE} \
34 | nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${PKG_RELEASE} \
35 | " \
36 | && case "$dpkgArch" in \
37 | amd64|arm64) \
38 | # arches officially built by upstream
39 | echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/debian/ bullseye nginx" >> /etc/apt/sources.list.d/nginx.list \
40 | && apt-get update \
41 | ;; \
42 | *) \
43 | # we're on an architecture upstream doesn't officially build for
44 | # let's build binaries from the published source packages
45 | echo "deb-src [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/debian/ bullseye nginx" >> /etc/apt/sources.list.d/nginx.list \
46 | \
47 | # new directory for storing sources and .deb files
48 | && tempDir="$(mktemp -d)" \
49 | && chmod 777 "$tempDir" \
50 | # (777 to ensure APT's "_apt" user can access it too)
51 | \
52 | # save list of currently-installed packages so build dependencies can be cleanly removed later
53 | && savedAptMark="$(apt-mark showmanual)" \
54 | \
55 | # build .deb files from upstream's source packages (which are verified by apt-get)
56 | && apt-get update \
57 | && apt-get build-dep -y nginx-module-perl=${NGINX_VERSION}-${PKG_RELEASE} \
58 | && ( \
59 | cd "$tempDir" \
60 | && DEB_BUILD_OPTIONS="nocheck parallel=$(nproc)" \
61 | apt-get source --compile nginx-module-perl=${NGINX_VERSION}-${PKG_RELEASE} \
62 | ) \
63 | # we don't remove APT lists here because they get re-downloaded and removed later
64 | \
65 | # reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
66 | # (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
67 | && apt-mark showmanual | xargs apt-mark auto > /dev/null \
68 | && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
69 | \
70 | # create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
71 | && ls -lAFh "$tempDir" \
72 | && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
73 | && grep '^Package: ' "$tempDir/Packages" \
74 | && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
75 | # work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
76 | # Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
77 | # ...
78 | # E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
79 | && apt-get -o Acquire::GzipIndexes=false update \
80 | ;; \
81 | esac \
82 | \
83 | && apt-get install --no-install-recommends --no-install-suggests -y \
84 | $nginxPackages \
85 | gettext-base \
86 | curl \
87 | && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
88 | \
89 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
90 | && if [ -n "$tempDir" ]; then \
91 | apt-get purge -y --auto-remove \
92 | && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
93 | fi
94 |
--------------------------------------------------------------------------------
/mainline/debian-perl/Dockerfile:
--------------------------------------------------------------------------------
1 | #
2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
3 | #
4 | # PLEASE DO NOT EDIT IT DIRECTLY.
5 | #
6 | FROM nginx:1.25.3
7 |
8 | RUN set -x \
9 | && apt-get update \
10 | && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates \
11 | && \
12 | NGINX_GPGKEY=573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62; \
13 | NGINX_GPGKEY_PATH=/usr/share/keyrings/nginx-archive-keyring.gpg; \
14 | export GNUPGHOME="$(mktemp -d)"; \
15 | found=''; \
16 | for server in \
17 | hkp://keyserver.ubuntu.com:80 \
18 | pgp.mit.edu \
19 | ; do \
20 | echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
21 | gpg1 --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
22 | done; \
23 | test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
24 | gpg1 --export "$NGINX_GPGKEY" > "$NGINX_GPGKEY_PATH" ; \
25 | rm -rf "$GNUPGHOME"; \
26 | apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \
27 | && dpkgArch="$(dpkg --print-architecture)" \
28 | && nginxPackages=" \
29 | nginx=${NGINX_VERSION}-${PKG_RELEASE} \
30 | nginx-module-xslt=${NGINX_VERSION}-${PKG_RELEASE} \
31 | nginx-module-geoip=${NGINX_VERSION}-${PKG_RELEASE} \
32 | nginx-module-image-filter=${NGINX_VERSION}-${PKG_RELEASE} \
33 | nginx-module-perl=${NGINX_VERSION}-${PKG_RELEASE} \
34 | nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${PKG_RELEASE} \
35 | " \
36 | && case "$dpkgArch" in \
37 | amd64|arm64) \
38 | # arches officially built by upstream
39 | echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/mainline/debian/ bookworm nginx" >> /etc/apt/sources.list.d/nginx.list \
40 | && apt-get update \
41 | ;; \
42 | *) \
43 | # we're on an architecture upstream doesn't officially build for
44 | # let's build binaries from the published source packages
45 | echo "deb-src [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/mainline/debian/ bookworm nginx" >> /etc/apt/sources.list.d/nginx.list \
46 | \
47 | # new directory for storing sources and .deb files
48 | && tempDir="$(mktemp -d)" \
49 | && chmod 777 "$tempDir" \
50 | # (777 to ensure APT's "_apt" user can access it too)
51 | \
52 | # save list of currently-installed packages so build dependencies can be cleanly removed later
53 | && savedAptMark="$(apt-mark showmanual)" \
54 | \
55 | # build .deb files from upstream's source packages (which are verified by apt-get)
56 | && apt-get update \
57 | && apt-get build-dep -y nginx-module-perl=${NGINX_VERSION}-${PKG_RELEASE} \
58 | && ( \
59 | cd "$tempDir" \
60 | && DEB_BUILD_OPTIONS="nocheck parallel=$(nproc)" \
61 | apt-get source --compile nginx-module-perl=${NGINX_VERSION}-${PKG_RELEASE} \
62 | ) \
63 | # we don't remove APT lists here because they get re-downloaded and removed later
64 | \
65 | # reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
66 | # (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
67 | && apt-mark showmanual | xargs apt-mark auto > /dev/null \
68 | && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
69 | \
70 | # create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
71 | && ls -lAFh "$tempDir" \
72 | && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
73 | && grep '^Package: ' "$tempDir/Packages" \
74 | && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
75 | # work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
76 | # Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
77 | # ...
78 | # E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
79 | && apt-get -o Acquire::GzipIndexes=false update \
80 | ;; \
81 | esac \
82 | \
83 | && apt-get install --no-install-recommends --no-install-suggests -y \
84 | $nginxPackages \
85 | gettext-base \
86 | curl \
87 | && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
88 | \
89 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
90 | && if [ -n "$tempDir" ]; then \
91 | apt-get purge -y --auto-remove \
92 | && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
93 | fi
94 |
--------------------------------------------------------------------------------
/entrypoint/30-tune-worker-processes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=2:ts=2:sts=2:et
3 |
4 | set -eu
5 |
6 | LC_ALL=C
7 | ME=$(basename "$0")
8 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
9 |
10 | [ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0
11 |
12 | touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; }
13 |
# Integer ceiling division: prints ceil($1 / $2).
ceildiv() {
  dividend=$1
  divisor=$2
  result=$(( (dividend + divisor - 1) / divisor ))
  echo "$result"
}
19 |
# Count how many CPUs a cpuset list file grants, e.g. "0-3,5" -> 5.
# $1: cgroup directory, $2: cpuset file name within it.
# Prints the count; returns 1 when the file does not exist.
get_cpuset() {
  cpusetroot=$1
  cpusetfile=$2
  ncpu=0
  [ -f "$cpusetroot/$cpusetfile" ] || return 1
  # Commas separate entries; each entry is a single CPU id or a "lo-hi" range.
  for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do
    if [ "${token#*-}" = "$token" ]; then
      # no dash: a single CPU id
      ncpu=$(( ncpu+1 ))
    else
      # inclusive range: count its members with seq
      count=$( seq $(echo "$token" | tr '-' ' ') | wc -l )
      ncpu=$(( ncpu+count ))
    fi
  done
  echo "$ncpu"
}
38 |
# CPU count implied by cgroup v1 CFS bandwidth limits under $1:
# prints ceil(cpu.cfs_quota_us / cpu.cfs_period_us).
# Returns 1 when either file is missing, no quota is configured (-1),
# the period is 0, or the computed count is not positive.
get_quota() {
  cpuroot=$1
  ncpu=0
  [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1
  [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1
  quota_us=$( cat "$cpuroot/cpu.cfs_quota_us" )
  period_us=$( cat "$cpuroot/cpu.cfs_period_us" )
  # -1 means "no quota"; a 0 period would divide by zero
  [ "$quota_us" != "-1" ] || return 1
  [ "$period_us" != "0" ] || return 1
  ncpu=$( ceildiv "$quota_us" "$period_us" )
  [ "$ncpu" -gt 0 ] || return 1
  echo "$ncpu"
}
52 |
# CPU count implied by the cgroup v2 unified "cpu.max" file under $1.
# The file holds "<quota> <period>"; prints ceil(quota / period).
# Returns 1 when the file is missing, the quota is "max" (unlimited),
# the period is 0, or the computed count is not positive.
get_quota_v2() {
  cpuroot=$1
  ncpu=0
  [ -f "$cpuroot/cpu.max" ] || return 1
  quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" )
  period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" )
  # "max" means unlimited; a 0 period would divide by zero
  [ "$quota" != "max" ] || return 1
  [ "$period" != "0" ] || return 1
  ncpu=$( ceildiv "$quota" "$period" )
  [ "$ncpu" -gt 0 ] || return 1
  echo "$ncpu"
}
65 |
# Print the filesystem path of the cgroup v1 hierarchy for controller $1
# ("cpuset" or "cpu").  The controller's mount entry in /proc/self/mountinfo
# supplies the mount root (field 4) and mount point (field 5); the matching
# line in /proc/self/cgroup supplies this process's path within that
# hierarchy.  Prints the combined directory (or nothing when the pieces
# cannot be reconciled); returns 1 when the proc files are unreadable.
get_cgroup_v1_path() {
  needle=$1
  found=
  foundroot=
  mountpoint=

  [ -r "/proc/self/mountinfo" ] || return 1
  [ -r "/proc/self/cgroup" ] || return 1

  # Pass 1: find the controller's mount entry; keep mountinfo fields 4 and 5
  # (mount root within the hierarchy, and the mount point) in $found.
  while IFS= read -r line; do
    case "$needle" in
      "cpuset")
        case "$line" in
          *cpuset*)
            found=$( echo "$line" | cut -d ' ' -f 4,5 )
            break
            ;;
        esac
        ;;
      "cpu")
        case "$line" in
          # a cpuset mount would also match the looser *cpu* pattern below,
          # so skip it explicitly
          *cpuset*)
            ;;
          *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*)
            found=$( echo "$line" | cut -d ' ' -f 4,5 )
            break
            ;;
        esac
    esac
  done << __EOF__
$( grep -F -- '- cgroup ' /proc/self/mountinfo )
__EOF__

  # Pass 2: find this process's cgroup path (field 3 of /proc/self/cgroup)
  # for the same controller (field 2).
  while IFS= read -r line; do
    controller=$( echo "$line" | cut -d: -f 2 )
    case "$needle" in
      "cpuset")
        case "$controller" in
          cpuset)
            mountpoint=$( echo "$line" | cut -d: -f 3 )
            break
            ;;
        esac
        ;;
      "cpu")
        case "$controller" in
          cpu,cpuacct|cpuacct,cpu|cpuacct|cpu)
            mountpoint=$( echo "$line" | cut -d: -f 3 )
            break
            ;;
        esac
        ;;
    esac
  done << __EOF__
$( grep -F -- 'cpu' /proc/self/cgroup )
__EOF__

  # Combine: a mount root of "/" means the cgroup path is a subdirectory of
  # the mount point; a mount root equal to the cgroup path (typical inside
  # containers) means the mount point itself is the hierarchy root.
  case "${found%% *}" in
    "/")
      foundroot="${found##* }$mountpoint"
      ;;
    "$mountpoint")
      foundroot="${found##* }"
      ;;
  esac
  echo "$foundroot"
}
133 |
# Print the filesystem path of this process's cgroup v2 (unified) hierarchy.
# Uses the cgroup2 mount entry from /proc/self/mountinfo (fields 4 and 5:
# mount root and mount point) and the "0::" line of /proc/self/cgroup;
# returns 1 when the proc files are unreadable or no cgroup2 mount exists.
get_cgroup_v2_path() {
  found=
  foundroot=
  mountpoint=

  [ -r "/proc/self/mountinfo" ] || return 1
  [ -r "/proc/self/cgroup" ] || return 1

  # Mount root (field 4) and mount point (field 5) of the cgroup2 mount.
  while IFS= read -r line; do
    found=$( echo "$line" | cut -d ' ' -f 4,5 )
  done << __EOF__
$( grep -F -- '- cgroup2 ' /proc/self/mountinfo )
__EOF__

  # This process's cgroup path within the unified hierarchy ("0::<path>").
  while IFS= read -r line; do
    mountpoint=$( echo "$line" | cut -d: -f 3 )
  done << __EOF__
$( grep -F -- '0::' /proc/self/cgroup )
__EOF__

  # Empty $found means no cgroup2 mount: give up.  Otherwise combine as in
  # the v1 case; "/../*" covers a cgroup namespace whose root lies outside
  # this mount.
  case "${found%% *}" in
    "")
      return 1
      ;;
    "/")
      foundroot="${found##* }$mountpoint"
      ;;
    "$mountpoint" | /../*)
      foundroot="${found##* }"
      ;;
  esac
  echo "$foundroot"
}
167 |
168 | ncpu_online=$( getconf _NPROCESSORS_ONLN )
169 | ncpu_cpuset=
170 | ncpu_quota=
171 | ncpu_cpuset_v2=
172 | ncpu_quota_v2=
173 |
174 | cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online
175 | cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online
176 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online
177 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online
178 |
179 | ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \
180 | "$ncpu_online" \
181 | "$ncpu_cpuset" \
182 | "$ncpu_quota" \
183 | "$ncpu_cpuset_v2" \
184 | "$ncpu_quota_v2" \
185 | | sort -n \
186 | | head -n 1 )
187 |
188 | sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf
189 |
--------------------------------------------------------------------------------
/mainline/debian/30-tune-worker-processes.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# vim:sw=2:ts=2:sts=2:et

# Entrypoint hook: when NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE is set,
# rewrite the worker_processes directive in /etc/nginx/nginx.conf to match
# the number of CPUs actually available to this container, honoring cgroup
# v1/v2 cpuset and CPU-quota (CFS bandwidth) limits.

set -eu

LC_ALL=C
ME=$(basename "$0")
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin

# Opt-in only: exit quietly unless the autotune variable is set and non-empty.
[ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0

# Exit 0 (not a failure) when the config cannot be modified, e.g. read-only fs.
touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; }

# Integer ceiling division: prints ceil($1 / $2).
ceildiv() {
  num=$1
  div=$2
  echo $(( (num + div - 1) / div ))
}

# Count the CPUs granted by a cpuset list file (e.g. "0-3,5" -> 5).
# $1: cgroup directory, $2: cpuset file name within it.
# Returns 1 when the file does not exist.
get_cpuset() {
  cpusetroot=$1
  cpusetfile=$2
  ncpu=0
  [ -f "$cpusetroot/$cpusetfile" ] || return 1
  for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do
    case "$token" in
      # "lo-hi" range: count its members with seq
      *-*)
        count=$( seq $(echo "$token" | tr '-' ' ') | wc -l )
        ncpu=$(( ncpu+count ))
        ;;
      # single CPU id
      *)
        ncpu=$(( ncpu+1 ))
        ;;
    esac
  done
  echo "$ncpu"
}

# CPU count implied by cgroup v1 CFS bandwidth limits under $1:
# ceil(cpu.cfs_quota_us / cpu.cfs_period_us).  Returns 1 when the files are
# missing, no quota is set (-1), the period is 0, or the result is not > 0.
get_quota() {
  cpuroot=$1
  ncpu=0
  [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1
  [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1
  cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" )
  cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" )
  [ "$cfs_quota" = "-1" ] && return 1
  [ "$cfs_period" = "0" ] && return 1
  ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
  [ "$ncpu" -gt 0 ] || return 1
  echo "$ncpu"
}

# CPU count implied by the cgroup v2 "cpu.max" file ("<quota> <period>")
# under $1.  Returns 1 when the file is missing, the quota is "max"
# (unlimited), the period is 0, or the result is not > 0.
get_quota_v2() {
  cpuroot=$1
  ncpu=0
  [ -f "$cpuroot/cpu.max" ] || return 1
  cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" )
  cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" )
  [ "$cfs_quota" = "max" ] && return 1
  [ "$cfs_period" = "0" ] && return 1
  ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
  [ "$ncpu" -gt 0 ] || return 1
  echo "$ncpu"
}

# Print the filesystem path of the cgroup v1 hierarchy for controller $1
# ("cpuset" or "cpu"), combining the controller's mount entry from
# /proc/self/mountinfo with this process's path from /proc/self/cgroup.
get_cgroup_v1_path() {
  needle=$1
  found=
  foundroot=
  mountpoint=

  [ -r "/proc/self/mountinfo" ] || return 1
  [ -r "/proc/self/cgroup" ] || return 1

  # Pass 1: mount root (field 4) and mount point (field 5) of the
  # controller's cgroup mount.
  while IFS= read -r line; do
    case "$needle" in
      "cpuset")
        case "$line" in
          *cpuset*)
            found=$( echo "$line" | cut -d ' ' -f 4,5 )
            break
            ;;
        esac
        ;;
      "cpu")
        case "$line" in
          # cpuset mounts would also match *cpu*; skip them explicitly
          *cpuset*)
            ;;
          *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*)
            found=$( echo "$line" | cut -d ' ' -f 4,5 )
            break
            ;;
        esac
    esac
  done << __EOF__
$( grep -F -- '- cgroup ' /proc/self/mountinfo )
__EOF__

  # Pass 2: this process's cgroup path (field 3) for the same controller.
  while IFS= read -r line; do
    controller=$( echo "$line" | cut -d: -f 2 )
    case "$needle" in
      "cpuset")
        case "$controller" in
          cpuset)
            mountpoint=$( echo "$line" | cut -d: -f 3 )
            break
            ;;
        esac
        ;;
      "cpu")
        case "$controller" in
          cpu,cpuacct|cpuacct,cpu|cpuacct|cpu)
            mountpoint=$( echo "$line" | cut -d: -f 3 )
            break
            ;;
        esac
        ;;
    esac
  done << __EOF__
$( grep -F -- 'cpu' /proc/self/cgroup )
__EOF__

  # Combine: a mount root of "/" means the cgroup path is a subdirectory of
  # the mount point; a mount root equal to the cgroup path (typical inside
  # containers) means the mount point itself is the hierarchy root.
  case "${found%% *}" in
    "/")
      foundroot="${found##* }$mountpoint"
      ;;
    "$mountpoint")
      foundroot="${found##* }"
      ;;
  esac
  echo "$foundroot"
}

# Print the filesystem path of this process's cgroup v2 (unified) hierarchy,
# from the cgroup2 mountinfo entry and the "0::" line of /proc/self/cgroup.
get_cgroup_v2_path() {
  found=
  foundroot=
  mountpoint=

  [ -r "/proc/self/mountinfo" ] || return 1
  [ -r "/proc/self/cgroup" ] || return 1

  while IFS= read -r line; do
    found=$( echo "$line" | cut -d ' ' -f 4,5 )
  done << __EOF__
$( grep -F -- '- cgroup2 ' /proc/self/mountinfo )
__EOF__

  while IFS= read -r line; do
    mountpoint=$( echo "$line" | cut -d: -f 3 )
  done << __EOF__
$( grep -F -- '0::' /proc/self/cgroup )
__EOF__

  # Empty $found means no cgroup2 mount: give up.  Otherwise combine root
  # and mount point as in the v1 case; "/../*" covers a cgroup namespace
  # whose root lies outside this mount.
  case "${found%% *}" in
    "")
      return 1
      ;;
    "/")
      foundroot="${found##* }$mountpoint"
      ;;
    "$mountpoint" | /../*)
      foundroot="${found##* }"
      ;;
  esac
  echo "$foundroot"
}

# Number of CPUs the OS reports online — the upper bound for all candidates.
ncpu_online=$( getconf _NPROCESSORS_ONLN )
ncpu_cpuset=
ncpu_quota=
ncpu_cpuset_v2=
ncpu_quota_v2=

# Candidate CPU counts from each limiting mechanism; each falls back to the
# online count when the corresponding cgroup information is unavailable.
cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online
cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online
cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online
cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online

# The effective CPU count is the most restrictive (minimum) candidate.
ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \
          "$ncpu_online" \
          "$ncpu_cpuset" \
          "$ncpu_quota" \
          "$ncpu_cpuset_v2" \
          "$ncpu_quota_v2" \
          | sort -n \
          | head -n 1 )

# Comment out the existing worker_processes directive (keeping nginx.conf.bak
# as a backup) and insert the tuned value.
sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf
189 |
--------------------------------------------------------------------------------
/stable/debian/30-tune-worker-processes.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# vim:sw=2:ts=2:sts=2:et

# Entrypoint hook: when NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE is set,
# rewrite the worker_processes directive in /etc/nginx/nginx.conf to match
# the number of CPUs actually available to this container, honoring cgroup
# v1/v2 cpuset and CPU-quota (CFS bandwidth) limits.

set -eu

LC_ALL=C
ME=$(basename "$0")
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin

# Opt-in only: exit quietly unless the autotune variable is set and non-empty.
[ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0

# Exit 0 (not a failure) when the config cannot be modified, e.g. read-only fs.
touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; }

# Integer ceiling division: prints ceil($1 / $2).
ceildiv() {
  num=$1
  div=$2
  echo $(( (num + div - 1) / div ))
}

# Count the CPUs granted by a cpuset list file (e.g. "0-3,5" -> 5).
# $1: cgroup directory, $2: cpuset file name within it.
# Returns 1 when the file does not exist.
get_cpuset() {
  cpusetroot=$1
  cpusetfile=$2
  ncpu=0
  [ -f "$cpusetroot/$cpusetfile" ] || return 1
  for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do
    case "$token" in
      # "lo-hi" range: count its members with seq
      *-*)
        count=$( seq $(echo "$token" | tr '-' ' ') | wc -l )
        ncpu=$(( ncpu+count ))
        ;;
      # single CPU id
      *)
        ncpu=$(( ncpu+1 ))
        ;;
    esac
  done
  echo "$ncpu"
}

# CPU count implied by cgroup v1 CFS bandwidth limits under $1:
# ceil(cpu.cfs_quota_us / cpu.cfs_period_us).  Returns 1 when the files are
# missing, no quota is set (-1), the period is 0, or the result is not > 0.
get_quota() {
  cpuroot=$1
  ncpu=0
  [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1
  [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1
  cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" )
  cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" )
  [ "$cfs_quota" = "-1" ] && return 1
  [ "$cfs_period" = "0" ] && return 1
  ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
  [ "$ncpu" -gt 0 ] || return 1
  echo "$ncpu"
}

# CPU count implied by the cgroup v2 "cpu.max" file ("<quota> <period>")
# under $1.  Returns 1 when the file is missing, the quota is "max"
# (unlimited), the period is 0, or the result is not > 0.
get_quota_v2() {
  cpuroot=$1
  ncpu=0
  [ -f "$cpuroot/cpu.max" ] || return 1
  cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" )
  cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" )
  [ "$cfs_quota" = "max" ] && return 1
  [ "$cfs_period" = "0" ] && return 1
  ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
  [ "$ncpu" -gt 0 ] || return 1
  echo "$ncpu"
}

# Print the filesystem path of the cgroup v1 hierarchy for controller $1
# ("cpuset" or "cpu"), combining the controller's mount entry from
# /proc/self/mountinfo with this process's path from /proc/self/cgroup.
get_cgroup_v1_path() {
  needle=$1
  found=
  foundroot=
  mountpoint=

  [ -r "/proc/self/mountinfo" ] || return 1
  [ -r "/proc/self/cgroup" ] || return 1

  # Pass 1: mount root (field 4) and mount point (field 5) of the
  # controller's cgroup mount.
  while IFS= read -r line; do
    case "$needle" in
      "cpuset")
        case "$line" in
          *cpuset*)
            found=$( echo "$line" | cut -d ' ' -f 4,5 )
            break
            ;;
        esac
        ;;
      "cpu")
        case "$line" in
          # cpuset mounts would also match *cpu*; skip them explicitly
          *cpuset*)
            ;;
          *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*)
            found=$( echo "$line" | cut -d ' ' -f 4,5 )
            break
            ;;
        esac
    esac
  done << __EOF__
$( grep -F -- '- cgroup ' /proc/self/mountinfo )
__EOF__

  # Pass 2: this process's cgroup path (field 3) for the same controller.
  while IFS= read -r line; do
    controller=$( echo "$line" | cut -d: -f 2 )
    case "$needle" in
      "cpuset")
        case "$controller" in
          cpuset)
            mountpoint=$( echo "$line" | cut -d: -f 3 )
            break
            ;;
        esac
        ;;
      "cpu")
        case "$controller" in
          cpu,cpuacct|cpuacct,cpu|cpuacct|cpu)
            mountpoint=$( echo "$line" | cut -d: -f 3 )
            break
            ;;
        esac
        ;;
    esac
  done << __EOF__
$( grep -F -- 'cpu' /proc/self/cgroup )
__EOF__

  # Combine: a mount root of "/" means the cgroup path is a subdirectory of
  # the mount point; a mount root equal to the cgroup path (typical inside
  # containers) means the mount point itself is the hierarchy root.
  case "${found%% *}" in
    "/")
      foundroot="${found##* }$mountpoint"
      ;;
    "$mountpoint")
      foundroot="${found##* }"
      ;;
  esac
  echo "$foundroot"
}

# Print the filesystem path of this process's cgroup v2 (unified) hierarchy,
# from the cgroup2 mountinfo entry and the "0::" line of /proc/self/cgroup.
get_cgroup_v2_path() {
  found=
  foundroot=
  mountpoint=

  [ -r "/proc/self/mountinfo" ] || return 1
  [ -r "/proc/self/cgroup" ] || return 1

  while IFS= read -r line; do
    found=$( echo "$line" | cut -d ' ' -f 4,5 )
  done << __EOF__
$( grep -F -- '- cgroup2 ' /proc/self/mountinfo )
__EOF__

  while IFS= read -r line; do
    mountpoint=$( echo "$line" | cut -d: -f 3 )
  done << __EOF__
$( grep -F -- '0::' /proc/self/cgroup )
__EOF__

  # Empty $found means no cgroup2 mount: give up.  Otherwise combine root
  # and mount point as in the v1 case; "/../*" covers a cgroup namespace
  # whose root lies outside this mount.
  case "${found%% *}" in
    "")
      return 1
      ;;
    "/")
      foundroot="${found##* }$mountpoint"
      ;;
    "$mountpoint" | /../*)
      foundroot="${found##* }"
      ;;
  esac
  echo "$foundroot"
}

# Number of CPUs the OS reports online — the upper bound for all candidates.
ncpu_online=$( getconf _NPROCESSORS_ONLN )
ncpu_cpuset=
ncpu_quota=
ncpu_cpuset_v2=
ncpu_quota_v2=

# Candidate CPU counts from each limiting mechanism; each falls back to the
# online count when the corresponding cgroup information is unavailable.
cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online
cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online
cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online
cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online

# The effective CPU count is the most restrictive (minimum) candidate.
ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \
          "$ncpu_online" \
          "$ncpu_cpuset" \
          "$ncpu_quota" \
          "$ncpu_cpuset_v2" \
          "$ncpu_quota_v2" \
          | sort -n \
          | head -n 1 )

# Comment out the existing worker_processes directive (keeping nginx.conf.bak
# as a backup) and insert the tuned value.
sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf
189 |
--------------------------------------------------------------------------------
/mainline/alpine-slim/30-tune-worker-processes.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# vim:sw=2:ts=2:sts=2:et

# Entrypoint hook: when NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE is set,
# rewrite the worker_processes directive in /etc/nginx/nginx.conf to match
# the number of CPUs actually available to this container, honoring cgroup
# v1/v2 cpuset and CPU-quota (CFS bandwidth) limits.

set -eu

LC_ALL=C
ME=$(basename "$0")
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin

# Opt-in only: exit quietly unless the autotune variable is set and non-empty.
[ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0

# Exit 0 (not a failure) when the config cannot be modified, e.g. read-only fs.
touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; }

# Integer ceiling division: prints ceil($1 / $2).
ceildiv() {
  num=$1
  div=$2
  echo $(( (num + div - 1) / div ))
}

# Count the CPUs granted by a cpuset list file (e.g. "0-3,5" -> 5).
# $1: cgroup directory, $2: cpuset file name within it.
# Returns 1 when the file does not exist.
get_cpuset() {
  cpusetroot=$1
  cpusetfile=$2
  ncpu=0
  [ -f "$cpusetroot/$cpusetfile" ] || return 1
  for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do
    case "$token" in
      # "lo-hi" range: count its members with seq
      *-*)
        count=$( seq $(echo "$token" | tr '-' ' ') | wc -l )
        ncpu=$(( ncpu+count ))
        ;;
      # single CPU id
      *)
        ncpu=$(( ncpu+1 ))
        ;;
    esac
  done
  echo "$ncpu"
}

# CPU count implied by cgroup v1 CFS bandwidth limits under $1:
# ceil(cpu.cfs_quota_us / cpu.cfs_period_us).  Returns 1 when the files are
# missing, no quota is set (-1), the period is 0, or the result is not > 0.
get_quota() {
  cpuroot=$1
  ncpu=0
  [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1
  [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1
  cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" )
  cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" )
  [ "$cfs_quota" = "-1" ] && return 1
  [ "$cfs_period" = "0" ] && return 1
  ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
  [ "$ncpu" -gt 0 ] || return 1
  echo "$ncpu"
}

# CPU count implied by the cgroup v2 "cpu.max" file ("<quota> <period>")
# under $1.  Returns 1 when the file is missing, the quota is "max"
# (unlimited), the period is 0, or the result is not > 0.
get_quota_v2() {
  cpuroot=$1
  ncpu=0
  [ -f "$cpuroot/cpu.max" ] || return 1
  cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" )
  cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" )
  [ "$cfs_quota" = "max" ] && return 1
  [ "$cfs_period" = "0" ] && return 1
  ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
  [ "$ncpu" -gt 0 ] || return 1
  echo "$ncpu"
}

# Print the filesystem path of the cgroup v1 hierarchy for controller $1
# ("cpuset" or "cpu"), combining the controller's mount entry from
# /proc/self/mountinfo with this process's path from /proc/self/cgroup.
get_cgroup_v1_path() {
  needle=$1
  found=
  foundroot=
  mountpoint=

  [ -r "/proc/self/mountinfo" ] || return 1
  [ -r "/proc/self/cgroup" ] || return 1

  # Pass 1: mount root (field 4) and mount point (field 5) of the
  # controller's cgroup mount.
  while IFS= read -r line; do
    case "$needle" in
      "cpuset")
        case "$line" in
          *cpuset*)
            found=$( echo "$line" | cut -d ' ' -f 4,5 )
            break
            ;;
        esac
        ;;
      "cpu")
        case "$line" in
          # cpuset mounts would also match *cpu*; skip them explicitly
          *cpuset*)
            ;;
          *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*)
            found=$( echo "$line" | cut -d ' ' -f 4,5 )
            break
            ;;
        esac
    esac
  done << __EOF__
$( grep -F -- '- cgroup ' /proc/self/mountinfo )
__EOF__

  # Pass 2: this process's cgroup path (field 3) for the same controller.
  while IFS= read -r line; do
    controller=$( echo "$line" | cut -d: -f 2 )
    case "$needle" in
      "cpuset")
        case "$controller" in
          cpuset)
            mountpoint=$( echo "$line" | cut -d: -f 3 )
            break
            ;;
        esac
        ;;
      "cpu")
        case "$controller" in
          cpu,cpuacct|cpuacct,cpu|cpuacct|cpu)
            mountpoint=$( echo "$line" | cut -d: -f 3 )
            break
            ;;
        esac
        ;;
    esac
  done << __EOF__
$( grep -F -- 'cpu' /proc/self/cgroup )
__EOF__

  # Combine: a mount root of "/" means the cgroup path is a subdirectory of
  # the mount point; a mount root equal to the cgroup path (typical inside
  # containers) means the mount point itself is the hierarchy root.
  case "${found%% *}" in
    "/")
      foundroot="${found##* }$mountpoint"
      ;;
    "$mountpoint")
      foundroot="${found##* }"
      ;;
  esac
  echo "$foundroot"
}

# Print the filesystem path of this process's cgroup v2 (unified) hierarchy,
# from the cgroup2 mountinfo entry and the "0::" line of /proc/self/cgroup.
get_cgroup_v2_path() {
  found=
  foundroot=
  mountpoint=

  [ -r "/proc/self/mountinfo" ] || return 1
  [ -r "/proc/self/cgroup" ] || return 1

  while IFS= read -r line; do
    found=$( echo "$line" | cut -d ' ' -f 4,5 )
  done << __EOF__
$( grep -F -- '- cgroup2 ' /proc/self/mountinfo )
__EOF__

  while IFS= read -r line; do
    mountpoint=$( echo "$line" | cut -d: -f 3 )
  done << __EOF__
$( grep -F -- '0::' /proc/self/cgroup )
__EOF__

  # Empty $found means no cgroup2 mount: give up.  Otherwise combine root
  # and mount point as in the v1 case; "/../*" covers a cgroup namespace
  # whose root lies outside this mount.
  case "${found%% *}" in
    "")
      return 1
      ;;
    "/")
      foundroot="${found##* }$mountpoint"
      ;;
    "$mountpoint" | /../*)
      foundroot="${found##* }"
      ;;
  esac
  echo "$foundroot"
}

# Number of CPUs the OS reports online — the upper bound for all candidates.
ncpu_online=$( getconf _NPROCESSORS_ONLN )
ncpu_cpuset=
ncpu_quota=
ncpu_cpuset_v2=
ncpu_quota_v2=

# Candidate CPU counts from each limiting mechanism; each falls back to the
# online count when the corresponding cgroup information is unavailable.
cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online
cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online
cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online
cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online

# The effective CPU count is the most restrictive (minimum) candidate.
ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \
          "$ncpu_online" \
          "$ncpu_cpuset" \
          "$ncpu_quota" \
          "$ncpu_cpuset_v2" \
          "$ncpu_quota_v2" \
          | sort -n \
          | head -n 1 )

# Comment out the existing worker_processes directive (keeping nginx.conf.bak
# as a backup) and insert the tuned value.
sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf
189 |
--------------------------------------------------------------------------------
/stable/alpine-slim/30-tune-worker-processes.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# vim:sw=2:ts=2:sts=2:et

# Entrypoint hook: when NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE is set,
# rewrite the worker_processes directive in /etc/nginx/nginx.conf to match
# the number of CPUs actually available to this container, honoring cgroup
# v1/v2 cpuset and CPU-quota (CFS bandwidth) limits.

set -eu

LC_ALL=C
ME=$(basename "$0")
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin

# Opt-in only: exit quietly unless the autotune variable is set and non-empty.
[ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0

# Exit 0 (not a failure) when the config cannot be modified, e.g. read-only fs.
touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; }

# Integer ceiling division: prints ceil($1 / $2).
ceildiv() {
  num=$1
  div=$2
  echo $(( (num + div - 1) / div ))
}

# Count the CPUs granted by a cpuset list file (e.g. "0-3,5" -> 5).
# $1: cgroup directory, $2: cpuset file name within it.
# Returns 1 when the file does not exist.
get_cpuset() {
  cpusetroot=$1
  cpusetfile=$2
  ncpu=0
  [ -f "$cpusetroot/$cpusetfile" ] || return 1
  for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do
    case "$token" in
      # "lo-hi" range: count its members with seq
      *-*)
        count=$( seq $(echo "$token" | tr '-' ' ') | wc -l )
        ncpu=$(( ncpu+count ))
        ;;
      # single CPU id
      *)
        ncpu=$(( ncpu+1 ))
        ;;
    esac
  done
  echo "$ncpu"
}

# CPU count implied by cgroup v1 CFS bandwidth limits under $1:
# ceil(cpu.cfs_quota_us / cpu.cfs_period_us).  Returns 1 when the files are
# missing, no quota is set (-1), the period is 0, or the result is not > 0.
get_quota() {
  cpuroot=$1
  ncpu=0
  [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1
  [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1
  cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" )
  cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" )
  [ "$cfs_quota" = "-1" ] && return 1
  [ "$cfs_period" = "0" ] && return 1
  ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
  [ "$ncpu" -gt 0 ] || return 1
  echo "$ncpu"
}

# CPU count implied by the cgroup v2 "cpu.max" file ("<quota> <period>")
# under $1.  Returns 1 when the file is missing, the quota is "max"
# (unlimited), the period is 0, or the result is not > 0.
get_quota_v2() {
  cpuroot=$1
  ncpu=0
  [ -f "$cpuroot/cpu.max" ] || return 1
  cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" )
  cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" )
  [ "$cfs_quota" = "max" ] && return 1
  [ "$cfs_period" = "0" ] && return 1
  ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
  [ "$ncpu" -gt 0 ] || return 1
  echo "$ncpu"
}

# Print the filesystem path of the cgroup v1 hierarchy for controller $1
# ("cpuset" or "cpu"), combining the controller's mount entry from
# /proc/self/mountinfo with this process's path from /proc/self/cgroup.
get_cgroup_v1_path() {
  needle=$1
  found=
  foundroot=
  mountpoint=

  [ -r "/proc/self/mountinfo" ] || return 1
  [ -r "/proc/self/cgroup" ] || return 1

  # Pass 1: mount root (field 4) and mount point (field 5) of the
  # controller's cgroup mount.
  while IFS= read -r line; do
    case "$needle" in
      "cpuset")
        case "$line" in
          *cpuset*)
            found=$( echo "$line" | cut -d ' ' -f 4,5 )
            break
            ;;
        esac
        ;;
      "cpu")
        case "$line" in
          # cpuset mounts would also match *cpu*; skip them explicitly
          *cpuset*)
            ;;
          *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*)
            found=$( echo "$line" | cut -d ' ' -f 4,5 )
            break
            ;;
        esac
    esac
  done << __EOF__
$( grep -F -- '- cgroup ' /proc/self/mountinfo )
__EOF__

  # Pass 2: this process's cgroup path (field 3) for the same controller.
  while IFS= read -r line; do
    controller=$( echo "$line" | cut -d: -f 2 )
    case "$needle" in
      "cpuset")
        case "$controller" in
          cpuset)
            mountpoint=$( echo "$line" | cut -d: -f 3 )
            break
            ;;
        esac
        ;;
      "cpu")
        case "$controller" in
          cpu,cpuacct|cpuacct,cpu|cpuacct|cpu)
            mountpoint=$( echo "$line" | cut -d: -f 3 )
            break
            ;;
        esac
        ;;
    esac
  done << __EOF__
$( grep -F -- 'cpu' /proc/self/cgroup )
__EOF__

  # Combine: a mount root of "/" means the cgroup path is a subdirectory of
  # the mount point; a mount root equal to the cgroup path (typical inside
  # containers) means the mount point itself is the hierarchy root.
  case "${found%% *}" in
    "/")
      foundroot="${found##* }$mountpoint"
      ;;
    "$mountpoint")
      foundroot="${found##* }"
      ;;
  esac
  echo "$foundroot"
}

# Print the filesystem path of this process's cgroup v2 (unified) hierarchy,
# from the cgroup2 mountinfo entry and the "0::" line of /proc/self/cgroup.
get_cgroup_v2_path() {
  found=
  foundroot=
  mountpoint=

  [ -r "/proc/self/mountinfo" ] || return 1
  [ -r "/proc/self/cgroup" ] || return 1

  while IFS= read -r line; do
    found=$( echo "$line" | cut -d ' ' -f 4,5 )
  done << __EOF__
$( grep -F -- '- cgroup2 ' /proc/self/mountinfo )
__EOF__

  while IFS= read -r line; do
    mountpoint=$( echo "$line" | cut -d: -f 3 )
  done << __EOF__
$( grep -F -- '0::' /proc/self/cgroup )
__EOF__

  # Empty $found means no cgroup2 mount: give up.  Otherwise combine root
  # and mount point as in the v1 case; "/../*" covers a cgroup namespace
  # whose root lies outside this mount.
  case "${found%% *}" in
    "")
      return 1
      ;;
    "/")
      foundroot="${found##* }$mountpoint"
      ;;
    "$mountpoint" | /../*)
      foundroot="${found##* }"
      ;;
  esac
  echo "$foundroot"
}

# Number of CPUs the OS reports online — the upper bound for all candidates.
ncpu_online=$( getconf _NPROCESSORS_ONLN )
ncpu_cpuset=
ncpu_quota=
ncpu_cpuset_v2=
ncpu_quota_v2=

# Candidate CPU counts from each limiting mechanism; each falls back to the
# online count when the corresponding cgroup information is unavailable.
cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online
cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online
cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online
cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online

# The effective CPU count is the most restrictive (minimum) candidate.
ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \
          "$ncpu_online" \
          "$ncpu_cpuset" \
          "$ncpu_quota" \
          "$ncpu_cpuset_v2" \
          "$ncpu_quota_v2" \
          | sort -n \
          | head -n 1 )

# Comment out the existing worker_processes directive (keeping nginx.conf.bak
# as a backup) and insert the tuned value.
sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf
189 |
--------------------------------------------------------------------------------
/Dockerfile-alpine-slim.template:
--------------------------------------------------------------------------------
# Template for the alpine-slim image; %%NAME%% placeholders are substituted
# when the per-version Dockerfiles (stable/mainline) are generated.
FROM alpine:%%ALPINE_VERSION%%

LABEL maintainer="NGINX Docker Maintainers "

ENV NGINX_VERSION %%NGINX_VERSION%%
ENV PKG_RELEASE %%PKG_RELEASE%%

# Single RUN layer: install nginx from upstream packages (or build them from
# the published packaging sources on unsupported arches), then keep only
# envsubst + tzdata and clean everything else up.
RUN set -x \
# create nginx user/group first, to be consistent throughout docker variants
    && addgroup -g 101 -S nginx \
    && adduser -S -D -H -u 101 -h /var/cache/nginx -s /sbin/nologin -G nginx -g nginx nginx \
    && apkArch="$(cat /etc/apk/arch)" \
    && nginxPackages="%%PACKAGES%%
    " \
# install prerequisites for public key and pkg-oss checks
    && apk add --no-cache --virtual .checksum-deps \
        openssl \
    && case "$apkArch" in \
        x86_64|aarch64) \
# arches officially built by upstream
            set -x \
# verify the signing key against a pinned checksum before trusting it
            && KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \
            && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \
            && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \
                echo "key verification succeeded!"; \
                mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \
            else \
                echo "key verification failed!"; \
                exit 1; \
            fi \
            && apk add -X "%%PACKAGEREPO%%v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
            ;; \
        *) \
# we're on an architecture upstream doesn't officially build for
# let's build binaries from the published packaging sources
            set -x \
            && tempDir="$(mktemp -d)" \
            && chown nobody:nobody $tempDir \
            && apk add --no-cache --virtual .build-deps \
                gcc \
                libc-dev \
                make \
                openssl-dev \
                pcre2-dev \
                zlib-dev \
                linux-headers \
                bash \
                alpine-sdk \
                findutils \
# build as the unprivileged "nobody" user; verify the pkg-oss tarball
# checksum before compiling, then index and sign the resulting packages
            && su nobody -s /bin/sh -c " \
                export HOME=${tempDir} \
                && cd ${tempDir} \
                && curl -f -O https://hg.nginx.org/pkg-oss/archive/%%REVISION%%.tar.gz \
                && PKGOSSCHECKSUM=\"%%PKGOSSCHECKSUM%% *%%REVISION%%.tar.gz\" \
                && if [ \"\$(openssl sha512 -r %%REVISION%%.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
                    echo \"pkg-oss tarball checksum verification succeeded!\"; \
                else \
                    echo \"pkg-oss tarball checksum verification failed!\"; \
                    exit 1; \
                fi \
                && tar xzvf %%REVISION%%.tar.gz \
                && cd pkg-oss-%%REVISION%% \
                && cd alpine \
                && make %%BUILDTARGET%% \
                && apk index -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
                && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
                " \
            && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
            && apk del --no-network .build-deps \
            && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
            ;; \
    esac \
# remove checksum deps
    && apk del --no-network .checksum-deps \
# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
    && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
    && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
    && if [ -f "/etc/apk/keys/nginx_signing.rsa.pub" ]; then rm -f /etc/apk/keys/nginx_signing.rsa.pub; fi \
# Bring in gettext so we can get `envsubst`, then throw
# the rest away. To do this, we need to install `gettext`
# then move `envsubst` out of the way so `gettext` can
# be deleted completely, then move `envsubst` back.
    && apk add --no-cache --virtual .gettext gettext \
    && mv /usr/bin/envsubst /tmp/ \
    \
# scanelf lists the shared libraries envsubst links against, so only those
# runtime deps are kept installed after gettext itself is removed
    && runDeps="$( \
        scanelf --needed --nobanner /tmp/envsubst \
            | awk '{ gsub(/,/, "\nso:", $2); print "so:" $2 }' \
            | sort -u \
            | xargs -r apk info --installed \
            | sort -u \
    )" \
    && apk add --no-cache $runDeps \
    && apk del --no-network .gettext \
    && mv /tmp/envsubst /usr/local/bin/ \
# Bring in tzdata so users could set the timezones through the environment
# variables
    && apk add --no-cache tzdata \
# forward request and error logs to docker log collector
    && ln -sf /dev/stdout /var/log/nginx/access.log \
    && ln -sf /dev/stderr /var/log/nginx/error.log \
# create a docker-entrypoint.d directory
    && mkdir /docker-entrypoint.d

# Entrypoint plus its hook scripts (run in lexical order by the entrypoint).
COPY docker-entrypoint.sh /
COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
COPY 15-local-resolvers.envsh /docker-entrypoint.d
COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
COPY 30-tune-worker-processes.sh /docker-entrypoint.d
ENTRYPOINT ["/docker-entrypoint.sh"]

EXPOSE 80

# SIGQUIT asks nginx for a graceful shutdown (SIGTERM would be a fast one).
STOPSIGNAL SIGQUIT

CMD ["nginx", "-g", "daemon off;"]
117 |
--------------------------------------------------------------------------------
/Dockerfile-debian.template:
--------------------------------------------------------------------------------
1 | FROM debian:%%DEBIAN_VERSION%%-slim
2 |
3 | LABEL maintainer="NGINX Docker Maintainers "
4 |
5 | ENV NGINX_VERSION %%NGINX_VERSION%%
6 | ENV NJS_VERSION %%NJS_VERSION%%
7 | ENV PKG_RELEASE %%PKG_RELEASE%%
8 |
9 | RUN set -x \
10 | # create nginx user/group first, to be consistent throughout docker variants
11 | && groupadd --system --gid 101 nginx \
12 | && useradd --system --gid nginx --no-create-home --home /nonexistent --comment "nginx user" --shell /bin/false --uid 101 nginx \
13 | && apt-get update \
14 | && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates \
15 | && \
16 | NGINX_GPGKEY=573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62; \
17 | NGINX_GPGKEY_PATH=/usr/share/keyrings/nginx-archive-keyring.gpg; \
18 | export GNUPGHOME="$(mktemp -d)"; \
19 | found=''; \
20 | for server in \
21 | hkp://keyserver.ubuntu.com:80 \
22 | pgp.mit.edu \
23 | ; do \
24 | echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
25 | gpg1 --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
26 | done; \
27 | test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
28 | gpg1 --export "$NGINX_GPGKEY" > "$NGINX_GPGKEY_PATH" ; \
29 | rm -rf "$GNUPGHOME"; \
30 | apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \
31 | && dpkgArch="$(dpkg --print-architecture)" \
32 | && nginxPackages="%%PACKAGES%%
33 | " \
34 | && case "$dpkgArch" in \
35 | amd64|arm64) \
36 | # arches officially built by upstream
37 | echo "deb [signed-by=$NGINX_GPGKEY_PATH] %%PACKAGEREPO%% %%DEBIAN_VERSION%% nginx" >> /etc/apt/sources.list.d/nginx.list \
38 | && apt-get update \
39 | ;; \
40 | *) \
41 | # we're on an architecture upstream doesn't officially build for
42 | # let's build binaries from the published source packages
43 | echo "deb-src [signed-by=$NGINX_GPGKEY_PATH] %%PACKAGEREPO%% %%DEBIAN_VERSION%% nginx" >> /etc/apt/sources.list.d/nginx.list \
44 | \
45 | # new directory for storing sources and .deb files
46 | && tempDir="$(mktemp -d)" \
47 | && chmod 777 "$tempDir" \
48 | # (777 to ensure APT's "_apt" user can access it too)
49 | \
50 | # save list of currently-installed packages so build dependencies can be cleanly removed later
51 | && savedAptMark="$(apt-mark showmanual)" \
52 | \
53 | # build .deb files from upstream's source packages (which are verified by apt-get)
54 | && apt-get update \
55 | && apt-get build-dep -y %%BUILDTARGET%% \
56 | && ( \
57 | cd "$tempDir" \
58 | && DEB_BUILD_OPTIONS="nocheck parallel=$(nproc)" \
59 | apt-get source --compile %%BUILDTARGET%% \
60 | ) \
61 | # we don't remove APT lists here because they get re-downloaded and removed later
62 | \
63 | # reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
64 | # (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
65 | && apt-mark showmanual | xargs apt-mark auto > /dev/null \
66 | && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
67 | \
68 | # create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
69 | && ls -lAFh "$tempDir" \
70 | && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
71 | && grep '^Package: ' "$tempDir/Packages" \
72 | && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
73 | # work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
74 | # Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
75 | # ...
76 | # E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
77 | && apt-get -o Acquire::GzipIndexes=false update \
78 | ;; \
79 | esac \
80 | \
81 | && apt-get install --no-install-recommends --no-install-suggests -y \
82 | $nginxPackages \
83 | gettext-base \
84 | curl \
85 | && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
86 | \
87 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
88 | && if [ -n "$tempDir" ]; then \
89 | apt-get purge -y --auto-remove \
90 | && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
91 | fi \
92 | # forward request and error logs to docker log collector
93 | && ln -sf /dev/stdout /var/log/nginx/access.log \
94 | && ln -sf /dev/stderr /var/log/nginx/error.log \
95 | # create a docker-entrypoint.d directory
96 | && mkdir /docker-entrypoint.d
97 |
98 | COPY docker-entrypoint.sh /
99 | COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
100 | COPY 15-local-resolvers.envsh /docker-entrypoint.d
101 | COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
102 | COPY 30-tune-worker-processes.sh /docker-entrypoint.d
103 | ENTRYPOINT ["/docker-entrypoint.sh"]
104 |
105 | EXPOSE 80
106 |
107 | STOPSIGNAL SIGQUIT
108 |
109 | CMD ["nginx", "-g", "daemon off;"]
110 |
--------------------------------------------------------------------------------
/stable/alpine-slim/Dockerfile:
--------------------------------------------------------------------------------
1 | #
2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
3 | #
4 | # PLEASE DO NOT EDIT IT DIRECTLY.
5 | #
6 | FROM alpine:3.18
7 |
8 | LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
9 |
10 | ENV NGINX_VERSION 1.24.0
11 | ENV PKG_RELEASE 1
12 |
13 | RUN set -x \
14 | # create nginx user/group first, to be consistent throughout docker variants
15 | && addgroup -g 101 -S nginx \
16 | && adduser -S -D -H -u 101 -h /var/cache/nginx -s /sbin/nologin -G nginx -g nginx nginx \
17 | && apkArch="$(cat /etc/apk/arch)" \
18 | && nginxPackages=" \
19 | nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
20 | " \
21 | # install prerequisites for public key and pkg-oss checks
22 | && apk add --no-cache --virtual .checksum-deps \
23 | openssl \
24 | && case "$apkArch" in \
25 | x86_64|aarch64) \
26 | # arches officially built by upstream
27 | set -x \
28 | && KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \
29 | && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \
30 | && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \
31 | echo "key verification succeeded!"; \
32 | mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \
33 | else \
34 | echo "key verification failed!"; \
35 | exit 1; \
36 | fi \
37 | && apk add -X "https://nginx.org/packages/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
38 | ;; \
39 | *) \
40 | # we're on an architecture upstream doesn't officially build for
41 | # let's build binaries from the published packaging sources
42 | set -x \
43 | && tempDir="$(mktemp -d)" \
44 | && chown nobody:nobody $tempDir \
45 | && apk add --no-cache --virtual .build-deps \
46 | gcc \
47 | libc-dev \
48 | make \
49 | openssl-dev \
50 | pcre2-dev \
51 | zlib-dev \
52 | linux-headers \
53 | bash \
54 | alpine-sdk \
55 | findutils \
56 | && su nobody -s /bin/sh -c " \
57 | export HOME=${tempDir} \
58 | && cd ${tempDir} \
59 | && curl -f -O https://hg.nginx.org/pkg-oss/archive/e5d85b3424bb.tar.gz \
60 | && PKGOSSCHECKSUM=\"4f33347bf05e7d7dd42a52b6e7af7ec21e3ed71df05a8ec16dd1228425f04e4318d88b1340370ccb6ad02cde590fc102094ddffbb1fc86d2085295a43f02f67b *e5d85b3424bb.tar.gz\" \
61 | && if [ \"\$(openssl sha512 -r e5d85b3424bb.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
62 | echo \"pkg-oss tarball checksum verification succeeded!\"; \
63 | else \
64 | echo \"pkg-oss tarball checksum verification failed!\"; \
65 | exit 1; \
66 | fi \
67 | && tar xzvf e5d85b3424bb.tar.gz \
68 | && cd pkg-oss-e5d85b3424bb \
69 | && cd alpine \
70 | && make base \
71 | && apk index -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
72 | && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
73 | " \
74 | && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
75 | && apk del --no-network .build-deps \
76 | && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
77 | ;; \
78 | esac \
79 | # remove checksum deps
80 | && apk del --no-network .checksum-deps \
81 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
82 | && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
83 | && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
84 | && if [ -f "/etc/apk/keys/nginx_signing.rsa.pub" ]; then rm -f /etc/apk/keys/nginx_signing.rsa.pub; fi \
85 | # Bring in gettext so we can get `envsubst`, then throw
86 | # the rest away. To do this, we need to install `gettext`
87 | # then move `envsubst` out of the way so `gettext` can
88 | # be deleted completely, then move `envsubst` back.
89 | && apk add --no-cache --virtual .gettext gettext \
90 | && mv /usr/bin/envsubst /tmp/ \
91 | \
92 | && runDeps="$( \
93 | scanelf --needed --nobanner /tmp/envsubst \
94 | | awk '{ gsub(/,/, "\nso:", $2); print "so:" $2 }' \
95 | | sort -u \
96 | | xargs -r apk info --installed \
97 | | sort -u \
98 | )" \
99 | && apk add --no-cache $runDeps \
100 | && apk del --no-network .gettext \
101 | && mv /tmp/envsubst /usr/local/bin/ \
102 | # Bring in tzdata so users could set the timezones through the environment
103 | # variables
104 | && apk add --no-cache tzdata \
105 | # forward request and error logs to docker log collector
106 | && ln -sf /dev/stdout /var/log/nginx/access.log \
107 | && ln -sf /dev/stderr /var/log/nginx/error.log \
108 | # create a docker-entrypoint.d directory
109 | && mkdir /docker-entrypoint.d
110 |
111 | COPY docker-entrypoint.sh /
112 | COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
113 | COPY 15-local-resolvers.envsh /docker-entrypoint.d
114 | COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
115 | COPY 30-tune-worker-processes.sh /docker-entrypoint.d
116 | ENTRYPOINT ["/docker-entrypoint.sh"]
117 |
118 | EXPOSE 80
119 |
120 | STOPSIGNAL SIGQUIT
121 |
122 | CMD ["nginx", "-g", "daemon off;"]
123 |
--------------------------------------------------------------------------------
/mainline/alpine-slim/Dockerfile:
--------------------------------------------------------------------------------
1 | #
2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
3 | #
4 | # PLEASE DO NOT EDIT IT DIRECTLY.
5 | #
6 | FROM alpine:3.18
7 |
8 | LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
9 |
10 | ENV NGINX_VERSION 1.25.3
11 | ENV PKG_RELEASE 1
12 |
13 | RUN set -x \
14 | # create nginx user/group first, to be consistent throughout docker variants
15 | && addgroup -g 101 -S nginx \
16 | && adduser -S -D -H -u 101 -h /var/cache/nginx -s /sbin/nologin -G nginx -g nginx nginx \
17 | && apkArch="$(cat /etc/apk/arch)" \
18 | && nginxPackages=" \
19 | nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
20 | " \
21 | # install prerequisites for public key and pkg-oss checks
22 | && apk add --no-cache --virtual .checksum-deps \
23 | openssl \
24 | && case "$apkArch" in \
25 | x86_64|aarch64) \
26 | # arches officially built by upstream
27 | set -x \
28 | && KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \
29 | && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \
30 | && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \
31 | echo "key verification succeeded!"; \
32 | mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \
33 | else \
34 | echo "key verification failed!"; \
35 | exit 1; \
36 | fi \
37 | && apk add -X "https://nginx.org/packages/mainline/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
38 | ;; \
39 | *) \
40 | # we're on an architecture upstream doesn't officially build for
41 | # let's build binaries from the published packaging sources
42 | set -x \
43 | && tempDir="$(mktemp -d)" \
44 | && chown nobody:nobody $tempDir \
45 | && apk add --no-cache --virtual .build-deps \
46 | gcc \
47 | libc-dev \
48 | make \
49 | openssl-dev \
50 | pcre2-dev \
51 | zlib-dev \
52 | linux-headers \
53 | bash \
54 | alpine-sdk \
55 | findutils \
56 | && su nobody -s /bin/sh -c " \
57 | export HOME=${tempDir} \
58 | && cd ${tempDir} \
59 | && curl -f -O https://hg.nginx.org/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
60 | && PKGOSSCHECKSUM=\"00b217979265cc9d66c991c9c89427558936dbaa568d175ca45780589171d94f1866217be09a83438d95494cf38baaa6788320f6d8d23f2fb29c03117391ff88 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \
61 | && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
62 | echo \"pkg-oss tarball checksum verification succeeded!\"; \
63 | else \
64 | echo \"pkg-oss tarball checksum verification failed!\"; \
65 | exit 1; \
66 | fi \
67 | && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
68 | && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \
69 | && cd alpine \
70 | && make base \
71 | && apk index -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
72 | && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
73 | " \
74 | && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
75 | && apk del --no-network .build-deps \
76 | && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
77 | ;; \
78 | esac \
79 | # remove checksum deps
80 | && apk del --no-network .checksum-deps \
81 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
82 | && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
83 | && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
84 | && if [ -f "/etc/apk/keys/nginx_signing.rsa.pub" ]; then rm -f /etc/apk/keys/nginx_signing.rsa.pub; fi \
85 | # Bring in gettext so we can get `envsubst`, then throw
86 | # the rest away. To do this, we need to install `gettext`
87 | # then move `envsubst` out of the way so `gettext` can
88 | # be deleted completely, then move `envsubst` back.
89 | && apk add --no-cache --virtual .gettext gettext \
90 | && mv /usr/bin/envsubst /tmp/ \
91 | \
92 | && runDeps="$( \
93 | scanelf --needed --nobanner /tmp/envsubst \
94 | | awk '{ gsub(/,/, "\nso:", $2); print "so:" $2 }' \
95 | | sort -u \
96 | | xargs -r apk info --installed \
97 | | sort -u \
98 | )" \
99 | && apk add --no-cache $runDeps \
100 | && apk del --no-network .gettext \
101 | && mv /tmp/envsubst /usr/local/bin/ \
102 | # Bring in tzdata so users could set the timezones through the environment
103 | # variables
104 | && apk add --no-cache tzdata \
105 | # forward request and error logs to docker log collector
106 | && ln -sf /dev/stdout /var/log/nginx/access.log \
107 | && ln -sf /dev/stderr /var/log/nginx/error.log \
108 | # create a docker-entrypoint.d directory
109 | && mkdir /docker-entrypoint.d
110 |
111 | COPY docker-entrypoint.sh /
112 | COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
113 | COPY 15-local-resolvers.envsh /docker-entrypoint.d
114 | COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
115 | COPY 30-tune-worker-processes.sh /docker-entrypoint.d
116 | ENTRYPOINT ["/docker-entrypoint.sh"]
117 |
118 | EXPOSE 80
119 |
120 | STOPSIGNAL SIGQUIT
121 |
122 | CMD ["nginx", "-g", "daemon off;"]
123 |
--------------------------------------------------------------------------------
/stable/debian/Dockerfile:
--------------------------------------------------------------------------------
1 | #
2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
3 | #
4 | # PLEASE DO NOT EDIT IT DIRECTLY.
5 | #
6 | FROM debian:bullseye-slim
7 |
8 | LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
9 |
10 | ENV NGINX_VERSION 1.24.0
11 | ENV NJS_VERSION 0.8.0
12 | ENV PKG_RELEASE 1~bullseye
13 |
14 | RUN set -x \
15 | # create nginx user/group first, to be consistent throughout docker variants
16 | && groupadd --system --gid 101 nginx \
17 | && useradd --system --gid nginx --no-create-home --home /nonexistent --comment "nginx user" --shell /bin/false --uid 101 nginx \
18 | && apt-get update \
19 | && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates \
20 | && \
21 | NGINX_GPGKEY=573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62; \
22 | NGINX_GPGKEY_PATH=/usr/share/keyrings/nginx-archive-keyring.gpg; \
23 | export GNUPGHOME="$(mktemp -d)"; \
24 | found=''; \
25 | for server in \
26 | hkp://keyserver.ubuntu.com:80 \
27 | pgp.mit.edu \
28 | ; do \
29 | echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
30 | gpg1 --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
31 | done; \
32 | test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
33 | gpg1 --export "$NGINX_GPGKEY" > "$NGINX_GPGKEY_PATH" ; \
34 | rm -rf "$GNUPGHOME"; \
35 | apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \
36 | && dpkgArch="$(dpkg --print-architecture)" \
37 | && nginxPackages=" \
38 | nginx=${NGINX_VERSION}-${PKG_RELEASE} \
39 | nginx-module-xslt=${NGINX_VERSION}-${PKG_RELEASE} \
40 | nginx-module-geoip=${NGINX_VERSION}-${PKG_RELEASE} \
41 | nginx-module-image-filter=${NGINX_VERSION}-${PKG_RELEASE} \
42 | nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${PKG_RELEASE} \
43 | " \
44 | && case "$dpkgArch" in \
45 | amd64|arm64) \
46 | # arches officially built by upstream
47 | echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/debian/ bullseye nginx" >> /etc/apt/sources.list.d/nginx.list \
48 | && apt-get update \
49 | ;; \
50 | *) \
51 | # we're on an architecture upstream doesn't officially build for
52 | # let's build binaries from the published source packages
53 | echo "deb-src [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/debian/ bullseye nginx" >> /etc/apt/sources.list.d/nginx.list \
54 | \
55 | # new directory for storing sources and .deb files
56 | && tempDir="$(mktemp -d)" \
57 | && chmod 777 "$tempDir" \
58 | # (777 to ensure APT's "_apt" user can access it too)
59 | \
60 | # save list of currently-installed packages so build dependencies can be cleanly removed later
61 | && savedAptMark="$(apt-mark showmanual)" \
62 | \
63 | # build .deb files from upstream's source packages (which are verified by apt-get)
64 | && apt-get update \
65 | && apt-get build-dep -y $nginxPackages \
66 | && ( \
67 | cd "$tempDir" \
68 | && DEB_BUILD_OPTIONS="nocheck parallel=$(nproc)" \
69 | apt-get source --compile $nginxPackages \
70 | ) \
71 | # we don't remove APT lists here because they get re-downloaded and removed later
72 | \
73 | # reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
74 | # (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
75 | && apt-mark showmanual | xargs apt-mark auto > /dev/null \
76 | && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
77 | \
78 | # create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
79 | && ls -lAFh "$tempDir" \
80 | && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
81 | && grep '^Package: ' "$tempDir/Packages" \
82 | && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
83 | # work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
84 | # Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
85 | # ...
86 | # E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
87 | && apt-get -o Acquire::GzipIndexes=false update \
88 | ;; \
89 | esac \
90 | \
91 | && apt-get install --no-install-recommends --no-install-suggests -y \
92 | $nginxPackages \
93 | gettext-base \
94 | curl \
95 | && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
96 | \
97 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
98 | && if [ -n "$tempDir" ]; then \
99 | apt-get purge -y --auto-remove \
100 | && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
101 | fi \
102 | # forward request and error logs to docker log collector
103 | && ln -sf /dev/stdout /var/log/nginx/access.log \
104 | && ln -sf /dev/stderr /var/log/nginx/error.log \
105 | # create a docker-entrypoint.d directory
106 | && mkdir /docker-entrypoint.d
107 |
108 | COPY docker-entrypoint.sh /
109 | COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
110 | COPY 15-local-resolvers.envsh /docker-entrypoint.d
111 | COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
112 | COPY 30-tune-worker-processes.sh /docker-entrypoint.d
113 | ENTRYPOINT ["/docker-entrypoint.sh"]
114 |
115 | EXPOSE 80
116 |
117 | STOPSIGNAL SIGQUIT
118 |
119 | CMD ["nginx", "-g", "daemon off;"]
120 |
--------------------------------------------------------------------------------
/mainline/debian/Dockerfile:
--------------------------------------------------------------------------------
1 | #
2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
3 | #
4 | # PLEASE DO NOT EDIT IT DIRECTLY.
5 | #
6 | FROM debian:bookworm-slim
7 |
8 | LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
9 |
10 | ENV NGINX_VERSION 1.25.3
11 | ENV NJS_VERSION 0.8.2
12 | ENV PKG_RELEASE 1~bookworm
13 |
14 | RUN set -x \
15 | # create nginx user/group first, to be consistent throughout docker variants
16 | && groupadd --system --gid 101 nginx \
17 | && useradd --system --gid nginx --no-create-home --home /nonexistent --comment "nginx user" --shell /bin/false --uid 101 nginx \
18 | && apt-get update \
19 | && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates \
20 | && \
21 | NGINX_GPGKEY=573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62; \
22 | NGINX_GPGKEY_PATH=/usr/share/keyrings/nginx-archive-keyring.gpg; \
23 | export GNUPGHOME="$(mktemp -d)"; \
24 | found=''; \
25 | for server in \
26 | hkp://keyserver.ubuntu.com:80 \
27 | pgp.mit.edu \
28 | ; do \
29 | echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
30 | gpg1 --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
31 | done; \
32 | test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
33 | gpg1 --export "$NGINX_GPGKEY" > "$NGINX_GPGKEY_PATH" ; \
34 | rm -rf "$GNUPGHOME"; \
35 | apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \
36 | && dpkgArch="$(dpkg --print-architecture)" \
37 | && nginxPackages=" \
38 | nginx=${NGINX_VERSION}-${PKG_RELEASE} \
39 | nginx-module-xslt=${NGINX_VERSION}-${PKG_RELEASE} \
40 | nginx-module-geoip=${NGINX_VERSION}-${PKG_RELEASE} \
41 | nginx-module-image-filter=${NGINX_VERSION}-${PKG_RELEASE} \
42 | nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${PKG_RELEASE} \
43 | " \
44 | && case "$dpkgArch" in \
45 | amd64|arm64) \
46 | # arches officially built by upstream
47 | echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/mainline/debian/ bookworm nginx" >> /etc/apt/sources.list.d/nginx.list \
48 | && apt-get update \
49 | ;; \
50 | *) \
51 | # we're on an architecture upstream doesn't officially build for
52 | # let's build binaries from the published source packages
53 | echo "deb-src [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/mainline/debian/ bookworm nginx" >> /etc/apt/sources.list.d/nginx.list \
54 | \
55 | # new directory for storing sources and .deb files
56 | && tempDir="$(mktemp -d)" \
57 | && chmod 777 "$tempDir" \
58 | # (777 to ensure APT's "_apt" user can access it too)
59 | \
60 | # save list of currently-installed packages so build dependencies can be cleanly removed later
61 | && savedAptMark="$(apt-mark showmanual)" \
62 | \
63 | # build .deb files from upstream's source packages (which are verified by apt-get)
64 | && apt-get update \
65 | && apt-get build-dep -y $nginxPackages \
66 | && ( \
67 | cd "$tempDir" \
68 | && DEB_BUILD_OPTIONS="nocheck parallel=$(nproc)" \
69 | apt-get source --compile $nginxPackages \
70 | ) \
71 | # we don't remove APT lists here because they get re-downloaded and removed later
72 | \
73 | # reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
74 | # (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
75 | && apt-mark showmanual | xargs apt-mark auto > /dev/null \
76 | && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
77 | \
78 | # create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
79 | && ls -lAFh "$tempDir" \
80 | && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
81 | && grep '^Package: ' "$tempDir/Packages" \
82 | && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
83 | # work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
84 | # Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
85 | # ...
86 | # E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
87 | && apt-get -o Acquire::GzipIndexes=false update \
88 | ;; \
89 | esac \
90 | \
91 | && apt-get install --no-install-recommends --no-install-suggests -y \
92 | $nginxPackages \
93 | gettext-base \
94 | curl \
95 | && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
96 | \
97 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
98 | && if [ -n "$tempDir" ]; then \
99 | apt-get purge -y --auto-remove \
100 | && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
101 | fi \
102 | # forward request and error logs to docker log collector
103 | && ln -sf /dev/stdout /var/log/nginx/access.log \
104 | && ln -sf /dev/stderr /var/log/nginx/error.log \
105 | # create a docker-entrypoint.d directory
106 | && mkdir /docker-entrypoint.d
107 |
108 | COPY docker-entrypoint.sh /
109 | COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
110 | COPY 15-local-resolvers.envsh /docker-entrypoint.d
111 | COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
112 | COPY 30-tune-worker-processes.sh /docker-entrypoint.d
113 | ENTRYPOINT ["/docker-entrypoint.sh"]
114 |
115 | EXPOSE 80
116 |
117 | STOPSIGNAL SIGQUIT
118 |
119 | CMD ["nginx", "-g", "daemon off;"]
120 |
--------------------------------------------------------------------------------
/update.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -Eeuo pipefail
3 | shopt -s nullglob
4 |
5 | cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
6 |
7 | declare branches=(
8 | "stable"
9 | "mainline"
10 | )
11 |
12 | # Current nginx versions
13 | # Remember to update pkgosschecksum when changing this.
14 | declare -A nginx=(
15 | [mainline]='1.25.3'
16 | [stable]='1.24.0'
17 | )
18 |
19 | # Current njs versions
20 | declare -A njs=(
21 | [mainline]='0.8.2'
22 | [stable]='0.8.0'
23 | )
24 |
25 | # Current package patchlevel version
26 | # Remember to update pkgosschecksum when changing this.
27 | declare -A pkg=(
28 | [mainline]=1
29 | [stable]=1
30 | )
31 |
32 | declare -A debian=(
33 | [mainline]='bookworm'
34 | [stable]='bullseye'
35 | )
36 |
37 | declare -A alpine=(
38 | [mainline]='3.18'
39 | [stable]='3.18'
40 | )
41 |
42 | # When we bump njs version in a stable release we don't move the tag in the
43 | # mercurial repo. This setting allows us to specify a revision to check out
44 | # when building alpine packages on architectures not supported by nginx.org
45 | # Remember to update pkgosschecksum when changing this.
46 | declare -A rev=(
47 | [mainline]='${NGINX_VERSION}-${PKG_RELEASE}'
48 | [stable]='e5d85b3424bb'
49 | )
50 |
51 | # Holds SHA512 checksum for the pkg-oss tarball produced by source code
52 | # revision/tag in the previous block
53 | # Used in alpine builds for architectures not packaged by nginx.org
54 | declare -A pkgosschecksum=(
55 | [mainline]='00b217979265cc9d66c991c9c89427558936dbaa568d175ca45780589171d94f1866217be09a83438d95494cf38baaa6788320f6d8d23f2fb29c03117391ff88'
56 | [stable]='4f33347bf05e7d7dd42a52b6e7af7ec21e3ed71df05a8ec16dd1228425f04e4318d88b1340370ccb6ad02cde590fc102094ddffbb1fc86d2085295a43f02f67b'
57 | )
58 |
59 | get_packages() {
60 | local distro="$1"
61 | shift
62 | local branch="$1"
63 | shift
64 | local perl=
65 | local r=
66 | local sep=
67 |
68 | case "$distro:$branch" in
69 | alpine*:*)
70 | r="r"
71 | sep="."
72 | ;;
73 | debian*:*)
74 | sep="+"
75 | ;;
76 | esac
77 |
78 | case "$distro" in
79 | *-perl)
80 | perl="nginx-module-perl"
81 | ;;
82 | esac
83 |
84 | echo -n ' \\\n'
85 | case "$distro" in
86 | *-slim)
87 | for p in nginx; do
88 | echo -n ' '"$p"'=${NGINX_VERSION}-'"$r"'${PKG_RELEASE} \\'
89 | done
90 | ;;
91 | *)
92 | for p in nginx nginx-module-xslt nginx-module-geoip nginx-module-image-filter $perl; do
93 | echo -n ' '"$p"'=${NGINX_VERSION}-'"$r"'${PKG_RELEASE} \\\n'
94 | done
95 | for p in nginx-module-njs; do
96 | echo -n ' '"$p"'=${NGINX_VERSION}'"$sep"'${NJS_VERSION}-'"$r"'${PKG_RELEASE} \\'
97 | done
98 | ;;
99 | esac
100 | }
101 |
102 | get_packagerepo() {
103 | local distro="${1%-perl}"
104 | distro="${distro%-slim}"
105 | shift
106 | local branch="$1"
107 | shift
108 |
109 | [ "$branch" = "mainline" ] && branch="$branch/" || branch=""
110 |
111 | echo "https://nginx.org/packages/${branch}${distro}/"
112 | }
113 |
114 | get_packagever() {
115 | local distro="${1%-perl}"
116 | shift
117 | local branch="$1"
118 | shift
119 | local suffix=
120 |
121 | [ "${distro}" = "debian" ] && suffix="~${debianver}"
122 |
123 | echo ${pkg[$branch]}${suffix}
124 | }
125 |
126 | get_buildtarget() {
127 | local distro="$1"
128 | case "$distro" in
129 | alpine-slim)
130 | echo base
131 | ;;
132 | alpine-perl)
133 | echo module-perl
134 | ;;
135 | alpine)
136 | echo module-geoip module-image-filter module-njs module-xslt
137 | ;;
138 | debian)
139 | echo "\$nginxPackages"
140 | ;;
141 | debian-perl)
142 | echo "nginx-module-perl=\${NGINX_VERSION}-\${PKG_RELEASE}"
143 | ;;
144 | esac
145 | }
146 |
147 | generated_warning() {
148 | cat <<__EOF__
149 | #
150 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
151 | #
152 | # PLEASE DO NOT EDIT IT DIRECTLY.
153 | #
154 | __EOF__
155 | }
156 |
157 | for branch in "${branches[@]}"; do
158 | for variant in \
159 | alpine{,-perl,-slim} \
160 | debian{,-perl}; do
161 | echo "$branch: $variant dockerfiles"
162 | dir="$branch/$variant"
163 | variant="$(basename "$variant")"
164 |
165 | [ -d "$dir" ] || continue
166 |
167 | template="Dockerfile-${variant}.template"
168 | {
169 | generated_warning
170 | cat "$template"
171 | } >"$dir/Dockerfile"
172 |
173 | debianver="${debian[$branch]}"
174 | alpinever="${alpine[$branch]}"
175 | nginxver="${nginx[$branch]}"
176 | njsver="${njs[${branch}]}"
177 | revver="${rev[${branch}]}"
178 | pkgosschecksumver="${pkgosschecksum[${branch}]}"
179 |
180 | packagerepo=$(get_packagerepo "$variant" "$branch")
181 | packages=$(get_packages "$variant" "$branch")
182 | packagever=$(get_packagever "$variant" "$branch")
183 | buildtarget=$(get_buildtarget "$variant")
184 |
185 | sed -i.bak \
186 | -e 's,%%ALPINE_VERSION%%,'"$alpinever"',' \
187 | -e 's,%%DEBIAN_VERSION%%,'"$debianver"',' \
188 | -e 's,%%NGINX_VERSION%%,'"$nginxver"',' \
189 | -e 's,%%NJS_VERSION%%,'"$njsver"',' \
190 | -e 's,%%PKG_RELEASE%%,'"$packagever"',' \
191 | -e 's,%%PACKAGES%%,'"$packages"',' \
192 | -e 's,%%PACKAGEREPO%%,'"$packagerepo"',' \
193 | -e 's,%%REVISION%%,'"$revver"',' \
194 | -e 's,%%PKGOSSCHECKSUM%%,'"$pkgosschecksumver"',' \
195 | -e 's,%%BUILDTARGET%%,'"$buildtarget"',' \
196 | "$dir/Dockerfile"
197 |
198 | done
199 |
200 | for variant in \
201 | alpine-slim \
202 | debian; do \
203 | echo "$branch: $variant entrypoint scripts"
204 | dir="$branch/$variant"
205 | cp -a entrypoint/*.sh "$dir/"
206 | cp -a entrypoint/*.envsh "$dir/"
207 | done
208 | done
209 |
--------------------------------------------------------------------------------
/modules/README.md:
--------------------------------------------------------------------------------
1 | # Adding third-party modules to nginx official image
2 |
3 | It's possible to extend a mainline image with third-party modules either from
4 | your own instructions following a simple filesystem layout/syntax using
5 | `build_module.sh` helper script, or falling back to package sources from
6 | [pkg-oss](https://hg.nginx.org/pkg-oss).
7 |
8 | ## Usage
9 |
10 | ```
11 | $ docker build --build-arg ENABLED_MODULES="ndk lua" -t my-nginx-with-lua .
12 | ```
13 | This command will attempt to build an image called `my-nginx-with-lua` based on
14 | official nginx docker hub image with two modules: `ndk` and `lua`.
15 | By default, a Debian-based image will be used. If you wish to use Alpine
16 | instead, add `-f Dockerfile.alpine` to the command line. By default, mainline
17 | images are used as a base, but it's possible to specify a different image by
18 | providing `NGINX_FROM_IMAGE` build argument, e.g. `--build-arg
19 | NGINX_FROM_IMAGE=nginx:stable`.
20 |
21 | The build script will look for module build definition files on filesystem
22 | directory under the same name as the module (and resulting package) and if
23 | those are not found will try to look up requested modules in the pkg-oss
24 | repository.
25 |
26 | For well-known modules we maintain a set of build sources packages over at
27 | `pkg-oss`, so it's probably a good idea to rely on those instead of providing
28 | your own implementation.
29 |
30 | As of the time of writing this README, the following modules and their versions
31 | are available from `pkg-oss` repository:
32 |
33 | ```
34 | /pkg-oss $ LC_ALL=C make -C debian list-all-modules
35 | make: Entering directory '/pkg-oss/debian'
36 | auth-spnego 1.1.1-1
37 | brotli 1.0.0-1
38 | encrypted-session 0.09-1
39 | fips-check 0.1-1
40 | geoip 1.25.1-1
41 | geoip2 3.4-1
42 | headers-more 0.34-1
43 | image-filter 1.25.1-1
44 | lua 0.10.25-1
45 | modsecurity 1.0.3-3
46 | ndk 0.3.2-1
47 | njs 0.8.0-1
48 | opentracing 0.29.0-1
49 | passenger 6.0.18-1
50 | perl 1.25.1-1
51 | rtmp 1.2.2-1
52 | set-misc 0.33-1
53 | subs-filter 0.6.4-1
54 | xslt 1.25.1-1
55 | make: Leaving directory '/pkg-oss/debian'
56 | ```
57 |
58 | If you still want to provide your own instructions for a specific module,
59 | organize the build directory in the following way, e.g. for the `echo` module:
60 |
61 | ```
62 | docker-nginx/modules $ tree echo
63 | echo
64 | ├── build-deps
65 | ├── prebuild
66 | └── source
67 |
68 | 0 directories, 3 files
69 | ```
70 |
71 | The scripts expect one file to always exist for a module you wish to build
72 | manually: `source`. It should contain a link to a zip/tarball source code of a
73 | module you want to build. In `build-deps` you can specify build dependencies
74 | for a module as found in Debian or Alpine repositories. `prebuild` is a shell
75 | script (make it `chmod +x prebuild`!) that will be executed prior to building
76 | the module but after installing the dependencies, so it can be used to install
77 | additional build dependencies if they are not available from Debian or Alpine.
78 | Keep in mind that those dependencies won't be automatically copied to the
79 | resulting image and if you're building a library, build it statically.
80 |
81 | Once the build is done in the builder image, the built packages are copied over
82 | to resulting image and installed via apt/apk. The resulting image will be
83 | tagged and can be used the same way as an official docker hub image.
84 |
85 | Note that we can not provide any support for those modifications and in no way
86 | guarantee they will work as nicely as a build without third-party modules. If
87 | you encounter any issues running your image with the modules enabled, please
88 | reproduce with a vanilla image first.
89 |
90 | ## Examples
91 |
92 | ### docker-compose with pre-packaged modules
93 |
94 | If desired modules are already packaged in
95 | [pkg-oss](https://hg.nginx.org/pkg-oss/) - e.g. `debian/Makefile.module-*`
96 | exists for a given module, you can use this example.
97 |
98 | 1. Create a directory for your project:
99 |
100 | ```
101 | mkdir myapp
102 | cd myapp
103 | ```
104 |
105 | 2. Populate the build context for a custom nginx image:
106 |
107 | ```
108 | mkdir my-nginx
109 | curl -o my-nginx/Dockerfile https://raw.githubusercontent.com/nginxinc/docker-nginx/master/modules/Dockerfile
110 | ```
111 |
112 | 3. Create a `docker-compose.yml` file:
113 |
114 | ```
115 | cat > docker-compose.yml << __EOF__
116 | version: "3.3"
117 | services:
118 | web:
119 | build:
120 | context: ./my-nginx/
121 | args:
122 | ENABLED_MODULES: ndk lua
123 | image: my-nginx-with-lua:v1
124 | ports:
125 | - "80:8000"
126 | __EOF__
127 | ```
128 |
129 | Now, running `docker-compose up --build -d` will build the image and run the application for you.
130 |
131 | ### docker-compose with a non-packaged module
132 |
133 | If a needed module is not available via `pkg-oss`, you can use this example.
134 |
135 | We're going to build the image with [ngx_cache_purge](https://github.com/FRiCKLE/ngx_cache_purge) module.
136 |
137 | The steps are similar to a previous example, with a notable difference of
138 | providing a URL to fetch the module source code from.
139 |
140 | 1. Create a directory for your project:
141 |
142 | ```
143 | mkdir myapp-cache
144 | cd myapp-cache
145 | ```
146 |
147 | 2. Populate the build context for a custom nginx image:
148 |
149 | ```
150 | mkdir my-nginx
151 | curl -o my-nginx/Dockerfile https://raw.githubusercontent.com/nginxinc/docker-nginx/master/modules/Dockerfile
152 | mkdir my-nginx/cachepurge
153 | echo "https://github.com/FRiCKLE/ngx_cache_purge/archive/2.3.tar.gz" > my-nginx/cachepurge/source
154 | ```
155 |
156 | 3. Create a `docker-compose.yml` file:
157 |
158 | ```
159 | cat > docker-compose.yml << __EOF__
160 | version: "3.3"
161 | services:
162 | web:
163 | build:
164 | context: ./my-nginx/
165 | args:
166 | ENABLED_MODULES: cachepurge
167 | image: my-nginx-with-cachepurge:v1
168 | ports:
169 | - "80:8080"
170 | __EOF__
171 | ```
172 |
173 | Now, running `docker-compose up --build -d` will build the image and run the application for you.
174 |
--------------------------------------------------------------------------------