├── .github
├── CODEOWNERS
├── ISSUE_TEMPLATE
│ ├── bug_report.yml
│ ├── config.yml
│ └── feature_request.yml
├── pull_request_template.md
└── workflows
│ ├── ci.yml
│ ├── f5_cla.yml
│ └── sync.yml
├── .gitignore
├── .test
├── config.sh
└── tests
│ ├── ipv6
│ ├── expected-std-out.txt
│ └── run.sh
│ ├── modules
│ ├── nginx.conf.sme
│ └── run.sh
│ ├── static
│ └── run.sh
│ ├── templates-resolver-ipv6
│ ├── expected-std-out.txt
│ ├── run.sh
│ └── server.conf.template
│ ├── templates-resolver
│ ├── run.sh
│ └── server.conf.template
│ ├── templates
│ ├── run.sh
│ └── server.conf.template
│ └── workers
│ ├── expected-std-out.txt
│ ├── run.sh
│ └── server.conf.template
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Dockerfile-alpine-otel.template
├── Dockerfile-alpine-perl.template
├── Dockerfile-alpine-slim.template
├── Dockerfile-alpine.template
├── Dockerfile-debian-otel.template
├── Dockerfile-debian-perl.template
├── Dockerfile-debian.template
├── LICENSE
├── README.md
├── SECURITY.md
├── SUPPORT.md
├── entrypoint
├── 10-listen-on-ipv6-by-default.sh
├── 15-local-resolvers.envsh
├── 20-envsubst-on-templates.sh
├── 30-tune-worker-processes.sh
└── docker-entrypoint.sh
├── generate-stackbrew-library.sh
├── mainline
├── alpine-otel
│ └── Dockerfile
├── alpine-perl
│ └── Dockerfile
├── alpine-slim
│ ├── 10-listen-on-ipv6-by-default.sh
│ ├── 15-local-resolvers.envsh
│ ├── 20-envsubst-on-templates.sh
│ ├── 30-tune-worker-processes.sh
│ ├── Dockerfile
│ └── docker-entrypoint.sh
├── alpine
│ └── Dockerfile
├── debian-otel
│ └── Dockerfile
├── debian-perl
│ └── Dockerfile
└── debian
│ ├── 10-listen-on-ipv6-by-default.sh
│ ├── 15-local-resolvers.envsh
│ ├── 20-envsubst-on-templates.sh
│ ├── 30-tune-worker-processes.sh
│ ├── Dockerfile
│ └── docker-entrypoint.sh
├── modules
├── Dockerfile
├── Dockerfile.alpine
├── README.md
└── echo
│ ├── build-deps
│ ├── prebuild
│ └── source
├── stable
├── alpine-otel
│ └── Dockerfile
├── alpine-perl
│ └── Dockerfile
├── alpine-slim
│ ├── 10-listen-on-ipv6-by-default.sh
│ ├── 15-local-resolvers.envsh
│ ├── 20-envsubst-on-templates.sh
│ ├── 30-tune-worker-processes.sh
│ ├── Dockerfile
│ └── docker-entrypoint.sh
├── alpine
│ └── Dockerfile
├── debian-otel
│ └── Dockerfile
├── debian-perl
│ └── Dockerfile
└── debian
│ ├── 10-listen-on-ipv6-by-default.sh
│ ├── 15-local-resolvers.envsh
│ ├── 20-envsubst-on-templates.sh
│ ├── 30-tune-worker-processes.sh
│ ├── Dockerfile
│ └── docker-entrypoint.sh
├── sync-awsecr.sh
└── update.sh
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | #####################
2 | # Main global owner #
3 | #####################
4 |
5 | * @nginx/syseng
6 |
7 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: 🐛 Bug report
3 | description: Create a report to help us improve
4 | labels: bug
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: |
9 | Thanks for taking the time to fill out this bug report!
10 |
11 | Before you continue filling out this report, please take a moment to check that your bug has not been [already reported on GitHub][issue search] 🙌
12 |
13 | Remember to redact any sensitive information such as authentication credentials and/or license keys!
14 |
15 | [issue search]: ../search?q=is%3Aissue&type=issues
16 |
17 | - type: textarea
18 | id: overview
19 | attributes:
20 | label: Bug Overview
21 | description: A clear and concise overview of the bug.
22 | placeholder: When I do "X" with the NGINX Docker image, "Y" happens instead of "Z".
23 | validations:
24 | required: true
25 |
26 | - type: textarea
27 | id: behavior
28 | attributes:
29 | label: Expected Behavior
30 | description: A clear and concise description of what you expected to happen.
31 | placeholder: When I do "X" with the NGINX Docker image, I expect "Z" to happen.
32 | validations:
33 | required: true
34 |
35 | - type: textarea
36 | id: steps
37 | attributes:
38 | label: Steps to Reproduce the Bug
39 | description: Detail the series of steps required to reproduce the bug.
40 | placeholder: When I run the Docker NGINX image using [...], the image fails with an error message. If I check the terminal outputs and/or logs, I see the following error info.
41 | validations:
42 | required: true
43 |
44 | - type: textarea
45 | id: environment
46 | attributes:
47 | label: Environment Details
48 | description: Please provide details about your environment.
49 | value: |
50 | - Version/release of Docker and method of installation (e.g. Docker Desktop / Docker Server)
51 | - Version of the Docker NGINX image or specific commit: [e.g. 1.4.3/commit hash]
52 | - Target deployment platform: [e.g. OpenShift/Kubernetes/Docker Compose/local cluster/etc...]
53 | - Target OS: [e.g. RHEL 9/Ubuntu 24.04/etc...]
54 | validations:
55 | required: true
56 |
57 | - type: textarea
58 | id: context
59 | attributes:
60 | label: Additional Context
61 | description: Add any other context about the problem here.
62 | placeholder: Feel free to add any other context/information/screenshots/etc... that you think might be relevant to this issue in here.
63 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | ---
2 | blank_issues_enabled: false
3 | contact_links:
4 | - name: 💬 Talk to the NGINX community!
5 | url: https://community.nginx.org
6 | about: A community forum for NGINX users, developers, and contributors
7 | - name: 📝 Code of Conduct
8 | url: https://www.contributor-covenant.org/version/2/1/code_of_conduct
9 | about: NGINX follows the Contributor Covenant Code of Conduct to ensure a safe and inclusive community
10 | - name: 💼 For commercial & enterprise users
11 | url: https://www.f5.com/products/nginx
12 | about: F5 offers a wide range of NGINX products for commercial & enterprise users
13 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: ✨ Feature request
3 | description: Suggest an idea for this project
4 | labels: enhancement
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: |
9 | Thanks for taking the time to fill out this feature request!
10 |
11 | Before you continue filling out this request, please take a moment to check that your feature has not been [already requested on GitHub][issue search] 🙌
12 |
13 | **Note:** If you are seeking community support or have a question, please consider starting a new thread via [GitHub discussions][discussions] or the [NGINX Community forum][forum].
14 |
15 | [issue search]: ../search?q=is%3Aissue&type=issues
16 |
17 | [discussions]: ../discussions
18 | [forum]: https://community.nginx.org
19 |
20 | - type: textarea
21 | id: overview
22 | attributes:
23 | label: Feature Overview
24 | description: A clear and concise description of what the feature request is.
25 | placeholder: I would like the Docker NGINX image to be able to do "X".
26 | validations:
27 | required: true
28 |
29 | - type: textarea
30 | id: alternatives
31 | attributes:
32 | label: Alternatives Considered
33 | description: Detail any potential alternative solutions/workarounds you've used or considered.
34 | placeholder: I have done/might be able to do "X" in the Docker NGINX image by doing "Y".
35 |
36 | - type: textarea
37 | id: context
38 | attributes:
39 | label: Additional Context
40 | description: Add any other context about the problem here.
41 | placeholder: Feel free to add any other context/information/screenshots/etc... that you think might be relevant to this feature request here.
42 |
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | ### Proposed changes
2 |
3 | Describe the use case and detail of the change. If this PR addresses an issue on GitHub, make sure to include a link to that issue using one of the [supported keywords](https://docs.github.com/en/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue) in this PR's description or commit message.
4 |
5 | ### Checklist
6 |
7 | Before creating a PR, run through this checklist and mark each as complete:
8 |
9 | - [ ] I have read the [contributing guidelines](/CONTRIBUTING.md)
10 | - [ ] I have signed the [F5 Contributor License Agreement (CLA)](https://github.com/f5/f5-cla/blob/main/docs/f5_cla.md)
11 | - [ ] I have run `./update.sh` and ensured all entrypoint/Dockerfile template changes have been applied to the relevant image entrypoint scripts & Dockerfiles
12 | - [ ] If applicable, I have added tests that prove my fix is effective or that my feature works
13 | - [ ] If applicable, I have checked that any relevant tests pass after adding my changes
14 | - [ ] I have updated any relevant documentation ([`README.md`](/README.md) and/or [`modules/README.md`](/modules/README.md))
15 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: GitHub CI
2 |
3 | on:
4 | pull_request:
5 | push:
6 | schedule:
7 | - cron: 0 10 * * Mon
8 |
9 | defaults:
10 | run:
11 | shell: 'bash -Eeuo pipefail -x {0}'
12 |
13 | jobs:
14 |
15 | generate-jobs:
16 | name: Generate Jobs
17 | runs-on: ubuntu-latest
18 | outputs:
19 | strategy: ${{ steps.generate-jobs.outputs.strategy }}
20 | steps:
21 | - uses: actions/checkout@v3
22 | - uses: docker-library/bashbrew@v0.1.12
23 | - id: generate-jobs
24 | name: Generate Jobs
25 | run: |
26 | strategy="$(GITHUB_REPOSITORY=nginx "$BASHBREW_SCRIPTS/github-actions/generate.sh")"
27 | strategy="$(GITHUB_REPOSITORY=nginx "$BASHBREW_SCRIPTS/github-actions/munge-i386.sh" -c <<<"$strategy")"
28 | echo "strategy=$strategy" >> "$GITHUB_OUTPUT"
29 | jq . <<<"$strategy" # sanity check / debugging aid
30 |
31 | test:
32 | needs: generate-jobs
33 | strategy: ${{ fromJson(needs.generate-jobs.outputs.strategy) }}
34 | name: ${{ matrix.name }}
35 | runs-on: ${{ matrix.os }}
36 | steps:
37 | - uses: actions/checkout@v3
38 | - name: Prepare Environment
39 | run: ${{ matrix.runs.prepare }}
40 | - name: Pull Dependencies
41 | run: ${{ matrix.runs.pull }}
42 | - name: Build ${{ matrix.name }}
43 | run: ${{ matrix.runs.build }}
44 | - name: History ${{ matrix.name }}
45 | run: ${{ matrix.runs.history }}
46 | - name: Test ${{ matrix.name }}
47 | run: ${{ matrix.runs.test }}
48 | - name: '"docker images"'
49 | run: ${{ matrix.runs.images }}
50 |
--------------------------------------------------------------------------------
/.github/workflows/f5_cla.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: F5 CLA
3 | on:
4 | issue_comment:
5 | types: [created]
6 | pull_request_target:
7 | types: [opened, closed, synchronize]
8 | permissions: read-all
9 | jobs:
10 | f5-cla:
11 | name: F5 CLA
12 | runs-on: ubuntu-24.04
13 | permissions:
14 | actions: write
15 | pull-requests: write
16 | statuses: write
17 | steps:
18 | - name: Run F5 Contributor License Agreement (CLA) assistant
19 | if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have hereby read the F5 CLA and agree to its terms') || github.event_name == 'pull_request_target'
20 | uses: contributor-assistant/github-action@ca4a40a7d1004f18d9960b404b97e5f30a505a08 # v2.6.1
21 | with:
22 | # Path to the CLA document.
23 | path-to-document: https://github.com/f5/f5-cla/blob/main/docs/f5_cla.md
24 | # Custom CLA messages.
25 | custom-notsigned-prcomment: '🎉 Thank you for your contribution! It appears you have not yet signed the [F5 Contributor License Agreement (CLA)](https://github.com/f5/f5-cla/blob/main/docs/f5_cla.md), which is required for your changes to be incorporated into an F5 Open Source Software (OSS) project. Please kindly read the [F5 CLA](https://github.com/f5/f5-cla/blob/main/docs/f5_cla.md) and reply on a new comment with the following text to agree:'
26 | custom-pr-sign-comment: 'I have hereby read the F5 CLA and agree to its terms'
27 | custom-allsigned-prcomment: '✅ All required contributors have signed the F5 CLA for this PR. Thank you!'
28 | # Remote repository storing CLA signatures.
29 | remote-organization-name: f5
30 | remote-repository-name: f5-cla-data
31 | # Branch where CLA signatures are stored.
32 | branch: main
33 | path-to-signatures: signatures/signatures.json
34 | # Comma separated list of usernames for maintainers or any other individuals who should not be prompted for a CLA.
35 | # NOTE: You will want to edit the usernames to suit your project needs.
36 | allowlist: bot*
37 | # Do not lock PRs after a merge.
38 | lock-pullrequest-aftermerge: false
39 | env:
40 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
41 | PERSONAL_ACCESS_TOKEN: ${{ secrets.F5_CLA_TOKEN }}
42 |
--------------------------------------------------------------------------------
/.github/workflows/sync.yml:
--------------------------------------------------------------------------------
1 | name: Sync DockerHub with AWS ECR
2 |
3 | on:
4 | workflow_dispatch:
5 | schedule:
6 | - cron: 23 20 * * *
7 |
8 | defaults:
9 | run:
10 | shell: 'bash -Eeuo pipefail -x {0}'
11 |
12 | jobs:
13 | sync-awsecr:
14 | name: Sync Docker Hub to AWS ECR Public
15 | runs-on: ubuntu-24.04
16 | permissions:
17 | id-token: write
18 | contents: read
19 | steps:
20 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
21 |
22 | - name: Configure AWS credentials
23 | uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2
24 | with:
25 | role-to-assume: ${{ secrets.AWS_ROLE_PUBLIC_ECR }}
26 | aws-region: us-east-1
27 |
28 | - name: Login to Amazon ECR Public
29 | id: login-ecr-public
30 | uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1
31 | with:
32 | registry-type: public
33 |
34 | - name: Login to Docker Hub
35 | uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
36 | with:
37 | username: ${{ secrets.DOCKERHUB_USERNAME }}
38 | password: ${{ secrets.DOCKERHUB_TOKEN }}
39 |
40 | - name: Build, tag, and push docker image to Amazon ECR Public
41 | run: |
42 | ./sync-awsecr.sh > sync-real.sh
43 | chmod +x sync-real.sh
44 | ./sync-real.sh
45 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.bak
2 |
--------------------------------------------------------------------------------
/.test/config.sh:
--------------------------------------------------------------------------------
1 | imageTests+=(
2 | [nginx]='
3 | ipv6
4 | static
5 | templates
6 | templates-resolver
7 | templates-resolver-ipv6
8 | workers
9 | modules
10 | '
11 | )
12 |
--------------------------------------------------------------------------------
/.test/tests/ipv6/expected-std-out.txt:
--------------------------------------------------------------------------------
1 | <title>Welcome to nginx!</title>
2 | 10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf
3 |
--------------------------------------------------------------------------------
/.test/tests/ipv6/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | [ "$DEBUG" ] && set -x
4 |
5 | set -eo pipefail
6 |
7 | # check if we have ipv6 available
8 | if [ ! -f "/proc/net/if_inet6" ]; then
9 | exit 0
10 | fi
11 |
12 | dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
13 |
14 | image="$1"
15 |
16 | clientImage='buildpack-deps:buster-curl'
17 | # ensure the clientImage is ready and available
18 | if ! docker image inspect "$clientImage" &> /dev/null; then
19 | docker pull "$clientImage" > /dev/null
20 | fi
21 |
22 | cid="$(docker run -d "$image")"
23 | trap "docker rm -vf $cid > /dev/null" EXIT
24 |
25 | _request() {
26 | local method="$1"
27 | shift
28 |
29 | local proto="$1"
30 | shift
31 |
32 | local url="${1#/}"
33 | shift
34 |
35 | if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
36 | echo >&2 "$image stopped unexpectedly!"
37 | ( set -x && docker logs "$cid" ) >&2 || true
38 | false
39 | fi
40 |
41 | docker run --rm \
42 | --link "$cid":nginx \
43 | "$clientImage" \
44 | curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
45 | }
46 |
47 | . "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
48 |
49 | # Check that we can request /
50 | _request GET http '/index.html' | grep '<title>Welcome to nginx!</title>'
51 |
52 | docker logs $cid 2>&1 | grep "Enabled listen on IPv6"
53 |
--------------------------------------------------------------------------------
/.test/tests/modules/nginx.conf.sme:
--------------------------------------------------------------------------------
1 | user nginx;
2 | worker_processes auto;
3 |
4 | load_module modules/ndk_http_module.so;
5 | load_module modules/ngx_http_echo_module.so;
6 | load_module modules/ngx_http_set_misc_module.so;
7 |
8 | error_log /var/log/nginx/error.log notice;
9 | pid /var/run/nginx.pid;
10 |
11 | events {
12 | worker_connections 1024;
13 | }
14 |
15 | http {
16 | include /etc/nginx/mime.types;
17 | default_type application/octet-stream;
18 |
19 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
20 | '$status $body_bytes_sent "$http_referer" '
21 | '"$http_user_agent" "$http_x_forwarded_for"';
22 |
23 | access_log /var/log/nginx/access.log main;
24 |
25 | server {
26 | listen 80 default_server;
27 | location /hello {
28 | set $raw "hello";
29 | set_sha1 $digest $raw;
30 |
31 | echo $digest;
32 | }
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/.test/tests/modules/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -eo pipefail
4 |
5 | dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
6 |
7 | echo $dir
8 |
9 | image="$1"
10 |
11 | case "$image" in
12 | *-perl)
13 | ;;
14 | *)
15 | echo >&2 "skipping non-leaf image: $image"
16 | exit
17 | ;;
18 | esac
19 |
20 | dockerfile="Dockerfile"
21 | case "$image" in
22 | *alpine*)
23 | dockerfile="$dockerfile.alpine"
24 | ;;
25 | esac
26 |
27 | clientImage='buildpack-deps:buster-curl'
28 | # ensure the clientImage is ready and available
29 | if ! docker image inspect "$clientImage" &> /dev/null; then
30 | docker pull "$clientImage" > /dev/null
31 | fi
32 |
33 | # Create an instance of the container-under-test
34 | modulesImage="$("$HOME/oi/test/tests/image-name.sh" librarytest/nginx-template "$image")"
35 | docker build --build-arg NGINX_FROM_IMAGE="$image" --build-arg ENABLED_MODULES="ndk set-misc echo" -t "$modulesImage" -f "modules/$dockerfile" "$GITHUB_WORKSPACE/modules"
36 |
37 | serverImage="${modulesImage}-sme"
38 | "$HOME/oi/test/tests/docker-build.sh" "$dir" "$serverImage" <<EOD
39 | FROM $modulesImage
40 | COPY nginx.conf.sme /etc/nginx/nginx.conf
41 | EOD
42 | 
43 | cid="$(docker run -d "$serverImage")"
44 | trap "docker rm -vf $cid > /dev/null" EXIT
45 |
46 | _request() {
47 | local method="$1"
48 | shift
49 |
50 | local proto="$1"
51 | shift
52 |
53 | local url="${1#/}"
54 | shift
55 |
56 | if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
57 | echo >&2 "$image stopped unexpectedly!"
58 | ( set -x && docker logs "$cid" ) >&2 || true
59 | false
60 | fi
61 |
62 | docker run --rm \
63 | --link "$cid":nginx \
64 | "$clientImage" \
65 | curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
66 | }
67 |
68 | . "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
69 |
70 | # Check that we can request /
71 | _request GET http '/hello' | grep 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'
72 |
--------------------------------------------------------------------------------
/.test/tests/static/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | [ "$DEBUG" ] && set -x
4 |
5 | set -eo pipefail
6 |
7 | dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
8 |
9 | image="$1"
10 |
11 | clientImage='buildpack-deps:buster-curl'
12 | # ensure the clientImage is ready and available
13 | if ! docker image inspect "$clientImage" &> /dev/null; then
14 | docker pull "$clientImage" > /dev/null
15 | fi
16 |
17 | # Create an instance of the container-under-test
18 | cid="$(docker run -d "$image")"
19 | trap "docker rm -vf $cid > /dev/null" EXIT
20 |
21 | _request() {
22 | local method="$1"
23 | shift
24 |
25 | local proto="$1"
26 | shift
27 |
28 | local url="${1#/}"
29 | shift
30 |
31 | if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
32 | echo >&2 "$image stopped unexpectedly!"
33 | ( set -x && docker logs "$cid" ) >&2 || true
34 | false
35 | fi
36 |
37 | docker run --rm \
38 | --link "$cid":nginx \
39 | "$clientImage" \
40 | curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
41 | }
42 |
43 | . "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
44 |
45 | # Check that we can request /
46 | _request GET http '/index.html' | grep '<title>Welcome to nginx!</title>'
47 |
--------------------------------------------------------------------------------
/.test/tests/templates-resolver-ipv6/expected-std-out.txt:
--------------------------------------------------------------------------------
1 | example.com - OK
2 |
--------------------------------------------------------------------------------
/.test/tests/templates-resolver-ipv6/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | [ "$DEBUG" ] && set -x
4 |
5 | set -eo pipefail
6 |
7 | # check if we have ipv6 available
8 | if [ ! -f "/proc/net/if_inet6" ]; then
9 | exit 0
10 | fi
11 |
12 | dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
13 |
14 | image="$1"
15 |
16 | clientImage='buildpack-deps:buster-curl'
17 | # ensure the clientImage is ready and available
18 | if ! docker image inspect "$clientImage" &> /dev/null; then
19 | docker pull "$clientImage" > /dev/null
20 | fi
21 |
22 | # Create a new Docker network
23 | nid="$(docker network create --ipv6 --subnet fd0c:7e57::/64 nginx-test-ipv6-network)"
24 |
25 | _network_exit_handler() {
26 | docker network rm -f $nid > /dev/null
27 | }
28 |
29 | # Create an instance of the container-under-test
30 | serverImage="$("$HOME/oi/test/tests/image-name.sh" librarytest/nginx-template "$image")"
31 | "$HOME/oi/test/tests/docker-build.sh" "$dir" "$serverImage" <<EOD
32 | FROM $image
33 | COPY server.conf.template /etc/nginx/templates/
34 | EOD
35 | 
36 | cid="$(docker run -d --network "$nid" -e NGINX_MY_SERVER_NAME=example.com "$serverImage")"
37 | _container_exit_handler() {
38 | docker rm -vf $cid > /dev/null
39 | }
40 | _exit_handler() { _container_exit_handler; _network_exit_handler; }
41 | trap "_exit_handler" EXIT
42 |
43 | ipv6cid="$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.GlobalIPv6Address}}{{end}}' $cid)"
44 |
45 | _request() {
46 | local method="$1"
47 | shift
48 |
49 | local proto="$1"
50 | shift
51 |
52 | local url="${1#/}"
53 | shift
54 |
55 | if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
56 | echo >&2 "$image stopped unexpectedly!"
57 | ( set -x && docker logs "$cid" ) >&2 || true
58 | false
59 | fi
60 |
61 | docker run --rm \
62 | --network "$nid" \
63 | "$clientImage" \
64 | curl -fsSL -X"$method" --connect-to "::[$ipv6cid]:" "$@" "$proto://example.com/$url"
65 | }
66 |
67 | . "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
68 |
69 | # Check that we can request /
70 | _request GET http '/resolver-templates' | grep 'example.com - OK'
71 |
--------------------------------------------------------------------------------
/.test/tests/templates-resolver-ipv6/server.conf.template:
--------------------------------------------------------------------------------
1 | resolver ${NGINX_LOCAL_RESOLVERS};
2 |
3 | server {
4 | listen 80;
5 | listen [::]:80;
6 | server_name ${NGINX_MY_SERVER_NAME};
7 | default_type text/plain;
8 | location = / { return 200 'OK\n'; }
9 | location / { return 200 "${NGINX_MY_SERVER_NAME} - OK\n"; }
10 | }
11 |
--------------------------------------------------------------------------------
/.test/tests/templates-resolver/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | [ "$DEBUG" ] && set -x
4 |
5 | set -eo pipefail
6 |
7 | dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
8 |
9 | image="$1"
10 |
11 | clientImage='buildpack-deps:buster-curl'
12 | # ensure the clientImage is ready and available
13 | if ! docker image inspect "$clientImage" &> /dev/null; then
14 | docker pull "$clientImage" > /dev/null
15 | fi
16 |
17 | # Create an instance of the container-under-test
18 | serverImage="$("$HOME/oi/test/tests/image-name.sh" librarytest/nginx-template "$image")"
19 | "$HOME/oi/test/tests/docker-build.sh" "$dir" "$serverImage" <<EOD
20 | FROM $image
21 | COPY server.conf.template /etc/nginx/templates/
22 | EOD
23 | cid="$(docker run -d -e NGINX_MY_SERVER_NAME=example.com "$serverImage")"
24 | trap "docker rm -vf $cid > /dev/null" EXIT
25 |
26 | _request() {
27 | local method="$1"
28 | shift
29 |
30 | local proto="$1"
31 | shift
32 |
33 | local url="${1#/}"
34 | shift
35 |
36 | if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
37 | echo >&2 "$image stopped unexpectedly!"
38 | ( set -x && docker logs "$cid" ) >&2 || true
39 | false
40 | fi
41 |
42 | docker run --rm \
43 | --link "$cid":nginx \
44 | "$clientImage" \
45 | curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
46 | }
47 |
48 | . "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
49 |
50 | # Check that we can request /
51 | _request GET http '/resolver-templates' | grep 'example.com - OK'
52 |
--------------------------------------------------------------------------------
/.test/tests/templates-resolver/server.conf.template:
--------------------------------------------------------------------------------
1 | resolver ${NGINX_LOCAL_RESOLVERS};
2 |
3 | server {
4 | listen 80;
5 | server_name ${NGINX_MY_SERVER_NAME};
6 | default_type text/plain;
7 | location = / { return 200 'OK\n'; }
8 | location / { return 200 "${NGINX_MY_SERVER_NAME} - OK\n"; }
9 | }
10 |
--------------------------------------------------------------------------------
/.test/tests/templates/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | [ "$DEBUG" ] && set -x
4 |
5 | set -eo pipefail
6 |
7 | dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
8 |
9 | image="$1"
10 |
11 | clientImage='buildpack-deps:buster-curl'
12 | # ensure the clientImage is ready and available
13 | if ! docker image inspect "$clientImage" &> /dev/null; then
14 | docker pull "$clientImage" > /dev/null
15 | fi
16 |
17 | # Create an instance of the container-under-test
18 | serverImage="$("$HOME/oi/test/tests/image-name.sh" librarytest/nginx-template "$image")"
19 | "$HOME/oi/test/tests/docker-build.sh" "$dir" "$serverImage" <<EOD
20 | FROM $image
21 | COPY server.conf.template /etc/nginx/templates/
22 | EOD
23 | cid="$(docker run -d -e NGINX_MY_SERVER_NAME=example.com "$serverImage")"
24 | trap "docker rm -vf $cid > /dev/null" EXIT
25 |
26 | _request() {
27 | local method="$1"
28 | shift
29 |
30 | local proto="$1"
31 | shift
32 |
33 | local url="${1#/}"
34 | shift
35 |
36 | if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
37 | echo >&2 "$image stopped unexpectedly!"
38 | ( set -x && docker logs "$cid" ) >&2 || true
39 | false
40 | fi
41 |
42 | docker run --rm \
43 | --link "$cid":nginx \
44 | "$clientImage" \
45 | curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
46 | }
47 |
48 | . "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
49 |
50 | # Check that we can request /
51 | _request GET http '/templates' | grep 'example.com - OK'
52 |
--------------------------------------------------------------------------------
/.test/tests/templates/server.conf.template:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80;
3 | server_name ${NGINX_MY_SERVER_NAME};
4 | default_type text/plain;
5 | location = / { return 200 'OK\n'; }
6 | location / { return 200 "${NGINX_MY_SERVER_NAME} - OK\n"; }
7 | }
8 |
--------------------------------------------------------------------------------
/.test/tests/workers/expected-std-out.txt:
--------------------------------------------------------------------------------
1 | example.com - OK
2 | # Commented out by 30-tune-worker-processes.sh
3 |
--------------------------------------------------------------------------------
/.test/tests/workers/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | [ "$DEBUG" ] && set -x
4 |
5 | set -eo pipefail
6 |
7 | dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
8 |
9 | image="$1"
10 |
11 | clientImage='buildpack-deps:buster-curl'
12 | # ensure the clientImage is ready and available
13 | if ! docker image inspect "$clientImage" &> /dev/null; then
14 | docker pull "$clientImage" > /dev/null
15 | fi
16 |
17 | # Create an instance of the container-under-test
18 | serverImage="$("$HOME/oi/test/tests/image-name.sh" librarytest/nginx-template "$image")"
19 | "$HOME/oi/test/tests/docker-build.sh" "$dir" "$serverImage" <<EOD
20 | FROM $image
21 | COPY server.conf.template /etc/nginx/templates/
22 | EOD
23 | cid="$(docker run -d -e NGINX_MY_SERVER_NAME=example.com -e NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE=1 "$serverImage")"
24 | trap "docker rm -vf $cid > /dev/null" EXIT
25 |
26 | _request() {
27 | local method="$1"
28 | shift
29 |
30 | local proto="$1"
31 | shift
32 |
33 | local url="${1#/}"
34 | shift
35 |
36 | if [ "$(docker inspect -f '{{.State.Running}}' "$cid" 2>/dev/null)" != 'true' ]; then
37 | echo >&2 "$image stopped unexpectedly!"
38 | ( set -x && docker logs "$cid" ) >&2 || true
39 | false
40 | fi
41 |
42 | docker run --rm \
43 | --link "$cid":nginx \
44 | "$clientImage" \
45 | curl -fsSL -X"$method" --connect-to '::nginx:' "$@" "$proto://example.com/$url"
46 | }
47 |
48 | . "$HOME/oi/test/retry.sh" '[ "$(_request GET / --output /dev/null || echo $?)" != 7 ]'
49 |
50 | # Check that we can request /
51 | _request GET http '/worker-templates' | grep 'example.com - OK'
52 |
53 | result="$(docker exec $cid grep "Commented out by" /etc/nginx/nginx.conf)"
54 |
55 | echo "$result" | cut -d\ -f 1-5
56 |
--------------------------------------------------------------------------------
/.test/tests/workers/server.conf.template:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80;
3 | server_name ${NGINX_MY_SERVER_NAME};
4 | default_type text/plain;
5 | location = / { return 200 'OK\n'; }
6 | location / { return 200 "${NGINX_MY_SERVER_NAME} - OK\n"; }
7 | }
8 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation.
6 |
7 | We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.
8 |
9 | ## Our Standards
10 |
11 | Examples of behavior that contributes to a positive environment for our community include:
12 |
13 | - Demonstrating empathy and kindness toward other people.
14 | - Being respectful of differing opinions, viewpoints, and experiences.
15 | - Giving and gracefully accepting constructive feedback.
16 | - Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience.
17 | - Focusing on what is best not just for us as individuals, but for the overall community.
18 |
19 | Examples of unacceptable behavior include:
20 |
21 | - The use of sexualized language or imagery, and sexual attention or advances of any kind.
22 | - Trolling, insulting or derogatory comments, and personal or political attacks.
23 | - Public or private harassment.
24 | - Publishing others' private information, such as a physical or email address, without their explicit permission.
25 | - Other conduct which could reasonably be considered inappropriate in a professional setting.
26 |
27 | ## Enforcement Responsibilities
28 |
29 | Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.
30 |
31 | Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.
32 |
33 | ## Scope
34 |
35 | This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official email address, posting via an official social media account, or acting as an appointed representative at an online or offline event.
36 |
37 | ## Enforcement
38 |
39 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at <oss-conduct@f5.com>. All complaints will be reviewed and investigated promptly and fairly.
40 |
41 | All community leaders are obligated to respect the privacy and security of the reporter of any incident.
42 |
43 | ## Enforcement Guidelines
44 |
45 | Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:
46 |
47 | ### 1. Correction
48 |
49 | **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.
50 |
51 | **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.
52 |
53 | ### 2. Warning
54 |
55 | **Community Impact**: A violation through a single incident or series of actions.
56 |
57 | **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.
58 |
59 | ### 3. Temporary Ban
60 |
61 | **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior.
62 |
63 | **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.
64 |
65 | ### 4. Permanent Ban
66 |
67 | **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.
68 |
69 | **Consequence**: A permanent ban from any sort of public interaction within the community.
70 |
71 | ## Attribution
72 |
73 | This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 2.1, available at <https://www.contributor-covenant.org/version/2/1/code_of_conduct.html>.
74 |
75 | Community Impact Guidelines were inspired by
76 | [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/inclusion).
77 |
78 | For answers to common questions about this code of conduct, see the FAQ at <https://www.contributor-covenant.org/faq>. Translations are available at <https://www.contributor-covenant.org/translations>.
79 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing Guidelines
2 |
3 | The following is a set of guidelines for contributing to the Docker NGINX image. We really appreciate that you are considering contributing!
4 |
5 | #### Table Of Contents
6 |
7 | - [Getting Started](#getting-started)
8 | - [Contributing](#contributing)
9 | - [Code Guidelines](#code-guidelines)
10 | - [Code of Conduct](/CODE_OF_CONDUCT.md)
11 |
12 | ## Getting Started
13 |
14 | Follow our [how to use this image guide](https://hub.docker.com/_/nginx/) to get the Docker NGINX image up and running.
15 |
16 | ## Contributing
17 |
18 | ### Report a Bug
19 |
20 | To report a bug, open an issue on GitHub with the label `bug` using the available [bug report issue form](/.github/ISSUE_TEMPLATE/bug_report.yml). Please ensure the bug has not already been reported. **If the bug is a potential security vulnerability, please report it using our [security policy](/SECURITY.md).**
21 |
22 | ### Suggest a Feature or Enhancement
23 |
24 | To suggest a feature or enhancement, please create an issue on GitHub with the label `enhancement` using the available [feature request issue form](/.github/ISSUE_TEMPLATE/feature_request.yml). Please ensure the feature or enhancement has not already been suggested.
25 |
26 | ### Open a Pull Request (PR)
27 |
28 | - Fork the repo, create a branch, implement your changes, add any relevant tests, and submit a PR when your changes are **tested** and ready for review.
29 | - Fill in the [PR template](/.github/pull_request_template.md).
30 |
31 | **Note:** If you'd like to implement a new feature, please consider creating a [feature request issue](/.github/ISSUE_TEMPLATE/feature_request.yml) first to start a discussion about the feature.
32 |
33 | #### F5 Contributor License Agreement (CLA)
34 |
35 | F5 requires all contributors to agree to the terms of the F5 CLA (available [here](https://github.com/f5/.github/blob/main/docs/f5_cla.md)) before any of their changes can be incorporated into an F5 Open Source repository (even contributions to the F5 CLA itself!).
36 |
37 | If you have not yet agreed to the F5 CLA terms and submit a PR to this repository, a bot will prompt you to view and agree to the F5 CLA. You will have to agree to the F5 CLA terms through a comment in the PR before any of your changes can be merged. Your agreement signature will be safely stored by F5 and no longer be required in future PRs.
38 |
39 | ## Code Guidelines
40 |
41 | ### Git Guidelines
42 |
43 | - Keep a clean, concise and meaningful git commit history on your branch (within reason), rebasing locally and squashing before submitting a PR.
44 | - If possible and/or relevant, use the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) format when writing a commit message, so that changelogs can be automatically generated
45 | - Follow the guidelines of writing a good commit message as described here and summarised in the next few points:
46 | - In the subject line, use the present tense ("Add feature" not "Added feature").
47 | - In the subject line, use the imperative mood ("Move cursor to..." not "Moves cursor to...").
48 | - Limit the subject line to 72 characters or less.
49 | - Reference issues and pull requests liberally after the subject line.
50 | - Add more detailed description in the body of the git message (`git commit -a` to give you more space and time in your text editor to write a good message instead of `git commit -am`).
51 |
52 | ### Docker Guidelines
53 |
54 | - Update any entrypoint scripts via the scripts contained in the `/entrypoint` directory.
55 | - Update any Dockerfiles via the Dockerfile templates in the root directory (e.g. `Dockerfile-alpine.template`).
56 | - Run the `./update.sh` script to apply all entrypoint/Dockerfile template changes to the relevant image entrypoints & Dockerfiles.
57 |
--------------------------------------------------------------------------------
/Dockerfile-alpine-otel.template:
--------------------------------------------------------------------------------
# Template for the nginx alpine + OpenTelemetry-module image. The %%NAME%%
# placeholders are substituted by ./update.sh (see CONTRIBUTING.md); edit this
# template rather than the generated Dockerfiles.
1 | FROM nginx:%%NGINX_VERSION%%-alpine
2 |
3 | ENV OTEL_VERSION %%OTEL_VERSION%%
4 |
5 | RUN set -x \
6 | && apkArch="$(cat /etc/apk/arch)" \
7 | && nginxPackages="%%PACKAGES%%
8 | " \
9 | # install prerequisites for public key and pkg-oss checks
10 | && apk add --no-cache --virtual .checksum-deps \
11 | openssl \
12 | && case "$apkArch" in \
13 | x86_64|aarch64) \
14 | # arches officially built by upstream
15 | apk add -X "%%PACKAGEREPO%%v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
16 | ;; \
17 | *) \
18 | # we're on an architecture upstream doesn't officially build for
19 | # let's build binaries from the published packaging sources
20 | set -x \
21 | && tempDir="$(mktemp -d)" \
22 | && chown nobody:nobody $tempDir \
23 | && apk add --no-cache --virtual .build-deps \
24 | gcc \
25 | libc-dev \
26 | make \
27 | openssl-dev \
28 | pcre2-dev \
29 | zlib-dev \
30 | linux-headers \
31 | cmake \
32 | bash \
33 | alpine-sdk \
34 | findutils \
35 | curl \
36 | xz \
37 | protobuf-dev \
38 | grpc-dev \
# build, index, and sign the packages as the unprivileged "nobody" user; the
# quotes and dollar signs inside the su -c "..." script are escaped so they
# expand in the inner shell rather than the outer one
39 | && su nobody -s /bin/sh -c " \
40 | export HOME=${tempDir} \
41 | && cd ${tempDir} \
42 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/%%REVISION%%.tar.gz \
43 | && PKGOSSCHECKSUM=\"%%PKGOSSCHECKSUM%% *%%REVISION%%.tar.gz\" \
44 | && if [ \"\$(openssl sha512 -r %%REVISION%%.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
45 | echo \"pkg-oss tarball checksum verification succeeded!\"; \
46 | else \
47 | echo \"pkg-oss tarball checksum verification failed!\"; \
48 | exit 1; \
49 | fi \
50 | && tar xzvf %%REVISION%%.tar.gz \
51 | && cd pkg-oss-%%REVISION%% \
52 | && cd alpine \
53 | && make %%BUILDTARGET%% \
54 | && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
55 | && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
56 | " \
57 | && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
58 | && apk del --no-network .build-deps \
59 | && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
60 | ;; \
61 | esac \
62 | # remove the openssl package that was only installed for checksum verification
63 | && apk del --no-network .checksum-deps \
64 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
65 | && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
66 | && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi
67 |
--------------------------------------------------------------------------------
/Dockerfile-alpine-perl.template:
--------------------------------------------------------------------------------
# Template for the nginx alpine + perl-module image. The %%NAME%% placeholders
# are substituted by ./update.sh (see CONTRIBUTING.md); edit this template
# rather than the generated Dockerfiles.
1 | FROM nginx:%%NGINX_VERSION%%-alpine
2 |
3 | RUN set -x \
4 | && apkArch="$(cat /etc/apk/arch)" \
5 | && nginxPackages="%%PACKAGES%%
6 | " \
7 | # install prerequisites for public key and pkg-oss checks
8 | && apk add --no-cache --virtual .checksum-deps \
9 | openssl \
10 | && case "$apkArch" in \
11 | x86_64|aarch64) \
12 | # arches officially built by upstream
13 | apk add -X "%%PACKAGEREPO%%v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
14 | ;; \
15 | *) \
16 | # we're on an architecture upstream doesn't officially build for
17 | # let's build binaries from the published packaging sources
18 | set -x \
19 | && tempDir="$(mktemp -d)" \
20 | && chown nobody:nobody $tempDir \
21 | && apk add --no-cache --virtual .build-deps \
22 | gcc \
23 | libc-dev \
24 | make \
25 | openssl-dev \
26 | pcre2-dev \
27 | zlib-dev \
28 | linux-headers \
29 | perl-dev \
30 | bash \
31 | alpine-sdk \
32 | findutils \
33 | curl \
# build, index, and sign the packages as the unprivileged "nobody" user; the
# quotes and dollar signs inside the su -c "..." script are escaped so they
# expand in the inner shell rather than the outer one
34 | && su nobody -s /bin/sh -c " \
35 | export HOME=${tempDir} \
36 | && cd ${tempDir} \
37 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/%%REVISION%%.tar.gz \
38 | && PKGOSSCHECKSUM=\"%%PKGOSSCHECKSUM%% *%%REVISION%%.tar.gz\" \
39 | && if [ \"\$(openssl sha512 -r %%REVISION%%.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
40 | echo \"pkg-oss tarball checksum verification succeeded!\"; \
41 | else \
42 | echo \"pkg-oss tarball checksum verification failed!\"; \
43 | exit 1; \
44 | fi \
45 | && tar xzvf %%REVISION%%.tar.gz \
46 | && cd pkg-oss-%%REVISION%% \
47 | && cd alpine \
48 | && make %%BUILDTARGET%% \
49 | && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
50 | && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
51 | " \
52 | && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
53 | && apk del --no-network .build-deps \
54 | && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
55 | ;; \
56 | esac \
57 | # remove the openssl package that was only installed for checksum verification
58 | && apk del --no-network .checksum-deps \
59 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
60 | && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
61 | && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi
62 |
--------------------------------------------------------------------------------
/Dockerfile-alpine-slim.template:
--------------------------------------------------------------------------------
# Base template for the nginx alpine-slim image. The %%NAME%% placeholders are
# substituted by ./update.sh (see CONTRIBUTING.md); edit this template rather
# than the generated Dockerfiles.
1 | FROM alpine:%%ALPINE_VERSION%%
2 |
3 | LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
4 |
5 | ENV NGINX_VERSION %%NGINX_VERSION%%
6 | ENV PKG_RELEASE %%PKG_RELEASE%%
7 | ENV DYNPKG_RELEASE %%DYNPKG_RELEASE%%
8 |
9 | RUN set -x \
10 | # create nginx user/group first, to be consistent throughout docker variants
11 | && addgroup -g 101 -S nginx \
12 | && adduser -S -D -H -u 101 -h /var/cache/nginx -s /sbin/nologin -G nginx -g nginx nginx \
13 | && apkArch="$(cat /etc/apk/arch)" \
14 | && nginxPackages="%%PACKAGES%%
15 | " \
16 | # install prerequisites for public key and pkg-oss checks
17 | && apk add --no-cache --virtual .checksum-deps \
18 | openssl \
19 | && case "$apkArch" in \
20 | x86_64|aarch64) \
21 | # arches officially built by upstream
# verify the nginx.org apk signing key against a pinned sha512 before trusting it
22 | set -x \
23 | && KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \
24 | && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \
25 | && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \
26 | echo "key verification succeeded!"; \
27 | mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \
28 | else \
29 | echo "key verification failed!"; \
30 | exit 1; \
31 | fi \
32 | && apk add -X "%%PACKAGEREPO%%v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
33 | ;; \
34 | *) \
35 | # we're on an architecture upstream doesn't officially build for
36 | # let's build binaries from the published packaging sources
37 | set -x \
38 | && tempDir="$(mktemp -d)" \
39 | && chown nobody:nobody $tempDir \
40 | && apk add --no-cache --virtual .build-deps \
41 | gcc \
42 | libc-dev \
43 | make \
44 | openssl-dev \
45 | pcre2-dev \
46 | zlib-dev \
47 | linux-headers \
48 | bash \
49 | alpine-sdk \
50 | findutils \
51 | curl \
# build, index, and sign the packages as the unprivileged "nobody" user; the
# quotes and dollar signs inside the su -c "..." script are escaped so they
# expand in the inner shell rather than the outer one
52 | && su nobody -s /bin/sh -c " \
53 | export HOME=${tempDir} \
54 | && cd ${tempDir} \
55 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/%%REVISION%%.tar.gz \
56 | && PKGOSSCHECKSUM=\"%%PKGOSSCHECKSUM%% *%%REVISION%%.tar.gz\" \
57 | && if [ \"\$(openssl sha512 -r %%REVISION%%.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
58 | echo \"pkg-oss tarball checksum verification succeeded!\"; \
59 | else \
60 | echo \"pkg-oss tarball checksum verification failed!\"; \
61 | exit 1; \
62 | fi \
63 | && tar xzvf %%REVISION%%.tar.gz \
64 | && cd pkg-oss-%%REVISION%% \
65 | && cd alpine \
66 | && make %%BUILDTARGET%% \
67 | && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
68 | && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
69 | " \
70 | && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
71 | && apk del --no-network .build-deps \
72 | && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
73 | ;; \
74 | esac \
75 | # remove the openssl package that was only installed for checksum verification
76 | && apk del --no-network .checksum-deps \
77 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
78 | && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
79 | && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
80 | # Add `envsubst` for templating environment variables
81 | && apk add --no-cache gettext-envsubst \
82 | # Bring in tzdata so users could set the timezones through the environment
83 | # variables
84 | && apk add --no-cache tzdata \
85 | # forward request and error logs to docker log collector
86 | && ln -sf /dev/stdout /var/log/nginx/access.log \
87 | && ln -sf /dev/stderr /var/log/nginx/error.log \
88 | # create a docker-entrypoint.d directory
89 | && mkdir /docker-entrypoint.d
90 |
91 | COPY docker-entrypoint.sh /
92 | COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
93 | COPY 15-local-resolvers.envsh /docker-entrypoint.d
94 | COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
95 | COPY 30-tune-worker-processes.sh /docker-entrypoint.d
96 | ENTRYPOINT ["/docker-entrypoint.sh"]
97 |
98 | EXPOSE 80
99 |
100 | STOPSIGNAL SIGQUIT
101 |
102 | CMD ["nginx", "-g", "daemon off;"]
103 |
--------------------------------------------------------------------------------
/Dockerfile-alpine.template:
--------------------------------------------------------------------------------
# Template for the full nginx alpine image (adds njs and extra dynamic-module
# build inputs on top of alpine-slim). The %%NAME%% placeholders are
# substituted by ./update.sh (see CONTRIBUTING.md); edit this template rather
# than the generated Dockerfiles.
1 | FROM nginx:%%NGINX_VERSION%%-alpine-slim
2 |
3 | ENV NJS_VERSION %%NJS_VERSION%%
4 | ENV NJS_RELEASE %%NJS_RELEASE%%
5 |
6 | RUN set -x \
7 | && apkArch="$(cat /etc/apk/arch)" \
8 | && nginxPackages="%%PACKAGES%%
9 | " \
10 | # install prerequisites for public key and pkg-oss checks
11 | && apk add --no-cache --virtual .checksum-deps \
12 | openssl \
13 | && case "$apkArch" in \
14 | x86_64|aarch64) \
15 | # arches officially built by upstream
16 | apk add -X "%%PACKAGEREPO%%v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
17 | ;; \
18 | *) \
19 | # we're on an architecture upstream doesn't officially build for
20 | # let's build binaries from the published packaging sources
21 | set -x \
22 | && tempDir="$(mktemp -d)" \
23 | && chown nobody:nobody $tempDir \
24 | && apk add --no-cache --virtual .build-deps \
25 | gcc \
26 | libc-dev \
27 | make \
28 | openssl-dev \
29 | pcre2-dev \
30 | zlib-dev \
31 | linux-headers \
32 | libxslt-dev \
33 | gd-dev \
34 | geoip-dev \
35 | libedit-dev \
36 | bash \
37 | alpine-sdk \
38 | findutils \
39 | curl \
# build, index, and sign the packages as the unprivileged "nobody" user; the
# quotes and dollar signs inside the su -c "..." script are escaped so they
# expand in the inner shell rather than the outer one
40 | && su nobody -s /bin/sh -c " \
41 | export HOME=${tempDir} \
42 | && cd ${tempDir} \
43 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/%%REVISION%%.tar.gz \
44 | && PKGOSSCHECKSUM=\"%%PKGOSSCHECKSUM%% *%%REVISION%%.tar.gz\" \
45 | && if [ \"\$(openssl sha512 -r %%REVISION%%.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
46 | echo \"pkg-oss tarball checksum verification succeeded!\"; \
47 | else \
48 | echo \"pkg-oss tarball checksum verification failed!\"; \
49 | exit 1; \
50 | fi \
51 | && tar xzvf %%REVISION%%.tar.gz \
52 | && cd pkg-oss-%%REVISION%% \
53 | && cd alpine \
54 | && make %%BUILDTARGET%% \
55 | && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
56 | && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
57 | " \
58 | && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
59 | && apk del --no-network .build-deps \
60 | && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
61 | ;; \
62 | esac \
63 | # remove the openssl package that was only installed for checksum verification
64 | && apk del --no-network .checksum-deps \
65 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
66 | && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
67 | && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
68 | # Bring in curl and ca-certificates to make registering on DNS SD easier
69 | && apk add --no-cache curl ca-certificates
70 |
--------------------------------------------------------------------------------
/Dockerfile-debian-otel.template:
--------------------------------------------------------------------------------
# Template for the nginx debian + OpenTelemetry-module image. The %%NAME%%
# placeholders are substituted by ./update.sh (see CONTRIBUTING.md); edit this
# template rather than the generated Dockerfiles.
1 | FROM nginx:%%NGINX_VERSION%%
2 |
3 | ENV OTEL_VERSION %%OTEL_VERSION%%
4 |
5 | RUN set -x; \
6 | NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
7 | dpkgArch="$(dpkg --print-architecture)" \
8 | && nginxPackages="%%PACKAGES%%
9 | " \
10 | && case "$dpkgArch" in \
11 | amd64|arm64) \
12 | # arches officially built by upstream
13 | echo "deb [signed-by=$NGINX_GPGKEY_PATH] %%PACKAGEREPO%% %%DEBIAN_VERSION%% nginx" >> /etc/apt/sources.list.d/nginx.list \
14 | && apt-get update \
15 | ;; \
16 | *) \
17 | # we're on an architecture upstream doesn't officially build for
18 | # let's build binaries from the published packaging sources
19 | # new directory for storing sources and .deb files
20 | tempDir="$(mktemp -d)" \
21 | && chmod 777 "$tempDir" \
22 | # (777 to ensure APT's "_apt" user can access it too)
23 | \
24 | # save list of currently-installed packages so build dependencies can be cleanly removed later
25 | && savedAptMark="$(apt-mark showmanual)" \
26 | \
27 | # build .deb files from upstream's packaging sources
28 | && apt-get update \
29 | && apt-get install --no-install-recommends --no-install-suggests -y \
30 | curl \
31 | devscripts \
32 | equivs \
33 | git \
34 | libxml2-utils \
35 | lsb-release \
36 | xsltproc \
# NOTE(review): the sha512 check below relies on openssl already being present
# in the base image / pulled in by the build tools — confirm
37 | && ( \
38 | cd "$tempDir" \
39 | && REVISION="%%REVISION%%" \
40 | && REVISION=${REVISION%~*} \
41 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
42 | && PKGOSSCHECKSUM="%%PKGOSSCHECKSUM%% *${REVISION}.tar.gz" \
43 | && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
44 | echo "pkg-oss tarball checksum verification succeeded!"; \
45 | else \
46 | echo "pkg-oss tarball checksum verification failed!"; \
47 | exit 1; \
48 | fi \
49 | && tar xzvf ${REVISION}.tar.gz \
50 | && cd pkg-oss-${REVISION} \
51 | && cd debian \
52 | && for target in %%BUILDTARGET%%; do \
53 | make rules-$target; \
54 | mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
55 | debuild-$target/nginx-$NGINX_VERSION/debian/control; \
56 | done \
57 | && make %%BUILDTARGET%% \
58 | ) \
59 | # we don't remove APT lists here because they get re-downloaded and removed later
60 | \
61 | # reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
62 | # (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
63 | && apt-mark showmanual | xargs apt-mark auto > /dev/null \
64 | && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
65 | \
66 | # create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
67 | && ls -lAFh "$tempDir" \
68 | && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
69 | && grep '^Package: ' "$tempDir/Packages" \
70 | && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
71 | # work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
72 | # Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
73 | # ...
74 | # E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
75 | && apt-get -o Acquire::GzipIndexes=false update \
76 | ;; \
77 | esac \
78 | \
79 | && apt-get install --no-install-recommends --no-install-suggests -y \
80 | $nginxPackages \
81 | gettext-base \
82 | curl \
83 | && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
84 | \
85 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
86 | && if [ -n "$tempDir" ]; then \
87 | apt-get purge -y --auto-remove \
88 | && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
89 | fi
90 |
--------------------------------------------------------------------------------
/Dockerfile-debian-perl.template:
--------------------------------------------------------------------------------
# Template for the nginx debian + perl-module image. The %%NAME%% placeholders
# are substituted by ./update.sh (see CONTRIBUTING.md); edit this template
# rather than the generated Dockerfiles.
1 | FROM nginx:%%NGINX_VERSION%%
2 |
3 | RUN set -x; \
4 | NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
5 | dpkgArch="$(dpkg --print-architecture)" \
6 | && nginxPackages="%%PACKAGES%%
7 | " \
8 | && case "$dpkgArch" in \
9 | amd64|arm64) \
10 | # arches officially built by upstream
11 | echo "deb [signed-by=$NGINX_GPGKEY_PATH] %%PACKAGEREPO%% %%DEBIAN_VERSION%% nginx" >> /etc/apt/sources.list.d/nginx.list \
12 | && apt-get update \
13 | ;; \
14 | *) \
15 | # we're on an architecture upstream doesn't officially build for
16 | # let's build binaries from the published packaging sources
17 | # new directory for storing sources and .deb files
18 | tempDir="$(mktemp -d)" \
19 | && chmod 777 "$tempDir" \
20 | # (777 to ensure APT's "_apt" user can access it too)
21 | \
22 | # save list of currently-installed packages so build dependencies can be cleanly removed later
23 | && savedAptMark="$(apt-mark showmanual)" \
24 | \
25 | # build .deb files from upstream's packaging sources
26 | && apt-get update \
27 | && apt-get install --no-install-recommends --no-install-suggests -y \
28 | curl \
29 | devscripts \
30 | equivs \
31 | git \
32 | libxml2-utils \
33 | lsb-release \
34 | xsltproc \
# NOTE(review): the sha512 check below relies on openssl already being present
# in the base image / pulled in by the build tools — confirm
35 | && ( \
36 | cd "$tempDir" \
37 | && REVISION="%%REVISION%%" \
38 | && REVISION=${REVISION%~*} \
39 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
40 | && PKGOSSCHECKSUM="%%PKGOSSCHECKSUM%% *${REVISION}.tar.gz" \
41 | && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
42 | echo "pkg-oss tarball checksum verification succeeded!"; \
43 | else \
44 | echo "pkg-oss tarball checksum verification failed!"; \
45 | exit 1; \
46 | fi \
47 | && tar xzvf ${REVISION}.tar.gz \
48 | && cd pkg-oss-${REVISION} \
49 | && cd debian \
50 | && for target in %%BUILDTARGET%%; do \
51 | make rules-$target; \
52 | mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
53 | debuild-$target/nginx-$NGINX_VERSION/debian/control; \
54 | done \
55 | && make %%BUILDTARGET%% \
56 | ) \
57 | # we don't remove APT lists here because they get re-downloaded and removed later
58 | \
59 | # reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
60 | # (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
61 | && apt-mark showmanual | xargs apt-mark auto > /dev/null \
62 | && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
63 | \
64 | # create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
65 | && ls -lAFh "$tempDir" \
66 | && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
67 | && grep '^Package: ' "$tempDir/Packages" \
68 | && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
69 | # work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
70 | # Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
71 | # ...
72 | # E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
73 | && apt-get -o Acquire::GzipIndexes=false update \
74 | ;; \
75 | esac \
76 | \
77 | && apt-get install --no-install-recommends --no-install-suggests -y \
78 | $nginxPackages \
79 | gettext-base \
80 | curl \
81 | && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
82 | \
83 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
84 | && if [ -n "$tempDir" ]; then \
85 | apt-get purge -y --auto-remove \
86 | && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
87 | fi
88 |
--------------------------------------------------------------------------------
/Dockerfile-debian.template:
--------------------------------------------------------------------------------
1 | FROM debian:%%DEBIAN_VERSION%%-slim
2 |
3 | LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"
4 |
5 | ENV NGINX_VERSION %%NGINX_VERSION%%
6 | ENV NJS_VERSION %%NJS_VERSION%%
7 | ENV NJS_RELEASE %%NJS_RELEASE%%
8 | ENV PKG_RELEASE %%PKG_RELEASE%%
9 | ENV DYNPKG_RELEASE %%DYNPKG_RELEASE%%
10 |
11 | RUN set -x \
12 | # create nginx user/group first, to be consistent throughout docker variants
13 | && groupadd --system --gid 101 nginx \
14 | && useradd --system --gid nginx --no-create-home --home /nonexistent --comment "nginx user" --shell /bin/false --uid 101 nginx \
15 | && apt-get update \
16 | && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates \
17 | && \
18 | NGINX_GPGKEYS="573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 8540A6F18833A80E9C1653A42FD21310B49F6B46 9E9BE90EACBCDE69FE9B204CBCDCD8A38D88A2B3"; \
19 | NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
20 | export GNUPGHOME="$(mktemp -d)"; \
21 | found=''; \
22 | for NGINX_GPGKEY in $NGINX_GPGKEYS; do \
23 | for server in \
24 | hkp://keyserver.ubuntu.com:80 \
25 | pgp.mit.edu \
26 | ; do \
27 | echo "Fetching GPG key $NGINX_GPGKEY from $server"; \
28 | gpg1 --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \
29 | done; \
30 | test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \
31 | done; \
32 | gpg1 --export "$NGINX_GPGKEYS" > "$NGINX_GPGKEY_PATH" ; \
33 | rm -rf "$GNUPGHOME"; \
34 | apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \
35 | && dpkgArch="$(dpkg --print-architecture)" \
36 | && nginxPackages="%%PACKAGES%%
37 | " \
38 | && case "$dpkgArch" in \
39 | amd64|arm64) \
40 | # arches officialy built by upstream
41 | echo "deb [signed-by=$NGINX_GPGKEY_PATH] %%PACKAGEREPO%% %%DEBIAN_VERSION%% nginx" >> /etc/apt/sources.list.d/nginx.list \
42 | && apt-get update \
43 | ;; \
44 | *) \
45 | # we're on an architecture upstream doesn't officially build for
46 | # let's build binaries from the published packaging sources
47 | # new directory for storing sources and .deb files
48 | tempDir="$(mktemp -d)" \
49 | && chmod 777 "$tempDir" \
50 | # (777 to ensure APT's "_apt" user can access it too)
51 | \
52 | # save list of currently-installed packages so build dependencies can be cleanly removed later
53 | && savedAptMark="$(apt-mark showmanual)" \
54 | \
55 | # build .deb files from upstream's packaging sources
56 | && apt-get update \
57 | && apt-get install --no-install-recommends --no-install-suggests -y \
58 | curl \
59 | devscripts \
60 | equivs \
61 | git \
62 | libxml2-utils \
63 | lsb-release \
64 | xsltproc \
65 | && ( \
66 | cd "$tempDir" \
67 | && REVISION="%%REVISION%%" \
68 | && REVISION=${REVISION%~*} \
69 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
70 | && PKGOSSCHECKSUM="%%PKGOSSCHECKSUM%% *${REVISION}.tar.gz" \
71 | && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
72 | echo "pkg-oss tarball checksum verification succeeded!"; \
73 | else \
74 | echo "pkg-oss tarball checksum verification failed!"; \
75 | exit 1; \
76 | fi \
77 | && tar xzvf ${REVISION}.tar.gz \
78 | && cd pkg-oss-${REVISION} \
79 | && cd debian \
80 | && for target in %%BUILDTARGET%%; do \
81 | make rules-$target; \
82 | mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
83 | debuild-$target/nginx-$NGINX_VERSION/debian/control; \
84 | done \
85 | && make %%BUILDTARGET%% \
86 | ) \
87 | # we don't remove APT lists here because they get re-downloaded and removed later
88 | \
89 | # reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
90 | # (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
91 | && apt-mark showmanual | xargs apt-mark auto > /dev/null \
92 | && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
93 | \
94 | # create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
95 | && ls -lAFh "$tempDir" \
96 | && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
97 | && grep '^Package: ' "$tempDir/Packages" \
98 | && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
99 | # work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
100 | # Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
101 | # ...
102 | # E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
103 | && apt-get -o Acquire::GzipIndexes=false update \
104 | ;; \
105 | esac \
106 | \
107 | && apt-get install --no-install-recommends --no-install-suggests -y \
108 | $nginxPackages \
109 | gettext-base \
110 | curl \
111 | && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
112 | \
113 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
114 | && if [ -n "$tempDir" ]; then \
115 | apt-get purge -y --auto-remove \
116 | && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
117 | fi \
118 | # forward request and error logs to docker log collector
119 | && ln -sf /dev/stdout /var/log/nginx/access.log \
120 | && ln -sf /dev/stderr /var/log/nginx/error.log \
121 | # create a docker-entrypoint.d directory
122 | && mkdir /docker-entrypoint.d
123 |
# install the entrypoint and its numbered hook scripts (run in sort -V order)
COPY docker-entrypoint.sh /
COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
COPY 15-local-resolvers.envsh /docker-entrypoint.d
COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
COPY 30-tune-worker-processes.sh /docker-entrypoint.d
ENTRYPOINT ["/docker-entrypoint.sh"]

EXPOSE 80

# nginx treats SIGQUIT as a graceful shutdown (SIGTERM is a fast shutdown)
STOPSIGNAL SIGQUIT

CMD ["nginx", "-g", "daemon off;"]
136 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (C) 2011-2023 F5, Inc.
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions
6 | are met:
7 | 1. Redistributions of source code must retain the above copyright
8 | notice, this list of conditions and the following disclaimer.
9 | 2. Redistributions in binary form must reproduce the above copyright
10 | notice, this list of conditions and the following disclaimer in the
11 | documentation and/or other materials provided with the distribution.
12 |
13 | THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 | ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 | OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 | HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 | OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 | SUCH DAMAGE.
24 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [![Project Status: Active – The project has reached a stable, usable state and is being actively developed.](https://www.repostatus.org/badges/latest/active.svg)](https://www.repostatus.org/#active)
2 | [](https://github.com/nginx/docker-nginx/blob/master/SUPPORT.md)
3 | [](https://community.nginx.org)
4 | [](https://opensource.org/license/bsd-2-clause)
5 | [](/CODE_OF_CONDUCT.md)
6 |
7 | # About this Repo
8 |
9 | ## Maintained by: [the NGINX Docker Maintainers](https://github.com/nginx/docker-nginx)
10 |
11 | This is the Git repo of the [Docker "Official Image"](https://github.com/docker-library/official-images#what-are-official-images) for [`nginx`](https://hub.docker.com/_/nginx/). See [the Docker Hub page](https://hub.docker.com/_/nginx/) for the full readme on how to use this Docker image and for information regarding contributing and issues.
12 |
13 | The [full image description on Docker Hub](https://hub.docker.com/_/nginx/) is generated/maintained over in [the docker-library/docs repository](https://github.com/docker-library/docs), specifically in [the `nginx` directory](https://github.com/docker-library/docs/tree/master/nginx).
14 |
15 | The changelog for NGINX releases is available at [nginx.org changes page](https://nginx.org/en/CHANGES).
16 |
17 | ## See a change merged here that doesn't show up on Docker Hub yet?
18 |
19 | For more information about the full official images change lifecycle, see [the "An image's source changed in Git, now what?" FAQ entry](https://github.com/docker-library/faq#an-images-source-changed-in-git-now-what).
20 |
21 | For outstanding `nginx` image PRs, check [PRs with the "library/nginx" label on the official-images repository](https://github.com/docker-library/official-images/labels/library%2Fnginx). For the current "source of truth" for [`nginx`](https://hub.docker.com/_/nginx/), see [the `library/nginx` file in the official-images repository](https://github.com/docker-library/official-images/blob/master/library/nginx).
22 |
23 | ## Contributing
24 |
25 | Please see the [contributing guide](/CONTRIBUTING.md) for guidelines on how to best contribute to this project.
26 |
27 | ## License
28 |
29 | [BSD 2-Clause](/LICENSE)
30 |
31 | © [F5, Inc.](https://www.f5.com/) 2014-2025
32 |
33 | ---
34 |
35 | - [](https://github.com/nginx/docker-nginx/actions?query=workflow%3A%22GitHub+CI%22+branch%3Amaster)
36 |
37 | | Build | Status | Badges | (per-arch) |
38 | |:-:|:-:|:-:|:-:|
39 | | [](https://doi-janky.infosiftr.net/job/multiarch/job/amd64/job/nginx/) | [](https://doi-janky.infosiftr.net/job/multiarch/job/arm32v5/job/nginx/) | [](https://doi-janky.infosiftr.net/job/multiarch/job/arm32v6/job/nginx/) | [](https://doi-janky.infosiftr.net/job/multiarch/job/arm32v7/job/nginx/) |
40 | | [](https://doi-janky.infosiftr.net/job/multiarch/job/arm64v8/job/nginx/) | [](https://doi-janky.infosiftr.net/job/multiarch/job/i386/job/nginx/) | [](https://doi-janky.infosiftr.net/job/multiarch/job/mips64le/job/nginx/) | [](https://doi-janky.infosiftr.net/job/multiarch/job/ppc64le/job/nginx/) |
41 | | [](https://doi-janky.infosiftr.net/job/multiarch/job/s390x/job/nginx/) | [](https://doi-janky.infosiftr.net/job/put-shared/job/light/job/nginx/) |
42 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | ## Latest Versions
4 |
5 | We advise users to run or update to the most recent release of the NGINX Docker image. Older versions of the NGINX Docker image may not have all enhancements and/or bug fixes applied to them.
6 |
7 | ## Reporting a Vulnerability
8 |
9 | The F5 Security Incident Response Team (F5 SIRT) offers two methods to easily report potential security vulnerabilities:
10 |
11 | - If you’re an F5 customer with an active support contract, please contact [F5 Technical Support](https://www.f5.com/support).
12 | - If you aren’t an F5 customer, please report any potential or current instances of security vulnerabilities in any F5 product to the F5 Security Incident Response Team at [F5SIRT@f5.com](mailto:F5SIRT@f5.com).
13 |
14 | For more information, please read the F5 SIRT vulnerability reporting guidelines available at [https://www.f5.com/support/report-a-vulnerability](https://www.f5.com/support/report-a-vulnerability).
15 |
--------------------------------------------------------------------------------
/SUPPORT.md:
--------------------------------------------------------------------------------
1 | # Support
2 |
3 | ## Ask a Question
4 |
5 | We use GitHub for tracking bugs and feature requests related to this project.
6 |
7 | Don't know how something in this project works? Curious if this project can achieve your desired functionality? Please open an issue on GitHub with the label `question`. Alternatively, start a GitHub discussion!
8 |
9 | ## NGINX Specific Questions and/or Issues
10 |
11 | This isn't the right place to get support for NGINX specific questions, but the following resources are available below. Thanks for your understanding!
12 |
13 | ### Community Forum
14 |
15 | We have a community [forum](https://community.nginx.org/)! If you have any questions and/or issues, try checking out the [`Troubleshooting`](https://community.nginx.org/c/troubleshooting/8) and [`How do I...?`](https://community.nginx.org/c/how-do-i/9) categories. Both fellow community members and NGINXers might be able to help you! :)
16 |
17 | ### Documentation
18 |
19 | For a comprehensive list of all NGINX directives, check out the [alphabetical directive index](https://nginx.org/en/docs/dirindex.html).
20 |
21 | For a comprehensive list of administration and deployment guides for all NGINX products, check out [docs.nginx.com](https://docs.nginx.com/).
22 |
23 | ### Mailing List
24 |
25 | Want to get in touch with the NGINX development team directly? Try using the relevant mailing list found at [mailman.nginx.org](https://mailman.nginx.org/mailman/listinfo)!
26 |
27 | ## Contributing
28 |
29 | Please see the [contributing guide](/CONTRIBUTING.md) for guidelines on how to best contribute to this project.
30 |
31 | ## Commercial Support
32 |
33 | Commercial support for this project may be available. Please get in touch with [NGINX sales](https://www.f5.com/products/get-f5/) or check your contract details for more information!
34 |
35 | ## Community Support
36 |
37 | Community support is offered on a best effort basis through either GitHub issues/PRs/discussions or through any of our active communities.
38 |
--------------------------------------------------------------------------------
/entrypoint/10-listen-on-ipv6-by-default.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=4:ts=4:et
3 |
4 | set -e
5 |
entrypoint_log() {
    # Suppress all output when quiet mode was requested via the environment.
    if [ -n "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
        return 0
    fi
    echo "$@"
}
11 |
# Script name, used as a log prefix.
ME=$(basename "$0")
# NOTE: stored without the leading slash; every use below prepends "/".
DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf"

# check if we have ipv6 available
if [ ! -f "/proc/net/if_inet6" ]; then
    entrypoint_log "$ME: info: ipv6 not available"
    exit 0
fi

if [ ! -f "/$DEFAULT_CONF_FILE" ]; then
    entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist"
    exit 0
fi

# check if the file can be modified, e.g. not on a r/o filesystem
touch /$DEFAULT_CONF_FILE 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; }

# check if the file is already modified, e.g. on a container restart
grep -q "listen \[::]\:80;" /$DEFAULT_CONF_FILE && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; }

# /etc/os-release defines $ID (e.g. "debian", "alpine"), used to pick the
# package manager below.
if [ -f "/etc/os-release" ]; then
    . /etc/os-release
else
    entrypoint_log "$ME: info: can not guess the operating system"
    exit 0
fi

entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE"

# Only rewrite default.conf when it is still byte-identical to the packaged
# version; a user-modified config must be left untouched.
case "$ID" in
    "debian")
        # dpkg-query prints each conffile with its original md5 checksum
        CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3)
        echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || {
            entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
            exit 0
        }
        ;;
    "alpine")
        # apk manifest prints "sha1:<hash>  <path>" per file; extract the hash
        CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2)
        echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || {
            entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
            exit 0
        }
        ;;
    *)
        entrypoint_log "$ME: info: Unsupported distribution"
        exit 0
        ;;
esac

# enable ipv6 on default.conf listen sockets
sed -i -E 's,listen 80;,listen 80;\n listen [::]:80;,' /$DEFAULT_CONF_FILE

entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE"

exit 0
68 |
--------------------------------------------------------------------------------
/entrypoint/15-local-resolvers.envsh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=2:ts=2:sts=2:et
3 |
4 | set -eu
5 |
6 | LC_ALL=C
7 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
8 |
9 | [ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0
10 |
11 | NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {if ($2 ~ ":") {print "["$2"]"} else {print $2}}' /etc/resolv.conf)
12 |
13 | NGINX_LOCAL_RESOLVERS="${NGINX_LOCAL_RESOLVERS% }"
14 |
15 | export NGINX_LOCAL_RESOLVERS
16 |
--------------------------------------------------------------------------------
/entrypoint/20-envsubst-on-templates.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -e
4 |
5 | ME=$(basename "$0")
6 |
entrypoint_log() {
  # Echo the arguments unless quiet logging was requested; always returns 0.
  case "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" in
    "") echo "$@" ;;
    *) : ;;
  esac
}
12 |
add_stream_block() {
  local conffile="/etc/nginx/nginx.conf"

  # NOTE: $stream_output_dir is not defined here — it is a "local" of
  # auto_envsubst and is visible through the shell's dynamic scoping because
  # this function is only ever called from there.
  if grep -q -E "\s*stream\s*\{" "$conffile"; then
    entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates"
  else
    # check if the file can be modified, e.g. not on a r/o filesystem
    touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; }
    entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf"
    cat << END >> "$conffile"
# added by "$ME" on "$(date)"
stream {
    include $stream_output_dir/*.conf;
}
END
  fi
}
30 |
auto_envsubst() {
  # All directories and suffixes are overridable via NGINX_ENVSUBST_* vars.
  local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
  local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
  local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
  local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}"
  local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}"
  local filter="${NGINX_ENVSUBST_FILTER:-}"

  local template defined_envs relative_path output_path subdir
  # Build the '${VAR} ${VAR2} ...' whitelist of environment variable names
  # (optionally restricted by $filter) handed to envsubst, so only variables
  # actually present in the environment get substituted and other $... tokens
  # in the templates pass through unchanged.
  defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
  [ -d "$template_dir" ] || return 0
  if [ ! -w "$output_dir" ]; then
    entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
    return 0
  fi
  find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
    relative_path="${template#"$template_dir/"}"
    output_path="$output_dir/${relative_path%"$suffix"}"
    subdir=$(dirname "$relative_path")
    # create a subdirectory where the template file exists
    mkdir -p "$output_dir/$subdir"
    entrypoint_log "$ME: Running envsubst on $template to $output_path"
    envsubst "$defined_envs" < "$template" > "$output_path"
  done

  # Print the first file with the stream suffix, this will be false if there are none
  if test -n "$(find "$template_dir" -name "*$stream_suffix" -print -quit)"; then
    mkdir -p "$stream_output_dir"
    if [ ! -w "$stream_output_dir" ]; then
      entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable"
      return 0
    fi
    # ensure nginx.conf actually includes $stream_output_dir/*.conf
    add_stream_block
    find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do
      relative_path="${template#"$template_dir/"}"
      output_path="$stream_output_dir/${relative_path%"$stream_suffix"}"
      subdir=$(dirname "$relative_path")
      # create a subdirectory where the template file exists
      mkdir -p "$stream_output_dir/$subdir"
      entrypoint_log "$ME: Running envsubst on $template to $output_path"
      envsubst "$defined_envs" < "$template" > "$output_path"
    done
  fi
}

auto_envsubst

exit 0
79 |
--------------------------------------------------------------------------------
/entrypoint/30-tune-worker-processes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=2:ts=2:sts=2:et
3 |
4 | set -eu
5 |
6 | LC_ALL=C
7 | ME=$(basename "$0")
8 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
9 |
10 | [ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0
11 |
12 | touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; }
13 |
ceildiv() {
  # Smallest integer >= $1 / $2 (integer ceiling division).
  div=$2
  num=$1
  echo "$(( (num + div - 1) / div ))"
}
19 |
get_cpuset() {
  # Count the CPUs named by a cpuset list file (e.g. "0-3,5"): tokens are
  # comma-separated and each is either a single CPU or an "a-b" range.
  #   $1 - cgroup directory, $2 - cpuset file name within it
  # Prints the CPU count on stdout; returns 1 if the file does not exist.
  cpusetroot=$1
  cpusetfile=$2
  ncpu=0
  [ -f "$cpusetroot/$cpusetfile" ] || return 1
  for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do
    case "$token" in
      *-*)
        # Expand an "a-b" range arithmetically instead of forking
        # seq|wc for every token; malformed or inverted ranges add 0,
        # matching the old seq-based behavior (empty seq output).
        range_first=${token%%-*}
        range_last=${token##*-}
        if [ "$range_last" -ge "$range_first" ] 2>/dev/null; then
          ncpu=$(( ncpu + range_last - range_first + 1 ))
        fi
        ;;
      *)
        ncpu=$(( ncpu+1 ))
        ;;
    esac
  done
  echo "$ncpu"
}
38 |
get_quota() {
  # Derive a CPU count from cgroup-v1 CFS bandwidth limits.
  #   $1 - cgroup cpu controller directory
  # Prints ceil(quota/period); returns 1 when no usable quota is configured.
  cpuroot=$1
  ncpu=0
  [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1
  [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1
  cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" )
  cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" )
  # a quota of -1 means "no limit"; a period of 0 would divide by zero
  if [ "$cfs_quota" = "-1" ] || [ "$cfs_period" = "0" ]; then
    return 1
  fi
  ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
  [ "$ncpu" -gt 0 ] || return 1
  echo "$ncpu"
}
52 |
get_quota_v2() {
  # Derive a CPU count from the cgroup-v2 "cpu.max" file ("<quota> <period>").
  #   $1 - unified cgroup directory
  # Prints ceil(quota/period); returns 1 when no usable quota is configured.
  cpuroot=$1
  ncpu=0
  [ -f "$cpuroot/cpu.max" ] || return 1
  cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" )
  cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" )
  # a quota of "max" means "no limit"; a period of 0 would divide by zero
  if [ "$cfs_quota" = "max" ] || [ "$cfs_period" = "0" ]; then
    return 1
  fi
  ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
  [ "$ncpu" -gt 0 ] || return 1
  echo "$ncpu"
}
65 |
get_cgroup_v1_path() {
  # Resolve the filesystem path of a cgroup-v1 controller ("cpuset" or "cpu")
  # for the current process, by combining /proc/self/mountinfo (where the
  # hierarchy is mounted) with /proc/self/cgroup (which sub-path we are in).
  # Prints the resolved path (empty when nothing matched).
  needle=$1
  found=
  foundroot=
  mountpoint=

  [ -r "/proc/self/mountinfo" ] || return 1
  [ -r "/proc/self/cgroup" ] || return 1

  # Pass 1: locate the mountinfo line for the requested controller and keep
  # fields 4 and 5 (mount root within the hierarchy, and mount point).
  while IFS= read -r line; do
    case "$needle" in
      "cpuset")
        case "$line" in
          *cpuset*)
            found=$( echo "$line" | cut -d ' ' -f 4,5 )
            break
            ;;
        esac
        ;;
      "cpu")
        case "$line" in
          *cpuset*)
            # deliberately skip cpuset mounts when looking for "cpu"
            ;;
          *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*)
            found=$( echo "$line" | cut -d ' ' -f 4,5 )
            break
            ;;
        esac
    esac
  done << __EOF__
$( grep -F -- '- cgroup ' /proc/self/mountinfo )
__EOF__

  # Pass 2: find this process's own cgroup path for the controller
  # (/proc/self/cgroup lines look like "N:controller:/path").
  while IFS= read -r line; do
    controller=$( echo "$line" | cut -d: -f 2 )
    case "$needle" in
      "cpuset")
        case "$controller" in
          cpuset)
            mountpoint=$( echo "$line" | cut -d: -f 3 )
            break
            ;;
        esac
        ;;
      "cpu")
        case "$controller" in
          cpu,cpuacct|cpuacct,cpu|cpuacct|cpu)
            mountpoint=$( echo "$line" | cut -d: -f 3 )
            break
            ;;
        esac
        ;;
    esac
  done << __EOF__
$( grep -F -- 'cpu' /proc/self/cgroup )
__EOF__

  # Combine the two: a mount root of "/" means the cgroup path is appended
  # to the mount point; if root already equals the cgroup path, the mount
  # point alone is the answer.
  case "${found%% *}" in
    "/")
      foundroot="${found##* }$mountpoint"
      ;;
    "$mountpoint")
      foundroot="${found##* }"
      ;;
  esac
  echo "$foundroot"
}
133 |
get_cgroup_v2_path() {
  # Resolve the cgroup-v2 filesystem path for the current process; analogous
  # to get_cgroup_v1_path but using the unified "cgroup2" mount and the
  # single "0::" entry in /proc/self/cgroup.
  # Prints the resolved path; returns 1 when no cgroup2 mount exists.
  found=
  foundroot=
  mountpoint=

  [ -r "/proc/self/mountinfo" ] || return 1
  [ -r "/proc/self/cgroup" ] || return 1

  # Keep fields 4-5 (mount root, mount point) of the last cgroup2 mount line.
  while IFS= read -r line; do
    found=$( echo "$line" | cut -d ' ' -f 4,5 )
  done << __EOF__
$( grep -F -- '- cgroup2 ' /proc/self/mountinfo )
__EOF__

  # Our own cgroup path (the "0::/path" line; keep the last match).
  while IFS= read -r line; do
    mountpoint=$( echo "$line" | cut -d: -f 3 )
  done << __EOF__
$( grep -F -- '0::' /proc/self/cgroup )
__EOF__

  case "${found%% *}" in
    "")
      # no cgroup2 mount was found at all
      return 1
      ;;
    "/")
      foundroot="${found##* }$mountpoint"
      ;;
    "$mountpoint" | /../*)
      foundroot="${found##* }"
      ;;
  esac
  echo "$foundroot"
}
167 |
168 | ncpu_online=$( getconf _NPROCESSORS_ONLN )
169 | ncpu_cpuset=
170 | ncpu_quota=
171 | ncpu_cpuset_v2=
172 | ncpu_quota_v2=
173 |
174 | cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online
175 | cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online
176 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online
177 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online
178 |
179 | ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \
180 | "$ncpu_online" \
181 | "$ncpu_cpuset" \
182 | "$ncpu_quota" \
183 | "$ncpu_cpuset_v2" \
184 | "$ncpu_quota_v2" \
185 | | sort -n \
186 | | head -n 1 )
187 |
188 | sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf
189 |
--------------------------------------------------------------------------------
/entrypoint/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=4:ts=4:et
3 |
4 | set -e
5 |
entrypoint_log() {
    # Print the given message unless quiet logging was requested.
    # (A failing left side of "&&" does not trip `set -e`.)
    [ -n "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ] && return 0
    echo "$@"
}
11 |
# Only run the /docker-entrypoint.d/ hooks when actually starting nginx;
# any other command (e.g. an interactive shell) is exec'd untouched below.
if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then
    # "find ... | read" succeeds only if find printed at least one entry
    if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
        entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"

        entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
        # sort -V orders by numeric prefix (10-..., 15-..., 20-..., 30-...).
        # NOTE: the while loop runs in a pipeline subshell, so variables
        # exported by sourced .envsh files are visible to later scripts in
        # this same loop, but not to the final `exec "$@"`.
        find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
            case "$f" in
                *.envsh)
                    if [ -x "$f" ]; then
                        entrypoint_log "$0: Sourcing $f";
                        . "$f"
                    else
                        # warn on shell scripts without exec bit
                        entrypoint_log "$0: Ignoring $f, not executable";
                    fi
                    ;;
                *.sh)
                    if [ -x "$f" ]; then
                        entrypoint_log "$0: Launching $f";
                        "$f"
                    else
                        # warn on shell scripts without exec bit
                        entrypoint_log "$0: Ignoring $f, not executable";
                    fi
                    ;;
                *) entrypoint_log "$0: Ignoring $f";;
            esac
        done

        entrypoint_log "$0: Configuration complete; ready for start up"
    else
        entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
    fi
fi

# Replace this shell with the requested command so it runs as PID 1 and
# receives container signals directly.
exec "$@"
48 |
--------------------------------------------------------------------------------
/generate-stackbrew-library.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -eu

# Extra tags to attach to each release channel.
declare -A aliases
aliases=(
	[mainline]='1 1.27 latest'
	[stable]='1.28'
)

self="$(basename "$BASH_SOURCE")"
# run from the repository root so the relative directory names below resolve
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
base=debian

versions=( mainline stable )
15 |
# get the most recent commit which modified any of "$@"
# (prints the full commit hash to stdout)
fileCommit() {
	git log -1 --format='format:%H' HEAD -- "$@"
}
20 |
# get the most recent commit which modified "$1/Dockerfile" or any file COPY'd from "$1/Dockerfile"
dirCommit() {
	local dir="$1"; shift
	(
		cd "$dir"
		# pass the Dockerfile itself plus every COPY source it references
		# (awk prints fields 2..NF-1, i.e. all but the in-image destination)
		fileCommit \
			Dockerfile \
			$(git show HEAD:./Dockerfile | awk '
				toupper($1) == "COPY" {
					for (i = 2; i < NF; i++) {
						print $i
					}
				}
			')
	)
}
37 |
38 | cat <<-EOH
39 | # this file is generated via https://github.com/nginx/docker-nginx/blob/$(fileCommit "$self")/$self
40 |
41 | Maintainers: NGINX Docker Maintainers (@nginx)
42 | GitRepo: https://github.com/nginx/docker-nginx.git
43 | EOH
44 |
# prints "$2$1$3$1...$N"
join() {
	local sep="$1"; shift
	local joined=""
	local piece
	# prefix every element with the separator, then strip the leading one
	for piece in "$@"; do
		joined+="$sep$piece"
	done
	echo "${joined#"$sep"}"
}
51 |
# Emit one manifest entry per (channel, variant) pair.
for version in "${versions[@]}"; do
	debian_otel="debian-otel"
	alpine_otel="alpine-otel"
	commit="$(dirCommit "$version/$base")"

	# nginx version baked into the Dockerfile ("ENV NGINX_VERSION x.y.z")
	fullVersion="$(git show "$commit":"$version/$base/Dockerfile" | awk '$1 == "ENV" && $2 == "NGINX_VERSION" { print $3; exit }')"

	versionAliases=( $fullVersion )
	if [ "$version" != "$fullVersion" ]; then
		versionAliases+=( $version )
	fi
	versionAliases+=( ${aliases[$version]:-} )

	# debian suite name taken from the FROM line (field between "-" and ":")
	debianVersion="$(git show "$commit":"$version/$base/Dockerfile" | awk -F"[-:]" '$1 == "FROM debian" { print $2; exit }')"
	debianAliases=( ${versionAliases[@]/%/-$debianVersion} )
	# "latest-<suite>" collapses to the plain suite name
	debianAliases=( "${debianAliases[@]//latest-/}" )

	echo
	cat <<-EOE
		Tags: $(join ', ' "${versionAliases[@]}"), $(join ', ' "${debianAliases[@]}")
		Architectures: amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, ppc64le, s390x
		GitCommit: $commit
		Directory: $version/$base
	EOE

	# debian perl variant
	for variant in debian-perl; do
		commit="$(dirCommit "$version/$variant")"

		variantAliases=( "${versionAliases[@]/%/-perl}" )
		variantAliases+=( "${versionAliases[@]/%/-${variant/debian/$debianVersion}}" )
		variantAliases=( "${variantAliases[@]//latest-/}" )

		echo
		cat <<-EOE
			Tags: $(join ', ' "${variantAliases[@]}")
			Architectures: amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, ppc64le, s390x
			GitCommit: $commit
			Directory: $version/$variant
		EOE
	done

	# debian otel variant (fewer supported architectures)
	for variant in $debian_otel; do
		commit="$(dirCommit "$version/$variant")"

		variantAliases=( "${versionAliases[@]/%/-otel}" )
		variantAliases+=( "${versionAliases[@]/%/-${variant/debian/$debianVersion}}" )
		variantAliases=( "${variantAliases[@]//latest-/}" )

		echo
		cat <<-EOE
			Tags: $(join ', ' "${variantAliases[@]}")
			Architectures: amd64, arm64v8
			GitCommit: $commit
			Directory: $version/$variant
		EOE
	done


	# alpine release is read from the slim image all alpine variants share
	commit="$(dirCommit "$version/alpine-slim")"
	alpineVersion="$(git show "$commit":"$version/alpine-slim/Dockerfile" | awk -F: '$1 == "FROM alpine" { print $2; exit }')"

	for variant in alpine alpine-perl alpine-slim; do
		commit="$(dirCommit "$version/$variant")"

		variantAliases=( "${versionAliases[@]/%/-$variant}" )
		variantAliases+=( "${versionAliases[@]/%/-${variant/alpine/alpine$alpineVersion}}" )
		variantAliases=( "${variantAliases[@]//latest-/}" )

		echo
		cat <<-EOE
			Tags: $(join ', ' "${variantAliases[@]}")
			Architectures: arm64v8, arm32v6, arm32v7, ppc64le, s390x, i386, amd64, riscv64
			GitCommit: $commit
			Directory: $version/$variant
		EOE
	done

	# alpine otel variant (fewer supported architectures)
	for variant in $alpine_otel; do
		commit="$(dirCommit "$version/$variant")"

		variantAliases=( "${versionAliases[@]/%/-$variant}" )
		variantAliases+=( "${versionAliases[@]/%/-${variant/alpine/alpine$alpineVersion}}" )
		variantAliases=( "${variantAliases[@]//latest-/}" )

		echo
		cat <<-EOE
			Tags: $(join ', ' "${variantAliases[@]}")
			Architectures: amd64, arm64v8
			GitCommit: $commit
			Directory: $version/$variant
		EOE
	done

done
146 |
--------------------------------------------------------------------------------
/mainline/alpine-otel/Dockerfile:
--------------------------------------------------------------------------------
#
# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
#
# PLEASE DO NOT EDIT IT DIRECTLY.
#
# Adds the nginx-module-otel (OpenTelemetry) dynamic module on top of the
# standard alpine image.
FROM nginx:1.27.5-alpine

# pkg-oss release of the otel module; combined with NGINX_VERSION below
ENV OTEL_VERSION 0.1.2

RUN set -x \
    && apkArch="$(cat /etc/apk/arch)" \
    && nginxPackages=" \
        nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
        nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
        nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
        nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
        nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \
        nginx-module-otel=${NGINX_VERSION}.${OTEL_VERSION}-r${PKG_RELEASE} \
    " \
# install prerequisites for public key and pkg-oss checks
    && apk add --no-cache --virtual .checksum-deps \
        openssl \
    && case "$apkArch" in \
        x86_64|aarch64) \
# arches officially built by upstream
            apk add -X "https://nginx.org/packages/mainline/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
            ;; \
        *) \
# we're on an architecture upstream doesn't officially build for
# let's build binaries from the published packaging sources
            set -x \
            && tempDir="$(mktemp -d)" \
            && chown nobody:nobody $tempDir \
            && apk add --no-cache --virtual .build-deps \
                gcc \
                libc-dev \
                make \
                openssl-dev \
                pcre2-dev \
                zlib-dev \
                linux-headers \
                cmake \
                bash \
                alpine-sdk \
                findutils \
                curl \
                xz \
                protobuf-dev \
                grpc-dev \
# the pkg-oss build (download, checksum-verify, build, index, sign) runs as
# the unprivileged "nobody" user; note the \" and \$ escapes keep the inner
# shell from expanding until `su` runs the command
            && su nobody -s /bin/sh -c " \
                export HOME=${tempDir} \
                && cd ${tempDir} \
                && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
                && PKGOSSCHECKSUM=\"c773d98b567bd585c17f55702bf3e4c7d82b676bfbde395270e90a704dca3c758dfe0380b3f01770542b4fd9bed1f1149af4ce28bfc54a27a96df6b700ac1745 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \
                && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
                    echo \"pkg-oss tarball checksum verification succeeded!\"; \
                else \
                    echo \"pkg-oss tarball checksum verification failed!\"; \
                    exit 1; \
                fi \
                && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
                && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \
                && cd alpine \
                && make module-otel \
                && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
                && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
                " \
# trust the throwaway signing key so apk will install the local packages
            && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
            && apk del --no-network .build-deps \
            && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
            ;; \
    esac \
# remove checksum deps
    && apk del --no-network .checksum-deps \
# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
    && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
    && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi
78 |
--------------------------------------------------------------------------------
/mainline/alpine-perl/Dockerfile:
--------------------------------------------------------------------------------
#
# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
#
# PLEASE DO NOT EDIT IT DIRECTLY.
#
FROM nginx:1.27.5-alpine

# Install the dynamic perl module (plus the stock dynamic modules) on top of
# the alpine image. NGINX_VERSION, PKG_RELEASE, DYNPKG_RELEASE and NJS_* are
# inherited from the base image's ENV.
RUN set -x \
    && apkArch="$(cat /etc/apk/arch)" \
    && nginxPackages=" \
        nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
        nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
        nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
        nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
        nginx-module-perl=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
        nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \
    " \
# install prerequisites for public key and pkg-oss checks
    && apk add --no-cache --virtual .checksum-deps \
        openssl \
    && case "$apkArch" in \
        x86_64|aarch64) \
# arches officially built by upstream
            apk add -X "https://nginx.org/packages/mainline/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
            ;; \
        *) \
# we're on an architecture upstream doesn't officially build for
# let's build binaries from the published packaging sources
            set -x \
            && tempDir="$(mktemp -d)" \
            && chown nobody:nobody $tempDir \
            && apk add --no-cache --virtual .build-deps \
                gcc \
                libc-dev \
                make \
                openssl-dev \
                pcre2-dev \
                zlib-dev \
                linux-headers \
                perl-dev \
                bash \
                alpine-sdk \
                findutils \
                curl \
# fetch, verify, build and sign the packages as an unprivileged user
            && su nobody -s /bin/sh -c " \
                export HOME=${tempDir} \
                && cd ${tempDir} \
                && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
                && PKGOSSCHECKSUM=\"c773d98b567bd585c17f55702bf3e4c7d82b676bfbde395270e90a704dca3c758dfe0380b3f01770542b4fd9bed1f1149af4ce28bfc54a27a96df6b700ac1745 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \
                && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
                    echo \"pkg-oss tarball checksum verification succeeded!\"; \
                else \
                    echo \"pkg-oss tarball checksum verification failed!\"; \
                    exit 1; \
                fi \
                && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
                && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \
                && cd alpine \
                && make module-perl \
                && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
                && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
            " \
            && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
            && apk del --no-network .build-deps \
            && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
            ;; \
    esac \
# remove checksum deps
    && apk del --no-network .checksum-deps \
# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
    && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
    && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi
--------------------------------------------------------------------------------
/mainline/alpine-slim/10-listen-on-ipv6-by-default.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# vim:sw=4:ts=4:et

# Entrypoint script: enable IPv6 listening in the packaged default vhost
# when the environment makes that safe (IPv6 present, config unmodified).

set -e
entrypoint_log() {
    # Print "$@" unless quiet mode was requested via NGINX_ENTRYPOINT_QUIET_LOGS.
    [ -n "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ] || echo "$@"
}
11 |
ME=$(basename "$0")
# NOTE: intentionally stored without the leading "/" — checksum tools below
# expect the path form used by the package databases.
DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf"

# check if we have ipv6 available
if [ ! -f "/proc/net/if_inet6" ]; then
    entrypoint_log "$ME: info: ipv6 not available"
    exit 0
fi

if [ ! -f "/$DEFAULT_CONF_FILE" ]; then
    entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist"
    exit 0
fi

# check if the file can be modified, e.g. not on a r/o filesystem
touch /$DEFAULT_CONF_FILE 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; }

# check if the file is already modified, e.g. on a container restart
grep -q "listen \[::]\:80;" /$DEFAULT_CONF_FILE && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; }

# /etc/os-release provides $ID (distro name), used below to pick the
# package database and checksum tool
if [ -f "/etc/os-release" ]; then
    . /etc/os-release
else
    entrypoint_log "$ME: info: can not guess the operating system"
    exit 0
fi

entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE"

# Only rewrite the config when it is still identical to the packaged default;
# a user-modified config is left untouched (every branch exits 0 — this
# script is best-effort and must never abort container startup).
case "$ID" in
    "debian")
        # dpkg records an MD5 per conffile
        CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3)
        echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || {
            entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
            exit 0
        }
        ;;
    "alpine")
        # apk manifest lists "sha1:<hash>  <path>" per packaged file
        CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2)
        echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || {
            entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
            exit 0
        }
        ;;
    *)
        entrypoint_log "$ME: info: Unsupported distribution"
        exit 0
        ;;
esac

# enable ipv6 on default.conf listen sockets
sed -i -E 's,listen 80;,listen 80;\n listen [::]:80;,' /$DEFAULT_CONF_FILE

entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE"

exit 0
68 |
--------------------------------------------------------------------------------
/mainline/alpine-slim/15-local-resolvers.envsh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=2:ts=2:sts=2:et
3 |
4 | set -eu
5 |
6 | LC_ALL=C
7 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
8 |
9 | [ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0
10 |
11 | NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {if ($2 ~ ":") {print "["$2"]"} else {print $2}}' /etc/resolv.conf)
12 |
13 | NGINX_LOCAL_RESOLVERS="${NGINX_LOCAL_RESOLVERS% }"
14 |
15 | export NGINX_LOCAL_RESOLVERS
16 |
--------------------------------------------------------------------------------
/mainline/alpine-slim/20-envsubst-on-templates.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -e
4 |
5 | ME=$(basename "$0")
6 |
entrypoint_log() {
    # Print "$@" unless quiet mode was requested via NGINX_ENTRYPOINT_QUIET_LOGS.
    [ -n "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ] || echo "$@"
}
12 |
add_stream_block() {
    # Append a stream{} block including $stream_output_dir/*.conf to the main
    # nginx.conf unless one already exists.
    # NOTE: relies on $stream_output_dir being set by the caller
    # (auto_envsubst) — "local" in sh is dynamically scoped.
    local conffile="/etc/nginx/nginx.conf"

    if grep -q -E "\s*stream\s*\{" "$conffile"; then
        entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates"
    else
        # check if the file can be modified, e.g. not on a r/o filesystem
        touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; }
        entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf"
        cat << END >> "$conffile"
# added by "$ME" on "$(date)"
stream {
    include $stream_output_dir/*.conf;
}
END
    fi
}
30 |
auto_envsubst() {
    # Render *.template files from template_dir into output_dir via envsubst,
    # and *.stream-template files into stream_output_dir (adding a stream{}
    # block to nginx.conf when needed). All directories and suffixes are
    # overridable via the NGINX_ENVSUBST_* environment variables.
    local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
    local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
    local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
    local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}"
    local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}"
    local filter="${NGINX_ENVSUBST_FILTER:-}"

    local template defined_envs relative_path output_path subdir
    # Build the '${VAR} ${VAR2} ...' allow-list handed to envsubst: every
    # environment variable whose name matches $filter (all vars when unset).
    defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
    [ -d "$template_dir" ] || return 0
    if [ ! -w "$output_dir" ]; then
        entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
        return 0
    fi
    find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
        relative_path="${template#"$template_dir/"}"
        output_path="$output_dir/${relative_path%"$suffix"}"
        subdir=$(dirname "$relative_path")
        # create a subdirectory where the template file exists
        mkdir -p "$output_dir/$subdir"
        entrypoint_log "$ME: Running envsubst on $template to $output_path"
        envsubst "$defined_envs" < "$template" > "$output_path"
    done

    # Print the first file with the stream suffix, this will be false if there are none
    if test -n "$(find "$template_dir" -name "*$stream_suffix" -print -quit)"; then
        mkdir -p "$stream_output_dir"
        if [ ! -w "$stream_output_dir" ]; then
            entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable"
            return 0
        fi
        add_stream_block
        find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do
            relative_path="${template#"$template_dir/"}"
            output_path="$stream_output_dir/${relative_path%"$stream_suffix"}"
            subdir=$(dirname "$relative_path")
            # create a subdirectory where the template file exists
            mkdir -p "$stream_output_dir/$subdir"
            entrypoint_log "$ME: Running envsubst on $template to $output_path"
            envsubst "$defined_envs" < "$template" > "$output_path"
        done
    fi
}
75 |
76 | auto_envsubst
77 |
78 | exit 0
79 |
--------------------------------------------------------------------------------
/mainline/alpine-slim/30-tune-worker-processes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=2:ts=2:sts=2:et
3 |
4 | set -eu
5 |
6 | LC_ALL=C
7 | ME=$(basename "$0")
8 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
9 |
10 | [ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0
11 |
12 | touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; }
13 |
ceildiv() {
  # Integer division of $1 by $2, rounded up.
  echo $(( ($1 + $2 - 1) / $2 ))
}
19 |
get_cpuset() {
  # Count the CPUs listed in a cpuset file ($1/$2), e.g. "0-3,5" -> 5.
  # Fails (rc 1) when the file does not exist.
  root=$1
  file=$2
  total=0
  [ -f "$root/$file" ] || return 1
  for range in $( tr ',' ' ' < "$root/$file" ); do
    if [ "${range#*-}" != "$range" ]; then
      # "a-b" span: count its members inclusively
      span_n=$( seq $(echo "$range" | tr '-' ' ') | wc -l )
      total=$(( total + span_n ))
    else
      # single CPU id
      total=$(( total + 1 ))
    fi
  done
  echo "$total"
}
38 |
get_quota() {
  # Derive a CPU count from a cgroup v1 CFS quota/period pair under $1.
  # Fails (rc 1) when the files are absent, no quota is set (-1), the
  # period is zero, or the resulting count is not positive.
  cgroup_dir=$1
  ncpu=0
  [ -f "$cgroup_dir/cpu.cfs_quota_us" ] || return 1
  [ -f "$cgroup_dir/cpu.cfs_period_us" ] || return 1
  quota=$( cat "$cgroup_dir/cpu.cfs_quota_us" )
  period=$( cat "$cgroup_dir/cpu.cfs_period_us" )
  [ "$quota" = "-1" ] && return 1
  [ "$period" = "0" ] && return 1
  ncpu=$( ceildiv "$quota" "$period" )
  [ "$ncpu" -gt 0 ] || return 1
  echo "$ncpu"
}
52 |
get_quota_v2() {
  # Derive a CPU count from the cgroup v2 "cpu.max" file ("<quota> <period>")
  # under $1. Fails (rc 1) when the file is absent, the quota is "max"
  # (unlimited), the period is zero, or the count is not positive.
  cgroup_dir=$1
  ncpu=0
  [ -f "$cgroup_dir/cpu.max" ] || return 1
  quota=$( cut -d' ' -f 1 < "$cgroup_dir/cpu.max" )
  period=$( cut -d' ' -f 2 < "$cgroup_dir/cpu.max" )
  [ "$quota" = "max" ] && return 1
  [ "$period" = "0" ] && return 1
  ncpu=$( ceildiv "$quota" "$period" )
  [ "$ncpu" -gt 0 ] || return 1
  echo "$ncpu"
}
65 |
get_cgroup_v1_path() {
  # Locate the filesystem path of this process's cgroup v1 hierarchy for the
  # controller named in $1 ("cpuset" or "cpu") by cross-referencing
  # /proc/self/mountinfo (where the controller is mounted) with
  # /proc/self/cgroup (this process's position within that hierarchy).
  # Prints the resolved root; prints nothing when it cannot be resolved.
  needle=$1
  found=
  foundroot=
  mountpoint=

  [ -r "/proc/self/mountinfo" ] || return 1
  [ -r "/proc/self/cgroup" ] || return 1

  # pass 1: fields 4,5 ("<root> <mountpoint>") of the matching cgroup mount
  while IFS= read -r line; do
    case "$needle" in
      "cpuset")
        case "$line" in
          *cpuset*)
            found=$( echo "$line" | cut -d ' ' -f 4,5 )
            break
            ;;
        esac
        ;;
      "cpu")
        case "$line" in
          *cpuset*)
            # skip cpuset mounts while searching for the cpu controller
            ;;
          *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*)
            found=$( echo "$line" | cut -d ' ' -f 4,5 )
            break
            ;;
        esac
    esac
  done << __EOF__
$( grep -F -- '- cgroup ' /proc/self/mountinfo )
__EOF__

  # pass 2: field 3 (this process's cgroup path) for the controller
  while IFS= read -r line; do
    controller=$( echo "$line" | cut -d: -f 2 )
    case "$needle" in
      "cpuset")
        case "$controller" in
          cpuset)
            mountpoint=$( echo "$line" | cut -d: -f 3 )
            break
            ;;
        esac
        ;;
      "cpu")
        case "$controller" in
          cpu,cpuacct|cpuacct,cpu|cpuacct|cpu)
            mountpoint=$( echo "$line" | cut -d: -f 3 )
            break
            ;;
        esac
        ;;
    esac
  done << __EOF__
$( grep -F -- 'cpu' /proc/self/cgroup )
__EOF__

  # combine: a mount root of "/" means the cgroup path is relative to the
  # mountpoint; otherwise only accept an exact mountpoint match
  case "${found%% *}" in
    "/")
      foundroot="${found##* }$mountpoint"
      ;;
    "$mountpoint")
      foundroot="${found##* }"
      ;;
  esac
  echo "$foundroot"
}
133 |
get_cgroup_v2_path() {
  # Locate this process's cgroup v2 hierarchy root by combining the cgroup2
  # mount entry (from /proc/self/mountinfo) with the "0::" line of
  # /proc/self/cgroup. Prints the resolved root; fails (rc 1) when no
  # cgroup2 mount exists.
  found=
  foundroot=
  mountpoint=

  [ -r "/proc/self/mountinfo" ] || return 1
  [ -r "/proc/self/cgroup" ] || return 1

  # fields 4,5 ("<root> <mountpoint>") of the cgroup2 mount, if any
  while IFS= read -r line; do
    found=$( echo "$line" | cut -d ' ' -f 4,5 )
  done << __EOF__
$( grep -F -- '- cgroup2 ' /proc/self/mountinfo )
__EOF__

  # field 3 of the "0::" entry: this process's path inside the hierarchy
  while IFS= read -r line; do
    mountpoint=$( echo "$line" | cut -d: -f 3 )
  done << __EOF__
$( grep -F -- '0::' /proc/self/cgroup )
__EOF__

  case "${found%% *}" in
    "")
      # no cgroup2 mount found
      return 1
      ;;
    "/")
      foundroot="${found##* }$mountpoint"
      ;;
    "$mountpoint" | /../*)
      foundroot="${found##* }"
      ;;
  esac
  echo "$foundroot"
}
167 |
168 | ncpu_online=$( getconf _NPROCESSORS_ONLN )
169 | ncpu_cpuset=
170 | ncpu_quota=
171 | ncpu_cpuset_v2=
172 | ncpu_quota_v2=
173 |
174 | cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online
175 | cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online
176 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online
177 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online
178 |
179 | ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \
180 | "$ncpu_online" \
181 | "$ncpu_cpuset" \
182 | "$ncpu_quota" \
183 | "$ncpu_cpuset_v2" \
184 | "$ncpu_quota_v2" \
185 | | sort -n \
186 | | head -n 1 )
187 |
188 | sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf
189 |
--------------------------------------------------------------------------------
/mainline/alpine-slim/Dockerfile:
--------------------------------------------------------------------------------
#
# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
#
# PLEASE DO NOT EDIT IT DIRECTLY.
#
FROM alpine:3.21

# NOTE(review): the maintainer email appears to have been stripped here
# (upstream uses "NGINX Docker Maintainers <docker-maint@nginx.com>") —
# confirm against update.sh before relying on this label
LABEL maintainer="NGINX Docker Maintainers "

ENV NGINX_VERSION 1.27.5
ENV PKG_RELEASE 1
ENV DYNPKG_RELEASE 1

RUN set -x \
# create nginx user/group first, to be consistent throughout docker variants
    && addgroup -g 101 -S nginx \
    && adduser -S -D -H -u 101 -h /var/cache/nginx -s /sbin/nologin -G nginx -g nginx nginx \
    && apkArch="$(cat /etc/apk/arch)" \
    && nginxPackages=" \
        nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
    " \
# install prerequisites for public key and pkg-oss checks
    && apk add --no-cache --virtual .checksum-deps \
        openssl \
    && case "$apkArch" in \
        x86_64|aarch64) \
# arches officially built by upstream
            set -x \
# verify the upstream apk signing key before trusting it
            && KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \
            && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \
            && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \
                echo "key verification succeeded!"; \
                mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \
            else \
                echo "key verification failed!"; \
                exit 1; \
            fi \
            && apk add -X "https://nginx.org/packages/mainline/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
            ;; \
        *) \
# we're on an architecture upstream doesn't officially build for
# let's build binaries from the published packaging sources
            set -x \
            && tempDir="$(mktemp -d)" \
            && chown nobody:nobody $tempDir \
            && apk add --no-cache --virtual .build-deps \
                gcc \
                libc-dev \
                make \
                openssl-dev \
                pcre2-dev \
                zlib-dev \
                linux-headers \
                bash \
                alpine-sdk \
                findutils \
                curl \
# fetch, verify, build and sign the packages as an unprivileged user
            && su nobody -s /bin/sh -c " \
                export HOME=${tempDir} \
                && cd ${tempDir} \
                && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
                && PKGOSSCHECKSUM=\"c773d98b567bd585c17f55702bf3e4c7d82b676bfbde395270e90a704dca3c758dfe0380b3f01770542b4fd9bed1f1149af4ce28bfc54a27a96df6b700ac1745 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \
                && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
                    echo \"pkg-oss tarball checksum verification succeeded!\"; \
                else \
                    echo \"pkg-oss tarball checksum verification failed!\"; \
                    exit 1; \
                fi \
                && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
                && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \
                && cd alpine \
                && make base \
                && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
                && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
            " \
            && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
            && apk del --no-network .build-deps \
            && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
            ;; \
    esac \
# remove checksum deps
    && apk del --no-network .checksum-deps \
# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
    && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
    && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
# Add `envsubst` for templating environment variables
    && apk add --no-cache gettext-envsubst \
# Bring in tzdata so users could set the timezones through the environment
# variables
    && apk add --no-cache tzdata \
# forward request and error logs to docker log collector
    && ln -sf /dev/stdout /var/log/nginx/access.log \
    && ln -sf /dev/stderr /var/log/nginx/error.log \
# create a docker-entrypoint.d directory
    && mkdir /docker-entrypoint.d

COPY docker-entrypoint.sh /
COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
COPY 15-local-resolvers.envsh /docker-entrypoint.d
COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
COPY 30-tune-worker-processes.sh /docker-entrypoint.d
ENTRYPOINT ["/docker-entrypoint.sh"]

EXPOSE 80

STOPSIGNAL SIGQUIT

CMD ["nginx", "-g", "daemon off;"]
109 |
--------------------------------------------------------------------------------
/mainline/alpine-slim/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=4:ts=4:et
3 |
4 | set -e
5 |
entrypoint_log() {
    # Print "$@" unless quiet mode was requested via NGINX_ENTRYPOINT_QUIET_LOGS.
    [ -n "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ] || echo "$@"
}
11 |
# Only run the /docker-entrypoint.d/ scripts when the container actually
# starts nginx; any other command (e.g. a debugging shell) is exec'd as-is.
if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then
    if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
        entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"

        entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
        # sort -V orders by the scripts' numeric prefixes (10-, 15-, 20-, 30-).
        # NOTE: the pipeline runs this loop in a subshell — variables exported
        # by sourced *.envsh files are visible to later scripts in this loop,
        # but not after the loop ends.
        find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
            case "$f" in
                *.envsh)
                    if [ -x "$f" ]; then
                        entrypoint_log "$0: Sourcing $f";
                        . "$f"
                    else
                        # warn on shell scripts without exec bit
                        entrypoint_log "$0: Ignoring $f, not executable";
                    fi
                    ;;
                *.sh)
                    if [ -x "$f" ]; then
                        entrypoint_log "$0: Launching $f";
                        "$f"
                    else
                        # warn on shell scripts without exec bit
                        entrypoint_log "$0: Ignoring $f, not executable";
                    fi
                    ;;
                *) entrypoint_log "$0: Ignoring $f";;
            esac
        done

        entrypoint_log "$0: Configuration complete; ready for start up"
    else
        entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
    fi
fi

# replace this shell with the requested command so it receives signals directly
exec "$@"
48 |
--------------------------------------------------------------------------------
/mainline/alpine/Dockerfile:
--------------------------------------------------------------------------------
#
# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
#
# PLEASE DO NOT EDIT IT DIRECTLY.
#
FROM nginx:1.27.5-alpine-slim

ENV NJS_VERSION 0.8.10
ENV NJS_RELEASE 1

# Install the stock dynamic modules (xslt, geoip, image-filter, njs) on top
# of the slim image. NGINX_VERSION, PKG_RELEASE and DYNPKG_RELEASE come from
# the base image's ENV.
RUN set -x \
    && apkArch="$(cat /etc/apk/arch)" \
    && nginxPackages=" \
        nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
        nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
        nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
        nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
        nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \
    " \
# install prerequisites for public key and pkg-oss checks
    && apk add --no-cache --virtual .checksum-deps \
        openssl \
    && case "$apkArch" in \
        x86_64|aarch64) \
# arches officially built by upstream
            apk add -X "https://nginx.org/packages/mainline/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
            ;; \
        *) \
# we're on an architecture upstream doesn't officially build for
# let's build binaries from the published packaging sources
            set -x \
            && tempDir="$(mktemp -d)" \
            && chown nobody:nobody $tempDir \
            && apk add --no-cache --virtual .build-deps \
                gcc \
                libc-dev \
                make \
                openssl-dev \
                pcre2-dev \
                zlib-dev \
                linux-headers \
                libxslt-dev \
                gd-dev \
                geoip-dev \
                libedit-dev \
                bash \
                alpine-sdk \
                findutils \
                curl \
# fetch, verify, build and sign the packages as an unprivileged user
            && su nobody -s /bin/sh -c " \
                export HOME=${tempDir} \
                && cd ${tempDir} \
                && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
                && PKGOSSCHECKSUM=\"c773d98b567bd585c17f55702bf3e4c7d82b676bfbde395270e90a704dca3c758dfe0380b3f01770542b4fd9bed1f1149af4ce28bfc54a27a96df6b700ac1745 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \
                && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
                    echo \"pkg-oss tarball checksum verification succeeded!\"; \
                else \
                    echo \"pkg-oss tarball checksum verification failed!\"; \
                    exit 1; \
                fi \
                && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
                && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \
                && cd alpine \
                && make module-geoip module-image-filter module-njs module-xslt \
                && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
                && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
            " \
            && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
            && apk del --no-network .build-deps \
            && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
            ;; \
    esac \
# remove checksum deps
    && apk del --no-network .checksum-deps \
# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
    && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
    && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
# Bring in curl and ca-certificates to make registering on DNS SD easier
    && apk add --no-cache curl ca-certificates
80 |
--------------------------------------------------------------------------------
/mainline/debian-otel/Dockerfile:
--------------------------------------------------------------------------------
#
# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
#
# PLEASE DO NOT EDIT IT DIRECTLY.
#
FROM nginx:1.27.5

ENV OTEL_VERSION 0.1.2

# Install the dynamic OpenTelemetry module (plus the stock dynamic modules)
# on top of the debian image. NGINX_VERSION, PKG_RELEASE, DYNPKG_RELEASE and
# NJS_* are inherited from the base image's ENV.
RUN set -x; \
    NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
    dpkgArch="$(dpkg --print-architecture)" \
    && nginxPackages=" \
        nginx=${NGINX_VERSION}-${PKG_RELEASE} \
        nginx-module-xslt=${NGINX_VERSION}-${DYNPKG_RELEASE} \
        nginx-module-geoip=${NGINX_VERSION}-${DYNPKG_RELEASE} \
        nginx-module-image-filter=${NGINX_VERSION}-${DYNPKG_RELEASE} \
        nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_RELEASE} \
        nginx-module-otel=${NGINX_VERSION}+${OTEL_VERSION}-${PKG_RELEASE} \
    " \
    && case "$dpkgArch" in \
        amd64|arm64) \
# arches officially built by upstream
            echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/mainline/debian/ bookworm nginx" >> /etc/apt/sources.list.d/nginx.list \
            && apt-get update \
            ;; \
        *) \
# we're on an architecture upstream doesn't officially build for
# let's build binaries from the published packaging sources
# new directory for storing sources and .deb files
            tempDir="$(mktemp -d)" \
            && chmod 777 "$tempDir" \
# (777 to ensure APT's "_apt" user can access it too)
            \
# save list of currently-installed packages so build dependencies can be cleanly removed later
            && savedAptMark="$(apt-mark showmanual)" \
            \
# build .deb files from upstream's packaging sources
            && apt-get update \
            && apt-get install --no-install-recommends --no-install-suggests -y \
                curl \
                devscripts \
                equivs \
                git \
                libxml2-utils \
                lsb-release \
                xsltproc \
            && ( \
                cd "$tempDir" \
                && REVISION="${NGINX_VERSION}-${PKG_RELEASE}" \
                && REVISION=${REVISION%~*} \
                && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
                && PKGOSSCHECKSUM="c773d98b567bd585c17f55702bf3e4c7d82b676bfbde395270e90a704dca3c758dfe0380b3f01770542b4fd9bed1f1149af4ce28bfc54a27a96df6b700ac1745 *${REVISION}.tar.gz" \
                && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
                    echo "pkg-oss tarball checksum verification succeeded!"; \
                else \
                    echo "pkg-oss tarball checksum verification failed!"; \
                    exit 1; \
                fi \
                && tar xzvf ${REVISION}.tar.gz \
                && cd pkg-oss-${REVISION} \
                && cd debian \
                && for target in module-otel; do \
                    make rules-$target; \
                    mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
                        debuild-$target/nginx-$NGINX_VERSION/debian/control; \
                done \
                && make module-otel \
            ) \
# we don't remove APT lists here because they get re-downloaded and removed later
            \
# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
            && apt-mark showmanual | xargs apt-mark auto > /dev/null \
            && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
            \
# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
            && ls -lAFh "$tempDir" \
            && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
            && grep '^Package: ' "$tempDir/Packages" \
            && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
# ...
# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
            && apt-get -o Acquire::GzipIndexes=false update \
            ;; \
    esac \
    \
    && apt-get install --no-install-recommends --no-install-suggests -y \
        $nginxPackages \
        gettext-base \
        curl \
    && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
    \
# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
    && if [ -n "$tempDir" ]; then \
        apt-get purge -y --auto-remove \
        && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
    fi
101 |
--------------------------------------------------------------------------------
/mainline/debian-perl/Dockerfile:
--------------------------------------------------------------------------------
1 | #
2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
3 | #
4 | # PLEASE DO NOT EDIT IT DIRECTLY.
5 | #
6 | FROM nginx:1.27.5
7 |
8 | RUN set -x; \
9 | NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
10 | dpkgArch="$(dpkg --print-architecture)" \
11 | && nginxPackages=" \
12 | nginx=${NGINX_VERSION}-${PKG_RELEASE} \
13 | nginx-module-xslt=${NGINX_VERSION}-${DYNPKG_RELEASE} \
14 | nginx-module-geoip=${NGINX_VERSION}-${DYNPKG_RELEASE} \
15 | nginx-module-image-filter=${NGINX_VERSION}-${DYNPKG_RELEASE} \
16 | nginx-module-perl=${NGINX_VERSION}-${DYNPKG_RELEASE} \
17 | nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_RELEASE} \
18 | " \
19 | && case "$dpkgArch" in \
20 | amd64|arm64) \
21 | # arches officially built by upstream
22 | echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/mainline/debian/ bookworm nginx" >> /etc/apt/sources.list.d/nginx.list \
23 | && apt-get update \
24 | ;; \
25 | *) \
26 | # we're on an architecture upstream doesn't officially build for
27 | # let's build binaries from the published packaging sources
28 | # new directory for storing sources and .deb files
29 | tempDir="$(mktemp -d)" \
30 | && chmod 777 "$tempDir" \
31 | # (777 to ensure APT's "_apt" user can access it too)
32 | \
33 | # save list of currently-installed packages so build dependencies can be cleanly removed later
34 | && savedAptMark="$(apt-mark showmanual)" \
35 | \
36 | # build .deb files from upstream's packaging sources
37 | && apt-get update \
38 | && apt-get install --no-install-recommends --no-install-suggests -y \
39 | curl \
40 | devscripts \
41 | equivs \
42 | git \
43 | libxml2-utils \
44 | lsb-release \
45 | xsltproc \
46 | && ( \
47 | cd "$tempDir" \
48 | && REVISION="${NGINX_VERSION}-${PKG_RELEASE}" \
49 | && REVISION=${REVISION%~*} \
50 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
51 | && PKGOSSCHECKSUM="c773d98b567bd585c17f55702bf3e4c7d82b676bfbde395270e90a704dca3c758dfe0380b3f01770542b4fd9bed1f1149af4ce28bfc54a27a96df6b700ac1745 *${REVISION}.tar.gz" \
52 | && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
53 | echo "pkg-oss tarball checksum verification succeeded!"; \
54 | else \
55 | echo "pkg-oss tarball checksum verification failed!"; \
56 | exit 1; \
57 | fi \
58 | && tar xzvf ${REVISION}.tar.gz \
59 | && cd pkg-oss-${REVISION} \
60 | && cd debian \
61 | && for target in module-perl; do \
62 | make rules-$target; \
63 | mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
64 | debuild-$target/nginx-$NGINX_VERSION/debian/control; \
65 | done \
66 | && make module-perl \
67 | ) \
68 | # we don't remove APT lists here because they get re-downloaded and removed later
69 | \
70 | # reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
71 | # (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
72 | && apt-mark showmanual | xargs apt-mark auto > /dev/null \
73 | && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
74 | \
75 | # create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
76 | && ls -lAFh "$tempDir" \
77 | && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
78 | && grep '^Package: ' "$tempDir/Packages" \
79 | && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
80 | # work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
81 | # Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
82 | # ...
83 | # E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
84 | && apt-get -o Acquire::GzipIndexes=false update \
85 | ;; \
86 | esac \
87 | \
88 | && apt-get install --no-install-recommends --no-install-suggests -y \
89 | $nginxPackages \
90 | gettext-base \
91 | curl \
92 | && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
93 | \
94 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
95 | && if [ -n "$tempDir" ]; then \
96 | apt-get purge -y --auto-remove \
97 | && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
98 | fi
99 |
--------------------------------------------------------------------------------
/mainline/debian/10-listen-on-ipv6-by-default.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=4:ts=4:et
3 |
4 | set -e
5 |
6 | entrypoint_log() {
7 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
8 | echo "$@"
9 | fi
10 | }
11 |
12 | ME=$(basename "$0")
13 | DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf"
14 |
15 | # check if we have ipv6 available
16 | if [ ! -f "/proc/net/if_inet6" ]; then
17 | entrypoint_log "$ME: info: ipv6 not available"
18 | exit 0
19 | fi
20 |
21 | if [ ! -f "/$DEFAULT_CONF_FILE" ]; then
22 | entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist"
23 | exit 0
24 | fi
25 |
26 | # check if the file can be modified, e.g. not on a r/o filesystem
27 | touch /$DEFAULT_CONF_FILE 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; }
28 |
29 | # check if the file is already modified, e.g. on a container restart
30 | grep -q "listen \[::]\:80;" /$DEFAULT_CONF_FILE && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; }
31 |
32 | if [ -f "/etc/os-release" ]; then
33 | . /etc/os-release
34 | else
35 | entrypoint_log "$ME: info: can not guess the operating system"
36 | exit 0
37 | fi
38 |
39 | entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE"
40 |
41 | case "$ID" in
42 | "debian")
43 | CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3)
44 | echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || {
45 | entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
46 | exit 0
47 | }
48 | ;;
49 | "alpine")
50 | CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2)
51 | echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || {
52 | entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
53 | exit 0
54 | }
55 | ;;
56 | *)
57 | entrypoint_log "$ME: info: Unsupported distribution"
58 | exit 0
59 | ;;
60 | esac
61 |
62 | # enable ipv6 on default.conf listen sockets
63 | sed -i -E 's,listen 80;,listen 80;\n listen [::]:80;,' /$DEFAULT_CONF_FILE
64 |
65 | entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE"
66 |
67 | exit 0
68 |
--------------------------------------------------------------------------------
/mainline/debian/15-local-resolvers.envsh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=2:ts=2:sts=2:et
3 |
4 | set -eu
5 |
6 | LC_ALL=C
7 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
8 |
9 | [ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0
10 |
11 | NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {if ($2 ~ ":") {print "["$2"]"} else {print $2}}' /etc/resolv.conf)
12 |
13 | NGINX_LOCAL_RESOLVERS="${NGINX_LOCAL_RESOLVERS% }"
14 |
15 | export NGINX_LOCAL_RESOLVERS
16 |
--------------------------------------------------------------------------------
/mainline/debian/20-envsubst-on-templates.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -e
4 |
5 | ME=$(basename "$0")
6 |
7 | entrypoint_log() {
8 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
9 | echo "$@"
10 | fi
11 | }
12 |
13 | add_stream_block() {
14 | local conffile="/etc/nginx/nginx.conf"
15 |
16 | if grep -q -E "\s*stream\s*\{" "$conffile"; then
17 | entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates"
18 | else
19 | # check if the file can be modified, e.g. not on a r/o filesystem
20 | touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; }
21 | entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf"
22 | cat << END >> "$conffile"
23 | # added by "$ME" on "$(date)"
24 | stream {
25 | include $stream_output_dir/*.conf;
26 | }
27 | END
28 | fi
29 | }
30 |
31 | auto_envsubst() {
32 | local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
33 | local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
34 | local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
35 | local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}"
36 | local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}"
37 | local filter="${NGINX_ENVSUBST_FILTER:-}"
38 |
39 | local template defined_envs relative_path output_path subdir
40 | defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
41 | [ -d "$template_dir" ] || return 0
42 | if [ ! -w "$output_dir" ]; then
43 | entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
44 | return 0
45 | fi
46 | find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
47 | relative_path="${template#"$template_dir/"}"
48 | output_path="$output_dir/${relative_path%"$suffix"}"
49 | subdir=$(dirname "$relative_path")
50 | # create a subdirectory where the template file exists
51 | mkdir -p "$output_dir/$subdir"
52 | entrypoint_log "$ME: Running envsubst on $template to $output_path"
53 | envsubst "$defined_envs" < "$template" > "$output_path"
54 | done
55 |
56 | # Print the first file with the stream suffix, this will be false if there are none
57 | if test -n "$(find "$template_dir" -name "*$stream_suffix" -print -quit)"; then
58 | mkdir -p "$stream_output_dir"
59 | if [ ! -w "$stream_output_dir" ]; then
60 | entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable"
61 | return 0
62 | fi
63 | add_stream_block
64 | find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do
65 | relative_path="${template#"$template_dir/"}"
66 | output_path="$stream_output_dir/${relative_path%"$stream_suffix"}"
67 | subdir=$(dirname "$relative_path")
68 | # create a subdirectory where the template file exists
69 | mkdir -p "$stream_output_dir/$subdir"
70 | entrypoint_log "$ME: Running envsubst on $template to $output_path"
71 | envsubst "$defined_envs" < "$template" > "$output_path"
72 | done
73 | fi
74 | }
75 |
76 | auto_envsubst
77 |
78 | exit 0
79 |
--------------------------------------------------------------------------------
/mainline/debian/30-tune-worker-processes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=2:ts=2:sts=2:et
3 |
4 | set -eu
5 |
6 | LC_ALL=C
7 | ME=$(basename "$0")
8 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
9 |
10 | [ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0
11 |
12 | touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; }
13 |
14 | ceildiv() {
15 | num=$1
16 | div=$2
17 | echo $(( (num + div - 1) / div ))
18 | }
19 |
20 | get_cpuset() {
21 | cpusetroot=$1
22 | cpusetfile=$2
23 | ncpu=0
24 | [ -f "$cpusetroot/$cpusetfile" ] || return 1
25 | for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do
26 | case "$token" in
27 | *-*)
28 | count=$( seq $(echo "$token" | tr '-' ' ') | wc -l )
29 | ncpu=$(( ncpu+count ))
30 | ;;
31 | *)
32 | ncpu=$(( ncpu+1 ))
33 | ;;
34 | esac
35 | done
36 | echo "$ncpu"
37 | }
38 |
39 | get_quota() {
40 | cpuroot=$1
41 | ncpu=0
42 | [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1
43 | [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1
44 | cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" )
45 | cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" )
46 | [ "$cfs_quota" = "-1" ] && return 1
47 | [ "$cfs_period" = "0" ] && return 1
48 | ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
49 | [ "$ncpu" -gt 0 ] || return 1
50 | echo "$ncpu"
51 | }
52 |
53 | get_quota_v2() {
54 | cpuroot=$1
55 | ncpu=0
56 | [ -f "$cpuroot/cpu.max" ] || return 1
57 | cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" )
58 | cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" )
59 | [ "$cfs_quota" = "max" ] && return 1
60 | [ "$cfs_period" = "0" ] && return 1
61 | ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
62 | [ "$ncpu" -gt 0 ] || return 1
63 | echo "$ncpu"
64 | }
65 |
66 | get_cgroup_v1_path() {
67 | needle=$1
68 | found=
69 | foundroot=
70 | mountpoint=
71 |
72 | [ -r "/proc/self/mountinfo" ] || return 1
73 | [ -r "/proc/self/cgroup" ] || return 1
74 |
75 | while IFS= read -r line; do
76 | case "$needle" in
77 | "cpuset")
78 | case "$line" in
79 | *cpuset*)
80 | found=$( echo "$line" | cut -d ' ' -f 4,5 )
81 | break
82 | ;;
83 | esac
84 | ;;
85 | "cpu")
86 | case "$line" in
87 | *cpuset*)
88 | ;;
89 | *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*)
90 | found=$( echo "$line" | cut -d ' ' -f 4,5 )
91 | break
92 | ;;
93 | esac
94 | esac
95 | done << __EOF__
96 | $( grep -F -- '- cgroup ' /proc/self/mountinfo )
97 | __EOF__
98 |
99 | while IFS= read -r line; do
100 | controller=$( echo "$line" | cut -d: -f 2 )
101 | case "$needle" in
102 | "cpuset")
103 | case "$controller" in
104 | cpuset)
105 | mountpoint=$( echo "$line" | cut -d: -f 3 )
106 | break
107 | ;;
108 | esac
109 | ;;
110 | "cpu")
111 | case "$controller" in
112 | cpu,cpuacct|cpuacct,cpu|cpuacct|cpu)
113 | mountpoint=$( echo "$line" | cut -d: -f 3 )
114 | break
115 | ;;
116 | esac
117 | ;;
118 | esac
119 | done << __EOF__
120 | $( grep -F -- 'cpu' /proc/self/cgroup )
121 | __EOF__
122 |
123 | case "${found%% *}" in
124 | "/")
125 | foundroot="${found##* }$mountpoint"
126 | ;;
127 | "$mountpoint")
128 | foundroot="${found##* }"
129 | ;;
130 | esac
131 | echo "$foundroot"
132 | }
133 |
134 | get_cgroup_v2_path() {
135 | found=
136 | foundroot=
137 | mountpoint=
138 |
139 | [ -r "/proc/self/mountinfo" ] || return 1
140 | [ -r "/proc/self/cgroup" ] || return 1
141 |
142 | while IFS= read -r line; do
143 | found=$( echo "$line" | cut -d ' ' -f 4,5 )
144 | done << __EOF__
145 | $( grep -F -- '- cgroup2 ' /proc/self/mountinfo )
146 | __EOF__
147 |
148 | while IFS= read -r line; do
149 | mountpoint=$( echo "$line" | cut -d: -f 3 )
150 | done << __EOF__
151 | $( grep -F -- '0::' /proc/self/cgroup )
152 | __EOF__
153 |
154 | case "${found%% *}" in
155 | "")
156 | return 1
157 | ;;
158 | "/")
159 | foundroot="${found##* }$mountpoint"
160 | ;;
161 | "$mountpoint" | /../*)
162 | foundroot="${found##* }"
163 | ;;
164 | esac
165 | echo "$foundroot"
166 | }
167 |
168 | ncpu_online=$( getconf _NPROCESSORS_ONLN )
169 | ncpu_cpuset=
170 | ncpu_quota=
171 | ncpu_cpuset_v2=
172 | ncpu_quota_v2=
173 |
174 | cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online
175 | cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online
176 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online
177 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online
178 |
179 | ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \
180 | "$ncpu_online" \
181 | "$ncpu_cpuset" \
182 | "$ncpu_quota" \
183 | "$ncpu_cpuset_v2" \
184 | "$ncpu_quota_v2" \
185 | | sort -n \
186 | | head -n 1 )
187 |
188 | sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf
189 |
--------------------------------------------------------------------------------
/mainline/debian/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=4:ts=4:et
3 |
4 | set -e
5 |
6 | entrypoint_log() {
7 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
8 | echo "$@"
9 | fi
10 | }
11 |
12 | if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then
13 | if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
14 | entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"
15 |
16 | entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
17 | find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
18 | case "$f" in
19 | *.envsh)
20 | if [ -x "$f" ]; then
21 | entrypoint_log "$0: Sourcing $f";
22 | . "$f"
23 | else
24 | # warn on shell scripts without exec bit
25 | entrypoint_log "$0: Ignoring $f, not executable";
26 | fi
27 | ;;
28 | *.sh)
29 | if [ -x "$f" ]; then
30 | entrypoint_log "$0: Launching $f";
31 | "$f"
32 | else
33 | # warn on shell scripts without exec bit
34 | entrypoint_log "$0: Ignoring $f, not executable";
35 | fi
36 | ;;
37 | *) entrypoint_log "$0: Ignoring $f";;
38 | esac
39 | done
40 |
41 | entrypoint_log "$0: Configuration complete; ready for start up"
42 | else
43 | entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
44 | fi
45 | fi
46 |
47 | exec "$@"
48 |
--------------------------------------------------------------------------------
/modules/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG NGINX_FROM_IMAGE=nginx:mainline
2 | FROM ${NGINX_FROM_IMAGE} AS builder
3 |
4 | ARG ENABLED_MODULES
5 |
6 | SHELL ["/bin/bash", "-exo", "pipefail", "-c"]
7 |
8 | RUN if [ "$ENABLED_MODULES" = "" ]; then \
9 | echo "No additional modules enabled, exiting"; \
10 | exit 1; \
11 | fi
12 |
13 | COPY ./ /modules/
14 |
15 | RUN apt-get update \
16 | && apt-get install -y --no-install-suggests --no-install-recommends \
17 | patch make wget git devscripts debhelper dpkg-dev \
18 | quilt lsb-release build-essential libxml2-utils xsltproc \
19 | equivs git g++ libparse-recdescent-perl \
20 | && XSLSCRIPT_SHA512="f7194c5198daeab9b3b0c3aebf006922c7df1d345d454bd8474489ff2eb6b4bf8e2ffe442489a45d1aab80da6ecebe0097759a1e12cc26b5f0613d05b7c09ffa *stdin" \
21 | && wget -O /tmp/xslscript.pl https://raw.githubusercontent.com/nginx/xslscript/9204424259c343ca08a18a78915f40f28025e093/xslscript.pl \
22 | && if [ "$(cat /tmp/xslscript.pl | openssl sha512 -r)" = "$XSLSCRIPT_SHA512" ]; then \
23 | echo "XSLScript checksum verification succeeded!"; \
24 | chmod +x /tmp/xslscript.pl; \
25 | mv /tmp/xslscript.pl /usr/local/bin/; \
26 | else \
27 | echo "XSLScript checksum verification failed!"; \
28 | exit 1; \
29 | fi \
30 | && git clone -b ${NGINX_VERSION}-${PKG_RELEASE%%~*} https://github.com/nginx/pkg-oss/ \
31 | && cd pkg-oss \
32 | && mkdir /tmp/packages \
33 | && for module in $ENABLED_MODULES; do \
34 | echo "Building $module for nginx-$NGINX_VERSION"; \
35 | if [ -d /modules/$module ]; then \
36 | echo "Building $module from user-supplied sources"; \
37 | # check if module sources file is there and not empty
38 | if [ ! -s /modules/$module/source ]; then \
39 | echo "No source file for $module in modules/$module/source, exiting"; \
40 | exit 1; \
41 | fi; \
42 | # some modules require build dependencies
43 | if [ -f /modules/$module/build-deps ]; then \
44 | echo "Installing $module build dependencies"; \
45 | apt-get update && apt-get install -y --no-install-suggests --no-install-recommends $(cat /modules/$module/build-deps | xargs); \
46 | fi; \
47 | # if a module has a build dependency that is not in a distro, provide a
48 | # shell script to fetch/build/install those
49 | # note that shared libraries produced as a result of this script will
50 | # not be copied from the builder image to the main one so build static
51 | if [ -x /modules/$module/prebuild ]; then \
52 | echo "Running prebuild script for $module"; \
53 | /modules/$module/prebuild; \
54 | fi; \
55 | /pkg-oss/build_module.sh -v $NGINX_VERSION -f -y -o /tmp/packages -n $module $(cat /modules/$module/source); \
56 | BUILT_MODULES="$BUILT_MODULES $(echo $module | tr '[A-Z]' '[a-z]' | tr -d '[/_\-\.\t ]')"; \
57 | elif make -C /pkg-oss/debian list | grep -P "^$module\s+\d" > /dev/null; then \
58 | echo "Building $module from pkg-oss sources"; \
59 | cd /pkg-oss/debian; \
60 | make rules-module-$module BASE_VERSION=$NGINX_VERSION NGINX_VERSION=$NGINX_VERSION; \
61 | mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" debuild-module-$module/nginx-$NGINX_VERSION/debian/control; \
62 | make module-$module BASE_VERSION=$NGINX_VERSION NGINX_VERSION=$NGINX_VERSION; \
63 | find ../../ -maxdepth 1 -mindepth 1 -type f -name "*.deb" -exec mv -v {} /tmp/packages/ \;; \
64 | BUILT_MODULES="$BUILT_MODULES $module"; \
65 | else \
66 | echo "Don't know how to build $module module, exiting"; \
67 | exit 1; \
68 | fi; \
69 | done \
70 | && echo "BUILT_MODULES=\"$BUILT_MODULES\"" > /tmp/packages/modules.env
71 |
72 | FROM ${NGINX_FROM_IMAGE}
73 | RUN --mount=type=bind,target=/tmp/packages/,source=/tmp/packages/,from=builder \
74 | apt-get update \
75 | && . /tmp/packages/modules.env \
76 | && for module in $BUILT_MODULES; do \
77 | apt-get install --no-install-suggests --no-install-recommends -y /tmp/packages/nginx-module-${module}_${NGINX_VERSION}*.deb; \
78 | done \
79 | && rm -rf /var/lib/apt/lists/
80 |
--------------------------------------------------------------------------------
/modules/Dockerfile.alpine:
--------------------------------------------------------------------------------
1 | ARG NGINX_FROM_IMAGE=nginx:mainline-alpine
2 | FROM ${NGINX_FROM_IMAGE} AS builder
3 |
4 | ARG ENABLED_MODULES
5 |
6 | SHELL ["/bin/ash", "-exo", "pipefail", "-c"]
7 |
8 | RUN if [ "$ENABLED_MODULES" = "" ]; then \
9 | echo "No additional modules enabled, exiting"; \
10 | exit 1; \
11 | fi
12 |
13 | COPY ./ /modules/
14 |
15 | RUN apk update \
16 | && apk add linux-headers openssl-dev pcre2-dev zlib-dev openssl abuild \
17 | musl-dev libxslt libxml2-utils make gcc unzip git \
18 | xz g++ coreutils curl \
19 | # allow abuild as a root user \
20 | && printf "#!/bin/sh\\nSETFATTR=true /usr/bin/abuild -F \"\$@\"\\n" > /usr/local/bin/abuild \
21 | && chmod +x /usr/local/bin/abuild \
22 | && git clone -b ${NGINX_VERSION}-${PKG_RELEASE} https://github.com/nginx/pkg-oss/ \
23 | && cd pkg-oss \
24 | && mkdir /tmp/packages \
25 | && for module in $ENABLED_MODULES; do \
26 | echo "Building $module for nginx-$NGINX_VERSION"; \
27 | if [ -d /modules/$module ]; then \
28 | echo "Building $module from user-supplied sources"; \
29 | # check if module sources file is there and not empty
30 | if [ ! -s /modules/$module/source ]; then \
31 | echo "No source file for $module in modules/$module/source, exiting"; \
32 | exit 1; \
33 | fi; \
34 | # some modules require build dependencies
35 | if [ -f /modules/$module/build-deps ]; then \
36 | echo "Installing $module build dependencies"; \
37 | apk update && apk add $(cat /modules/$module/build-deps | xargs); \
38 | fi; \
39 | # if a module has a build dependency that is not in a distro, provide a
40 | # shell script to fetch/build/install those
41 | # note that shared libraries produced as a result of this script will
42 | # not be copied from the builder image to the main one so build static
43 | if [ -x /modules/$module/prebuild ]; then \
44 | echo "Running prebuild script for $module"; \
45 | /modules/$module/prebuild; \
46 | fi; \
47 | /pkg-oss/build_module.sh -v $NGINX_VERSION -f -y -o /tmp/packages -n $module $(cat /modules/$module/source); \
48 | BUILT_MODULES="$BUILT_MODULES $(echo $module | tr '[A-Z]' '[a-z]' | tr -d '[/_\-\.\t ]')"; \
49 | elif make -C /pkg-oss/alpine list | grep -E "^$module\s+\d+" > /dev/null; then \
50 | echo "Building $module from pkg-oss sources"; \
51 | cd /pkg-oss/alpine; \
52 | make abuild-module-$module BASE_VERSION=$NGINX_VERSION NGINX_VERSION=$NGINX_VERSION; \
53 | apk add $(. ./abuild-module-$module/APKBUILD; echo $makedepends;); \
54 | make module-$module BASE_VERSION=$NGINX_VERSION NGINX_VERSION=$NGINX_VERSION; \
55 | find ~/packages -type f -name "*.apk" -exec mv -v {} /tmp/packages/ \;; \
56 | BUILT_MODULES="$BUILT_MODULES $module"; \
57 | else \
58 | echo "Don't know how to build $module module, exiting"; \
59 | exit 1; \
60 | fi; \
61 | done \
62 | && echo "BUILT_MODULES=\"$BUILT_MODULES\"" > /tmp/packages/modules.env
63 |
64 | FROM ${NGINX_FROM_IMAGE}
65 | RUN --mount=type=bind,target=/tmp/packages/,source=/tmp/packages/,from=builder \
66 | . /tmp/packages/modules.env \
67 | && for module in $BUILT_MODULES; do \
68 | apk add --no-cache --allow-untrusted /tmp/packages/nginx-module-${module}-${NGINX_VERSION}*.apk; \
69 | done
70 |
--------------------------------------------------------------------------------
/modules/README.md:
--------------------------------------------------------------------------------
1 | # Adding third-party modules to nginx official image
2 |
3 | It's possible to extend a mainline image with third-party modules either from
4 | your own instructions following a simple filesystem layout/syntax using
5 | `build_module.sh` helper script, or falling back to package sources from
6 | [pkg-oss](https://github.com/nginx/pkg-oss).
7 |
8 | ## Requirements
9 |
10 | To use the Dockerfiles provided here,
11 | [Docker BuildKit](https://docs.docker.com/build/buildkit/) is required.
12 | This is enabled by default as of version 23.0; for earlier versions this can be
13 | enabled by setting the environment variable `DOCKER_BUILDKIT` to `1`.
14 |
15 | If you can not or do not want to use BuildKit, you can use a previous version
16 | of these files, see for example
17 | https://github.com/nginx/docker-nginx/tree/4bf0763f4977fff7e9648add59e0540088f3ca9f/modules.
18 |
19 | ## Usage
20 |
21 | ```
22 | $ docker build --build-arg ENABLED_MODULES="ndk lua" -t my-nginx-with-lua .
23 | ```
24 | This command will attempt to build an image called `my-nginx-with-lua` based on
25 | official nginx docker hub image with two modules: `ndk` and `lua`.
26 | By default, a Debian-based image will be used. If you wish to use Alpine
27 | instead, add `-f Dockerfile.alpine` to the command line. By default, mainline
28 | images are used as a base, but it's possible to specify a different image by
29 | providing `NGINX_FROM_IMAGE` build argument, e.g. `--build-arg
30 | NGINX_FROM_IMAGE=nginx:stable`.
31 |
32 | The build script will look for module build definition files in a filesystem
33 | directory under the same name as the module (and resulting package) and if
34 | those are not found will try to look up requested modules in the pkg-oss
35 | repository.
36 |
37 | For well-known modules we maintain a set of build sources packages over at
38 | `pkg-oss`, so it's probably a good idea to rely on those instead of providing
39 | your own implementation.
40 |
41 | As of the time of writing this README, the following modules and their versions
42 | are available from `pkg-oss` repository:
43 |
44 | ```
45 | /pkg-oss $ LC_ALL=C make -C debian list-all-modules
46 | auth-spnego 1.1.2-1
47 | brotli 1.0.0-1
48 | encrypted-session 0.09-1
49 | fips-check 0.1-1
50 | geoip 1.27.4-1
51 | geoip2 3.4-1
52 | headers-more 0.37-1
53 | image-filter 1.27.4-1
54 | lua 0.10.28-1
55 | ndk 0.3.3-1
56 | njs 0.8.9-1
57 | otel 0.1.1-1
58 | passenger 6.0.26-1
59 | perl 1.27.4-1
60 | rtmp 1.2.2-1
61 | set-misc 0.33-1
62 | subs-filter 0.6.4-1
63 | xslt 1.27.4-1
64 | ```
65 |
66 | If you still want to provide your own instructions for a specific module,
67 | organize the build directory in the following way, e.g. for `echo` module:
68 |
69 | ```
70 | docker-nginx/modules $ tree echo
71 | echo
72 | ├── build-deps
73 | ├── prebuild
74 | └── source
75 |
76 | 0 directories, 3 files
77 | ```
78 |
79 | The scripts expect one file to always exist for a module you wish to build
80 | manually: `source`. It should contain a link to a zip/tarball source code of a
81 | module you want to build. In `build-deps` you can specify build dependencies
82 | for a module as found in Debian or Alpine repositories. `prebuild` is a shell
83 | script (make it `chmod +x prebuild`!) that will be executed prior to building
84 | the module but after installing the dependencies, so it can be used to install
85 | additional build dependencies if they are not available from Debian or Alpine.
86 | Keep in mind that those dependencies won't be automatically copied to the
87 | resulting image and if you're building a library, build it statically.
88 |
89 | Once the build is done in the builder image, the built packages are copied over
90 | to the resulting image and installed via apt/apk. The resulting image will be
91 | tagged and can be used the same way as an official docker hub image.
92 |
93 | Note that we can not provide any support for those modifications and in no way
94 | guarantee they will work as nicely as a build without third-party modules. If
95 | you encounter any issues running your image with the modules enabled, please
96 | reproduce with a vanilla image first.
97 |
98 | ## Examples
99 |
100 | ### docker-compose with pre-packaged modules
101 |
102 | If desired modules are already packaged in
103 | [pkg-oss](https://github.com/nginx/pkg-oss/) - e.g. `debian/Makefile.module-*`
104 | exists for a given module, you can use this example.
105 |
106 | 1. Create a directory for your project:
107 |
108 | ```
109 | mkdir myapp
110 | cd myapp
111 | ```
112 |
113 | 2. Populate the build context for a custom nginx image:
114 |
115 | ```
116 | mkdir my-nginx
117 | curl -o my-nginx/Dockerfile https://raw.githubusercontent.com/nginx/docker-nginx/master/modules/Dockerfile
118 | ```
119 |
120 | 3. Create a `docker-compose.yml` file:
121 |
122 | ```
123 | cat > docker-compose.yml << __EOF__
124 | version: "3.3"
125 | services:
126 | web:
127 | build:
128 | context: ./my-nginx/
129 | args:
130 | ENABLED_MODULES: ndk lua
131 | image: my-nginx-with-lua:v1
132 | ports:
133 | - "80:8000"
134 | __EOF__
135 | ```
136 |
137 | Now, running `docker-compose up --build -d` will build the image and run the application for you.
138 |
139 | ### docker-compose with a non-packaged module
140 |
141 | If a needed module is not available via `pkg-oss`, you can use this example.
142 |
143 | We're going to build the image with [ngx_cache_purge](https://github.com/FRiCKLE/ngx_cache_purge) module.
144 |
145 | The steps are similar to a previous example, with a notable difference of
146 | providing a URL to fetch the module source code from.
147 |
148 | 1. Create a directory for your project:
149 |
150 | ```
151 | mkdir myapp-cache
152 | cd myapp-cache
153 | ```
154 |
155 | 2. Populate the build context for a custom nginx image:
156 |
157 | ```
158 | mkdir my-nginx
159 | curl -o my-nginx/Dockerfile https://raw.githubusercontent.com/nginx/docker-nginx/master/modules/Dockerfile
160 | mkdir my-nginx/cachepurge
161 | echo "https://github.com/FRiCKLE/ngx_cache_purge/archive/2.3.tar.gz" > my-nginx/cachepurge/source
162 | ```
163 |
164 | 3. Create a `docker-compose.yml` file:
165 |
166 | ```
167 | cat > docker-compose.yml << __EOF__
168 | version: "3.3"
169 | services:
170 | web:
171 | build:
172 | context: ./my-nginx/
173 | args:
174 | ENABLED_MODULES: cachepurge
175 | image: my-nginx-with-cachepurge:v1
176 | ports:
177 | - "80:8080"
178 | __EOF__
179 | ```
180 |
181 | Now, running `docker-compose up --build -d` will build the image and run the application for you.
182 |
--------------------------------------------------------------------------------
/modules/echo/build-deps:
--------------------------------------------------------------------------------
1 | make gcc
2 |
--------------------------------------------------------------------------------
/modules/echo/prebuild:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | # if a module has a build dependency that is not in debian/alpine
4 | # use this script to fetch/build/install them
5 | #
6 | # note that shared libraries produced as a result of this script will
7 | # not be copied from the builder image to the resulting one, so you need to
8 | # build them statically
9 |
10 | echo "No prebuild stage required - all dependencies are satisfied already!"
11 |
12 | exit 0
13 |
--------------------------------------------------------------------------------
/modules/echo/source:
--------------------------------------------------------------------------------
1 | https://github.com/openresty/echo-nginx-module/archive/v0.63.tar.gz
2 |
--------------------------------------------------------------------------------
/stable/alpine-otel/Dockerfile:
--------------------------------------------------------------------------------
#
# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
#
# PLEASE DO NOT EDIT IT DIRECTLY.
#
# Adds the nginx-module-otel (OpenTelemetry) dynamic module package on top
# of the matching alpine image.
FROM nginx:1.28.0-alpine

ENV OTEL_VERSION 0.1.2

RUN set -x \
    && apkArch="$(cat /etc/apk/arch)" \
    && nginxPackages=" \
        nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
        nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
        nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
        nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
        nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \
        nginx-module-otel=${NGINX_VERSION}.${OTEL_VERSION}-r${PKG_RELEASE} \
    " \
# install prerequisites for public key and pkg-oss checks
    && apk add --no-cache --virtual .checksum-deps \
        openssl \
    && case "$apkArch" in \
        x86_64|aarch64) \
# arches officially built by upstream
            apk add -X "https://nginx.org/packages/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
            ;; \
        *) \
# we're on an architecture upstream doesn't officially build for
# let's build binaries from the published packaging sources
            set -x \
            && tempDir="$(mktemp -d)" \
            && chown nobody:nobody $tempDir \
            && apk add --no-cache --virtual .build-deps \
                gcc \
                libc-dev \
                make \
                openssl-dev \
                pcre2-dev \
                zlib-dev \
                linux-headers \
                cmake \
                bash \
                alpine-sdk \
                findutils \
                curl \
                xz \
                protobuf-dev \
                grpc-dev \
# verify the pkg-oss tarball checksum, then build, index and sign the
# packages as the unprivileged "nobody" user
            && su nobody -s /bin/sh -c " \
                export HOME=${tempDir} \
                && cd ${tempDir} \
                && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
                && PKGOSSCHECKSUM=\"517bc18954ccf4efddd51986584ca1f37966833ad342a297e1fe58fd0faf14c5a4dabcb23519dca433878a2927a95d6bea05a6749ee2fa67a33bf24cdc41b1e4 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \
                && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
                    echo \"pkg-oss tarball checksum verification succeeded!\"; \
                else \
                    echo \"pkg-oss tarball checksum verification failed!\"; \
                    exit 1; \
                fi \
                && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
                && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \
                && cd alpine \
                && make module-otel \
                && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
                && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
                " \
            && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
            && apk del --no-network .build-deps \
            && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
            ;; \
    esac \
# remove checksum deps
    && apk del --no-network .checksum-deps \
# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
    && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
    && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi
78 |
--------------------------------------------------------------------------------
/stable/alpine-perl/Dockerfile:
--------------------------------------------------------------------------------
#
# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
#
# PLEASE DO NOT EDIT IT DIRECTLY.
#
# Adds the nginx-module-perl dynamic module package on top of the matching
# alpine image.
FROM nginx:1.28.0-alpine

RUN set -x \
    && apkArch="$(cat /etc/apk/arch)" \
    && nginxPackages=" \
        nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
        nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
        nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
        nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
        nginx-module-perl=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
        nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \
    " \
# install prerequisites for public key and pkg-oss checks
    && apk add --no-cache --virtual .checksum-deps \
        openssl \
    && case "$apkArch" in \
        x86_64|aarch64) \
# arches officially built by upstream
            apk add -X "https://nginx.org/packages/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
            ;; \
        *) \
# we're on an architecture upstream doesn't officially build for
# let's build binaries from the published packaging sources
            set -x \
            && tempDir="$(mktemp -d)" \
            && chown nobody:nobody $tempDir \
            && apk add --no-cache --virtual .build-deps \
                gcc \
                libc-dev \
                make \
                openssl-dev \
                pcre2-dev \
                zlib-dev \
                linux-headers \
                perl-dev \
                bash \
                alpine-sdk \
                findutils \
                curl \
# verify the pkg-oss tarball checksum, then build, index and sign the
# packages as the unprivileged "nobody" user
            && su nobody -s /bin/sh -c " \
                export HOME=${tempDir} \
                && cd ${tempDir} \
                && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
                && PKGOSSCHECKSUM=\"517bc18954ccf4efddd51986584ca1f37966833ad342a297e1fe58fd0faf14c5a4dabcb23519dca433878a2927a95d6bea05a6749ee2fa67a33bf24cdc41b1e4 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \
                && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
                    echo \"pkg-oss tarball checksum verification succeeded!\"; \
                else \
                    echo \"pkg-oss tarball checksum verification failed!\"; \
                    exit 1; \
                fi \
                && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
                && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \
                && cd alpine \
                && make module-perl \
                && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
                && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
                " \
            && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
            && apk del --no-network .build-deps \
            && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
            ;; \
    esac \
# remove checksum deps
    && apk del --no-network .checksum-deps \
# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
    && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
    && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi
73 |
--------------------------------------------------------------------------------
/stable/alpine-slim/10-listen-on-ipv6-by-default.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=4:ts=4:et
3 |
4 | set -e
5 |
entrypoint_log() {
    # Print to stdout unless NGINX_ENTRYPOINT_QUIET_LOGS silences entrypoint output.
    [ -n "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ] || echo "$@"
}
11 |
ME=$(basename "$0")
DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf"

# check if we have ipv6 available
if [ ! -f "/proc/net/if_inet6" ]; then
    entrypoint_log "$ME: info: ipv6 not available"
    exit 0
fi

if [ ! -f "/$DEFAULT_CONF_FILE" ]; then
    entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist"
    exit 0
fi

# check if the file can be modified, e.g. not on a r/o filesystem
touch /$DEFAULT_CONF_FILE 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; }

# check if the file is already modified, e.g. on a container restart
grep -q "listen  \[::]\:80;" /$DEFAULT_CONF_FILE && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; }

# /etc/os-release defines $ID (distro name), used below to pick the right
# package-manager checksum tool
if [ -f "/etc/os-release" ]; then
    . /etc/os-release
else
    entrypoint_log "$ME: info: can not guess the operating system"
    exit 0
fi

entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE"

# Only edit default.conf when it is still byte-identical to the packaged
# version; a user-customized config is left untouched.
case "$ID" in
    "debian")
        CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3)
        echo "$CHECKSUM  /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || {
            entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
            exit 0
        }
        ;;
    "alpine")
        # apk manifest prints "<algo>:<hash> <path>"; extract just the hash
        CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2)
        echo "$CHECKSUM  /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || {
            entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
            exit 0
        }
        ;;
    *)
        entrypoint_log "$ME: info: Unsupported distribution"
        exit 0
        ;;
esac

# enable ipv6 on default.conf listen sockets
sed -i -E 's,listen       80;,listen       80;\n    listen  [::]:80;,' /$DEFAULT_CONF_FILE

entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE"

exit 0
68 |
--------------------------------------------------------------------------------
/stable/alpine-slim/15-local-resolvers.envsh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=2:ts=2:sts=2:et
3 |
4 | set -eu
5 |
6 | LC_ALL=C
7 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
8 |
9 | [ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0
10 |
11 | NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {if ($2 ~ ":") {print "["$2"]"} else {print $2}}' /etc/resolv.conf)
12 |
13 | NGINX_LOCAL_RESOLVERS="${NGINX_LOCAL_RESOLVERS% }"
14 |
15 | export NGINX_LOCAL_RESOLVERS
16 |
--------------------------------------------------------------------------------
/stable/alpine-slim/20-envsubst-on-templates.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -e
4 |
5 | ME=$(basename "$0")
6 |
entrypoint_log() {
  # Print to stdout unless NGINX_ENTRYPOINT_QUIET_LOGS silences entrypoint output.
  [ -n "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ] || echo "$@"
}
12 |
add_stream_block() {
  # Ensure nginx.conf includes $stream_output_dir/*.conf (dynamically scoped
  # from auto_envsubst) inside a stream{} block, appending one if absent.
  local conffile="/etc/nginx/nginx.conf"

  if ! grep -q -E "\s*stream\s*\{" "$conffile"; then
    # check if the file can be modified, e.g. not on a r/o filesystem
    touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; }
    entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf"
    cat << END >> "$conffile"
# added by "$ME" on "$(date)"
stream {
  include $stream_output_dir/*.conf;
}
END
  else
    entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates"
  fi
}
30 |
auto_envsubst() {
  # Render *.template and *.stream-template files with envsubst into the
  # configured output directories, preserving any subdirectory layout.
  local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
  local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
  local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
  local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}"
  local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}"
  local filter="${NGINX_ENVSUBST_FILTER:-}"

  local template defined_envs relative_path output_path subdir
  # Build the '${VAR} ${VAR2} ...' list of names envsubst may substitute:
  # every environment variable whose name matches $filter (all of them when
  # the filter is empty).
  defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
  [ -d "$template_dir" ] || return 0
  if [ ! -w "$output_dir" ]; then
    entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
    return 0
  fi
  find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
    relative_path="${template#"$template_dir/"}"
    output_path="$output_dir/${relative_path%"$suffix"}"
    subdir=$(dirname "$relative_path")
    # create a subdirectory where the template file exists
    mkdir -p "$output_dir/$subdir"
    entrypoint_log "$ME: Running envsubst on $template to $output_path"
    envsubst "$defined_envs" < "$template" > "$output_path"
  done

  # Print the first file with the stream suffix, this will be false if there are none
  if test -n "$(find "$template_dir" -name "*$stream_suffix" -print -quit)"; then
    mkdir -p "$stream_output_dir"
    if [ ! -w "$stream_output_dir" ]; then
      entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable"
      return 0
    fi
    # make sure nginx.conf actually includes the rendered stream configs
    add_stream_block
    find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do
      relative_path="${template#"$template_dir/"}"
      output_path="$stream_output_dir/${relative_path%"$stream_suffix"}"
      subdir=$(dirname "$relative_path")
      # create a subdirectory where the template file exists
      mkdir -p "$stream_output_dir/$subdir"
      entrypoint_log "$ME: Running envsubst on $template to $output_path"
      envsubst "$defined_envs" < "$template" > "$output_path"
    done
  fi
}
75 |
76 | auto_envsubst
77 |
78 | exit 0
79 |
--------------------------------------------------------------------------------
/stable/alpine-slim/30-tune-worker-processes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=2:ts=2:sts=2:et
3 |
4 | set -eu
5 |
6 | LC_ALL=C
7 | ME=$(basename "$0")
8 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
9 |
10 | [ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0
11 |
12 | touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; }
13 |
ceildiv() {
  # Integer ceiling division: smallest integer >= $1 / $2.
  # (Shell arithmetic truncates, so bias the numerator before dividing.)
  echo $(( ($1 + $2 - 1) / $2 ))
}
19 |
get_cpuset() {
  # Count the CPUs granted by a cpuset list (e.g. "0-3,5") read from the
  # file $2 under directory $1.  Prints the count; returns 1 when the file
  # does not exist.
  cpuset_dir=$1
  cpuset_file=$2
  ncpu=0
  [ -f "$cpuset_dir/$cpuset_file" ] || return 1
  for token in $( tr ',' ' ' < "$cpuset_dir/$cpuset_file" ); do
    case "$token" in
      *-*)
        # "lo-hi" range: expand it with seq and count the members
        range_len=$( seq $(echo "$token" | tr '-' ' ') | wc -l )
        ncpu=$(( ncpu + range_len ))
        ;;
      *)
        # single CPU id
        ncpu=$(( ncpu + 1 ))
        ;;
    esac
  done
  echo "$ncpu"
}
38 |
get_quota() {
  # Compute a CPU count from the cgroup v1 CFS bandwidth files under $1.
  # Prints ceil(quota/period); returns 1 when no limit is set (-1) or the
  # data is unusable.
  quota_dir=$1
  ncpu=0
  if [ ! -f "$quota_dir/cpu.cfs_quota_us" ] || [ ! -f "$quota_dir/cpu.cfs_period_us" ]; then
    return 1
  fi
  cfs_quota=$( cat "$quota_dir/cpu.cfs_quota_us" )
  cfs_period=$( cat "$quota_dir/cpu.cfs_period_us" )
  # -1 means "unlimited"; a zero period would divide by zero
  if [ "$cfs_quota" = "-1" ] || [ "$cfs_period" = "0" ]; then
    return 1
  fi
  ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
  [ "$ncpu" -gt 0 ] || return 1
  echo "$ncpu"
}
52 |
get_quota_v2() {
  # Compute a CPU count from the cgroup v2 cpu.max file under $1.
  # Prints ceil(quota/period); returns 1 when the quota is "max"
  # (unlimited) or the data is unusable.
  quota_dir=$1
  ncpu=0
  [ -f "$quota_dir/cpu.max" ] || return 1
  # cpu.max holds "<quota> <period>" on a single line
  cfs_quota=$( cut -d' ' -f 1 < "$quota_dir/cpu.max" )
  cfs_period=$( cut -d' ' -f 2 < "$quota_dir/cpu.max" )
  if [ "$cfs_quota" = "max" ] || [ "$cfs_period" = "0" ]; then
    return 1
  fi
  ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
  [ "$ncpu" -gt 0 ] || return 1
  echo "$ncpu"
}
65 |
get_cgroup_v1_path() {
  # Resolve the cgroup v1 filesystem path for the controller named in $1
  # ("cpuset" or "cpu") by cross-referencing /proc/self/mountinfo with
  # /proc/self/cgroup.  Prints the resolved path (possibly empty).
  needle=$1
  found=
  foundroot=
  mountpoint=

  [ -r "/proc/self/mountinfo" ] || return 1
  [ -r "/proc/self/cgroup" ] || return 1

  # Pass 1: pick the mountinfo entry for the requested controller; fields
  # 4 and 5 are the mount root and the mount point.
  while IFS= read -r line; do
    case "$needle" in
      "cpuset")
        case "$line" in
          *cpuset*)
            found=$( echo "$line" | cut -d ' ' -f 4,5 )
            break
            ;;
        esac
        ;;
      "cpu")
        case "$line" in
          *cpuset*)
            # skip cpuset mounts when looking for the cpu controller
            ;;
          *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*)
            found=$( echo "$line" | cut -d ' ' -f 4,5 )
            break
            ;;
        esac
    esac
  done << __EOF__
$( grep -F -- '- cgroup ' /proc/self/mountinfo )
__EOF__

  # Pass 2: find this process's own cgroup path for the controller
  # (field 2 is the controller list, field 3 the path).
  while IFS= read -r line; do
    controller=$( echo "$line" | cut -d: -f 2 )
    case "$needle" in
      "cpuset")
        case "$controller" in
          cpuset)
            mountpoint=$( echo "$line" | cut -d: -f 3 )
            break
            ;;
        esac
        ;;
      "cpu")
        case "$controller" in
          cpu,cpuacct|cpuacct,cpu|cpuacct|cpu)
            mountpoint=$( echo "$line" | cut -d: -f 3 )
            break
            ;;
        esac
        ;;
    esac
  done << __EOF__
$( grep -F -- 'cpu' /proc/self/cgroup )
__EOF__

  # Combine the two: a mount root of "/" means the per-process path hangs
  # below the mount point; a root equal to the per-process path means the
  # mount point already is the cgroup directory.
  case "${found%% *}" in
    "/")
      foundroot="${found##* }$mountpoint"
      ;;
    "$mountpoint")
      foundroot="${found##* }"
      ;;
  esac
  echo "$foundroot"
}
133 |
get_cgroup_v2_path() {
  # Resolve the cgroup v2 (unified hierarchy) filesystem path for this
  # process by combining /proc/self/mountinfo with the "0::" record of
  # /proc/self/cgroup.  Prints the path; returns 1 when unavailable.
  found=
  foundroot=
  mountpoint=

  [ -r "/proc/self/mountinfo" ] || return 1
  [ -r "/proc/self/cgroup" ] || return 1

  # last cgroup2 mount wins; fields 4 and 5 are mount root and mount point
  while IFS= read -r line; do
    found=$( echo "$line" | cut -d ' ' -f 4,5 )
  done << __EOF__
$( grep -F -- '- cgroup2 ' /proc/self/mountinfo )
__EOF__

  # this process's cgroup path (field 3 of the "0::" record)
  while IFS= read -r line; do
    mountpoint=$( echo "$line" | cut -d: -f 3 )
  done << __EOF__
$( grep -F -- '0::' /proc/self/cgroup )
__EOF__

  case "${found%% *}" in
    "")
      # no cgroup2 mount at all
      return 1
      ;;
    "/")
      foundroot="${found##* }$mountpoint"
      ;;
    "$mountpoint" | /../*)
      foundroot="${found##* }"
      ;;
  esac
  echo "$foundroot"
}
167 |
168 | ncpu_online=$( getconf _NPROCESSORS_ONLN )
169 | ncpu_cpuset=
170 | ncpu_quota=
171 | ncpu_cpuset_v2=
172 | ncpu_quota_v2=
173 |
174 | cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online
175 | cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online
176 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online
177 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online
178 |
179 | ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \
180 | "$ncpu_online" \
181 | "$ncpu_cpuset" \
182 | "$ncpu_quota" \
183 | "$ncpu_cpuset_v2" \
184 | "$ncpu_quota_v2" \
185 | | sort -n \
186 | | head -n 1 )
187 |
188 | sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf
189 |
--------------------------------------------------------------------------------
/stable/alpine-slim/Dockerfile:
--------------------------------------------------------------------------------
#
# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
#
# PLEASE DO NOT EDIT IT DIRECTLY.
#
# Slim variant: only the nginx package itself, the entrypoint scripts,
# envsubst (for templating) and tzdata.
FROM alpine:3.21

LABEL maintainer="NGINX Docker Maintainers <docker-maint@nginx.com>"

ENV NGINX_VERSION 1.28.0
ENV PKG_RELEASE 1
ENV DYNPKG_RELEASE 1

RUN set -x \
# create nginx user/group first, to be consistent throughout docker variants
    && addgroup -g 101 -S nginx \
    && adduser -S -D -H -u 101 -h /var/cache/nginx -s /sbin/nologin -G nginx -g nginx nginx \
    && apkArch="$(cat /etc/apk/arch)" \
    && nginxPackages=" \
        nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
    " \
# install prerequisites for public key and pkg-oss checks
    && apk add --no-cache --virtual .checksum-deps \
        openssl \
    && case "$apkArch" in \
        x86_64|aarch64) \
# arches officially built by upstream
            set -x \
            && KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \
            && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \
            && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \
                echo "key verification succeeded!"; \
                mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \
            else \
                echo "key verification failed!"; \
                exit 1; \
            fi \
            && apk add -X "https://nginx.org/packages/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
            ;; \
        *) \
# we're on an architecture upstream doesn't officially build for
# let's build binaries from the published packaging sources
            set -x \
            && tempDir="$(mktemp -d)" \
            && chown nobody:nobody $tempDir \
            && apk add --no-cache --virtual .build-deps \
                gcc \
                libc-dev \
                make \
                openssl-dev \
                pcre2-dev \
                zlib-dev \
                linux-headers \
                bash \
                alpine-sdk \
                findutils \
                curl \
# verify the pkg-oss tarball checksum, then build, index and sign the
# packages as the unprivileged "nobody" user
            && su nobody -s /bin/sh -c " \
                export HOME=${tempDir} \
                && cd ${tempDir} \
                && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
                && PKGOSSCHECKSUM=\"517bc18954ccf4efddd51986584ca1f37966833ad342a297e1fe58fd0faf14c5a4dabcb23519dca433878a2927a95d6bea05a6749ee2fa67a33bf24cdc41b1e4 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \
                && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
                    echo \"pkg-oss tarball checksum verification succeeded!\"; \
                else \
                    echo \"pkg-oss tarball checksum verification failed!\"; \
                    exit 1; \
                fi \
                && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
                && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \
                && cd alpine \
                && make base \
                && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
                && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
                " \
            && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
            && apk del --no-network .build-deps \
            && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
            ;; \
    esac \
# remove checksum deps
    && apk del --no-network .checksum-deps \
# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
    && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
    && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
# Add `envsubst` for templating environment variables
    && apk add --no-cache gettext-envsubst \
# Bring in tzdata so users could set the timezones through the environment
# variables
    && apk add --no-cache tzdata \
# forward request and error logs to docker log collector
    && ln -sf /dev/stdout /var/log/nginx/access.log \
    && ln -sf /dev/stderr /var/log/nginx/error.log \
# create a docker-entrypoint.d directory
    && mkdir /docker-entrypoint.d

COPY docker-entrypoint.sh /
COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d
COPY 15-local-resolvers.envsh /docker-entrypoint.d
COPY 20-envsubst-on-templates.sh /docker-entrypoint.d
COPY 30-tune-worker-processes.sh /docker-entrypoint.d
ENTRYPOINT ["/docker-entrypoint.sh"]

EXPOSE 80

STOPSIGNAL SIGQUIT

CMD ["nginx", "-g", "daemon off;"]
109 |
--------------------------------------------------------------------------------
/stable/alpine-slim/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=4:ts=4:et
3 |
4 | set -e
5 |
entrypoint_log() {
    # Print to stdout unless NGINX_ENTRYPOINT_QUIET_LOGS silences entrypoint output.
    [ -n "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ] || echo "$@"
}
11 |
# Run the configuration scripts only when launching nginx itself; any other
# command (e.g. a debugging shell) is exec'd untouched at the bottom.
if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then
    # "read v" succeeds only when find printed at least one entry, i.e. the
    # directory contains something to run
    if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
        entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"

        entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
        # sort -V orders by numeric prefix (10-..., 15-..., 20-..., 30-...)
        find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
            case "$f" in
                *.envsh)
                    if [ -x "$f" ]; then
                        entrypoint_log "$0: Sourcing $f";
                        . "$f"
                    else
                        # warn on shell scripts without exec bit
                        entrypoint_log "$0: Ignoring $f, not executable";
                    fi
                    ;;
                *.sh)
                    if [ -x "$f" ]; then
                        entrypoint_log "$0: Launching $f";
                        "$f"
                    else
                        # warn on shell scripts without exec bit
                        entrypoint_log "$0: Ignoring $f, not executable";
                    fi
                    ;;
                *) entrypoint_log "$0: Ignoring $f";;
            esac
        done

        entrypoint_log "$0: Configuration complete; ready for start up"
    else
        entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
    fi
fi

# replace this shell with the requested command (nginx gets PID signals)
exec "$@"
48 |
--------------------------------------------------------------------------------
/stable/alpine/Dockerfile:
--------------------------------------------------------------------------------
#
# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
#
# PLEASE DO NOT EDIT IT DIRECTLY.
#
# Adds the standard dynamic module set (xslt, geoip, image-filter, njs)
# plus curl/ca-certificates on top of the slim image.
FROM nginx:1.28.0-alpine-slim

ENV NJS_VERSION 0.8.10
ENV NJS_RELEASE 1

RUN set -x \
    && apkArch="$(cat /etc/apk/arch)" \
    && nginxPackages=" \
        nginx=${NGINX_VERSION}-r${PKG_RELEASE} \
        nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
        nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
        nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \
        nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \
    " \
# install prerequisites for public key and pkg-oss checks
    && apk add --no-cache --virtual .checksum-deps \
        openssl \
    && case "$apkArch" in \
        x86_64|aarch64) \
# arches officially built by upstream
            apk add -X "https://nginx.org/packages/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \
            ;; \
        *) \
# we're on an architecture upstream doesn't officially build for
# let's build binaries from the published packaging sources
            set -x \
            && tempDir="$(mktemp -d)" \
            && chown nobody:nobody $tempDir \
            && apk add --no-cache --virtual .build-deps \
                gcc \
                libc-dev \
                make \
                openssl-dev \
                pcre2-dev \
                zlib-dev \
                linux-headers \
                libxslt-dev \
                gd-dev \
                geoip-dev \
                libedit-dev \
                bash \
                alpine-sdk \
                findutils \
                curl \
# verify the pkg-oss tarball checksum, then build, index and sign the
# packages as the unprivileged "nobody" user
            && su nobody -s /bin/sh -c " \
                export HOME=${tempDir} \
                && cd ${tempDir} \
                && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
                && PKGOSSCHECKSUM=\"517bc18954ccf4efddd51986584ca1f37966833ad342a297e1fe58fd0faf14c5a4dabcb23519dca433878a2927a95d6bea05a6749ee2fa67a33bf24cdc41b1e4 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \
                && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \
                    echo \"pkg-oss tarball checksum verification succeeded!\"; \
                else \
                    echo \"pkg-oss tarball checksum verification failed!\"; \
                    exit 1; \
                fi \
                && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \
                && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \
                && cd alpine \
                && make module-geoip module-image-filter module-njs module-xslt \
                && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \
                && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \
                " \
            && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \
            && apk del --no-network .build-deps \
            && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \
            ;; \
    esac \
# remove checksum deps
    && apk del --no-network .checksum-deps \
# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
    && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \
    && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \
# Bring in curl and ca-certificates to make registering on DNS SD easier
    && apk add --no-cache curl ca-certificates
80 |
--------------------------------------------------------------------------------
/stable/debian-otel/Dockerfile:
--------------------------------------------------------------------------------
#
# NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
#
# PLEASE DO NOT EDIT IT DIRECTLY.
#
# Adds the nginx-module-otel (OpenTelemetry) dynamic module package on top
# of the matching debian image.
FROM nginx:1.28.0

ENV OTEL_VERSION 0.1.2

RUN set -x; \
    NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
    dpkgArch="$(dpkg --print-architecture)" \
    && nginxPackages=" \
        nginx=${NGINX_VERSION}-${PKG_RELEASE} \
        nginx-module-xslt=${NGINX_VERSION}-${DYNPKG_RELEASE} \
        nginx-module-geoip=${NGINX_VERSION}-${DYNPKG_RELEASE} \
        nginx-module-image-filter=${NGINX_VERSION}-${DYNPKG_RELEASE} \
        nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_RELEASE} \
        nginx-module-otel=${NGINX_VERSION}+${OTEL_VERSION}-${PKG_RELEASE} \
    " \
    && case "$dpkgArch" in \
        amd64|arm64) \
# arches officialy built by upstream
            echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/debian/ bookworm nginx" >> /etc/apt/sources.list.d/nginx.list \
            && apt-get update \
            ;; \
        *) \
# we're on an architecture upstream doesn't officially build for
# let's build binaries from the published packaging sources
# new directory for storing sources and .deb files
            tempDir="$(mktemp -d)" \
            && chmod 777 "$tempDir" \
# (777 to ensure APT's "_apt" user can access it too)
            \
# save list of currently-installed packages so build dependencies can be cleanly removed later
            && savedAptMark="$(apt-mark showmanual)" \
            \
# build .deb files from upstream's packaging sources
            && apt-get update \
            && apt-get install --no-install-recommends --no-install-suggests -y \
                curl \
                devscripts \
                equivs \
                git \
                libxml2-utils \
                lsb-release \
                xsltproc \
            && ( \
                cd "$tempDir" \
                && REVISION="${NGINX_VERSION}-${PKG_RELEASE}" \
                && REVISION=${REVISION%~*} \
                && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
                && PKGOSSCHECKSUM="517bc18954ccf4efddd51986584ca1f37966833ad342a297e1fe58fd0faf14c5a4dabcb23519dca433878a2927a95d6bea05a6749ee2fa67a33bf24cdc41b1e4 *${REVISION}.tar.gz" \
                && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
                    echo "pkg-oss tarball checksum verification succeeded!"; \
                else \
                    echo "pkg-oss tarball checksum verification failed!"; \
                    exit 1; \
                fi \
                && tar xzvf ${REVISION}.tar.gz \
                && cd pkg-oss-${REVISION} \
                && cd debian \
                && for target in module-otel; do \
                    make rules-$target; \
                    mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
                        debuild-$target/nginx-$NGINX_VERSION/debian/control; \
                done \
                && make module-otel \
            ) \
# we don't remove APT lists here because they get re-downloaded and removed later
            \
# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
            && apt-mark showmanual | xargs apt-mark auto > /dev/null \
            && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
            \
# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
            && ls -lAFh "$tempDir" \
            && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
            && grep '^Package: ' "$tempDir/Packages" \
            && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
#   Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
#   ...
#   E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
            && apt-get -o Acquire::GzipIndexes=false update \
            ;; \
    esac \
    \
    && apt-get install --no-install-recommends --no-install-suggests -y \
                        $nginxPackages \
                        gettext-base \
                        curl \
    && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
    \
# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
    && if [ -n "$tempDir" ]; then \
        apt-get purge -y --auto-remove \
        && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
    fi
101 |
--------------------------------------------------------------------------------
/stable/debian-perl/Dockerfile:
--------------------------------------------------------------------------------
1 | #
2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh"
3 | #
4 | # PLEASE DO NOT EDIT IT DIRECTLY.
5 | #
# stable-channel base image; the perl module packages installed below must
# match this exact nginx version
FROM nginx:1.28.0

RUN set -x; \
    NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \
    dpkgArch="$(dpkg --print-architecture)" \
    && nginxPackages=" \
        nginx=${NGINX_VERSION}-${PKG_RELEASE} \
        nginx-module-xslt=${NGINX_VERSION}-${DYNPKG_RELEASE} \
        nginx-module-geoip=${NGINX_VERSION}-${DYNPKG_RELEASE} \
        nginx-module-image-filter=${NGINX_VERSION}-${DYNPKG_RELEASE} \
        nginx-module-perl=${NGINX_VERSION}-${DYNPKG_RELEASE} \
        nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_RELEASE} \
    " \
    && case "$dpkgArch" in \
        amd64|arm64) \
# arches officially built by upstream
            echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/debian/ bookworm nginx" >> /etc/apt/sources.list.d/nginx.list \
            && apt-get update \
            ;; \
        *) \
# we're on an architecture upstream doesn't officially build for
# let's build binaries from the published packaging sources
# new directory for storing sources and .deb files
            tempDir="$(mktemp -d)" \
            && chmod 777 "$tempDir" \
# (777 to ensure APT's "_apt" user can access it too)
            \
# save list of currently-installed packages so build dependencies can be cleanly removed later
            && savedAptMark="$(apt-mark showmanual)" \
            \
# build .deb files from upstream's packaging sources
            && apt-get update \
            && apt-get install --no-install-recommends --no-install-suggests -y \
                curl \
                devscripts \
                equivs \
                git \
                libxml2-utils \
                lsb-release \
                xsltproc \
            && ( \
                cd "$tempDir" \
                && REVISION="${NGINX_VERSION}-${PKG_RELEASE}" \
                && REVISION=${REVISION%~*} \
                && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \
# pin the exact pkg-oss tarball by sha512 so a tampered download fails the build
                && PKGOSSCHECKSUM="517bc18954ccf4efddd51986584ca1f37966833ad342a297e1fe58fd0faf14c5a4dabcb23519dca433878a2927a95d6bea05a6749ee2fa67a33bf24cdc41b1e4 *${REVISION}.tar.gz" \
                && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \
                    echo "pkg-oss tarball checksum verification succeeded!"; \
                else \
                    echo "pkg-oss tarball checksum verification failed!"; \
                    exit 1; \
                fi \
                && tar xzvf ${REVISION}.tar.gz \
                && cd pkg-oss-${REVISION} \
                && cd debian \
                && for target in module-perl; do \
                    make rules-$target; \
                    mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \
                        debuild-$target/nginx-$NGINX_VERSION/debian/control; \
                done \
                && make module-perl \
            ) \
# we don't remove APT lists here because they get re-downloaded and removed later
            \
# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies)
            && apt-mark showmanual | xargs apt-mark auto > /dev/null \
            && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \
            \
# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be)
            && ls -lAFh "$tempDir" \
            && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \
            && grep '^Package: ' "$tempDir/Packages" \
            && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \
# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes")
# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
# ...
# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied)
            && apt-get -o Acquire::GzipIndexes=false update \
            ;; \
    esac \
    \
    && apt-get install --no-install-recommends --no-install-suggests -y \
        $nginxPackages \
        gettext-base \
        curl \
    && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \
    \
# if we have leftovers from building, let's purge them (including extra, unnecessary build deps)
    && if [ -n "$tempDir" ]; then \
        apt-get purge -y --auto-remove \
        && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \
    fi
99 |
--------------------------------------------------------------------------------
/stable/debian/10-listen-on-ipv6-by-default.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=4:ts=4:et
3 |
4 | set -e
5 |
# Emit a log line unless quiet mode is enabled via NGINX_ENTRYPOINT_QUIET_LOGS.
entrypoint_log() {
    case "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" in
        "") echo "$@" ;;
    esac
}
11 |
ME=$(basename "$0")
# NB: no leading slash on purpose — the value is used both as "/$DEFAULT_CONF_FILE"
# and as a grep pattern against the package manager's manifest output below
DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf"

# check if we have ipv6 available
if [ ! -f "/proc/net/if_inet6" ]; then
    entrypoint_log "$ME: info: ipv6 not available"
    exit 0
fi

if [ ! -f "/$DEFAULT_CONF_FILE" ]; then
    entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist"
    exit 0
fi

# check if the file can be modified, e.g. not on a r/o filesystem
touch /$DEFAULT_CONF_FILE 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; }

# check if the file is already modified, e.g. on a container restart
grep -q "listen \[::]\:80;" /$DEFAULT_CONF_FILE && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; }

# $ID (distro id) is provided by os-release and drives the case below
if [ -f "/etc/os-release" ]; then
    . /etc/os-release
else
    entrypoint_log "$ME: info: can not guess the operating system"
    exit 0
fi

entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE"

# Only patch a pristine, packaged default.conf: compare the file's checksum
# against the package manager's recorded one and bail out on any user edit.
case "$ID" in
    "debian")
        CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3)
        echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || {
            entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
            exit 0
        }
        ;;
    "alpine")
        CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2)
        echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || {
            entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
            exit 0
        }
        ;;
    *)
        entrypoint_log "$ME: info: Unsupported distribution"
        exit 0
        ;;
esac

# enable ipv6 on default.conf listen sockets
sed -i -E 's,listen 80;,listen 80;\n    listen [::]:80;,' /$DEFAULT_CONF_FILE

entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE"

exit 0
68 |
--------------------------------------------------------------------------------
/stable/debian/15-local-resolvers.envsh:
--------------------------------------------------------------------------------
#!/bin/sh
# vim:sw=2:ts=2:sts=2:et

set -eu

LC_ALL=C
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin

# Opt-in only; this file is sourced by the entrypoint, hence "return".
[ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0

# Collect the nameservers from /etc/resolv.conf as a space-separated list,
# wrapping IPv6 addresses in brackets so they are usable in an nginx
# "resolver" directive; no trailing separator is emitted.
NGINX_LOCAL_RESOLVERS=$(awk '$1 == "nameserver" { printf "%s%s", sep, ($2 ~ ":" ? "[" $2 "]" : $2); sep = " " }' /etc/resolv.conf)

export NGINX_LOCAL_RESOLVERS
16 |
--------------------------------------------------------------------------------
/stable/debian/20-envsubst-on-templates.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -e
4 |
5 | ME=$(basename "$0")
6 |
# Log helper: prints its arguments unless NGINX_ENTRYPOINT_QUIET_LOGS is set.
entrypoint_log() {
  [ -n "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ] || echo "$@"
}
12 |
# Ensure /etc/nginx/nginx.conf contains a stream{} block that includes the
# rendered stream configs. NOTE(review): relies on shell dynamic scoping —
# $stream_output_dir is a `local` set by the caller (auto_envsubst), not a
# parameter of this function.
add_stream_block() {
  local conffile="/etc/nginx/nginx.conf"

  if grep -q -E "\s*stream\s*\{" "$conffile"; then
    entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates"
  else
    # check if the file can be modified, e.g. not on a r/o filesystem
    # (exit 0, not return: an unmodifiable config is treated as non-fatal)
    touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; }
    entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf"
    cat << END >> "$conffile"
# added by "$ME" on "$(date)"
stream {
    include $stream_output_dir/*.conf;
}
END
  fi
}
30 |
# Render template files with envsubst. Configuration via environment
# variables (defaults in parentheses):
#   NGINX_ENVSUBST_TEMPLATE_DIR          (/etc/nginx/templates)
#   NGINX_ENVSUBST_TEMPLATE_SUFFIX       (.template)
#   NGINX_ENVSUBST_OUTPUT_DIR            (/etc/nginx/conf.d)
#   NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX (.stream-template)
#   NGINX_ENVSUBST_STREAM_OUTPUT_DIR     (/etc/nginx/stream-conf.d)
#   NGINX_ENVSUBST_FILTER                () — regex limiting which env var
#                                         names get substituted
auto_envsubst() {
  local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
  local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
  local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
  local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}"
  local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}"
  local filter="${NGINX_ENVSUBST_FILTER:-}"

  local template defined_envs relative_path output_path subdir
  # build the '${VAR} ${VAR2} ...' list that tells envsubst exactly which
  # variables to substitute (all of the environment, optionally filtered)
  defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
  [ -d "$template_dir" ] || return 0
  if [ ! -w "$output_dir" ]; then
    entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
    return 0
  fi
  # render each template, mirroring the template dir's sub-structure
  find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
    relative_path="${template#"$template_dir/"}"
    output_path="$output_dir/${relative_path%"$suffix"}"
    subdir=$(dirname "$relative_path")
    # create a subdirectory where the template file exists
    mkdir -p "$output_dir/$subdir"
    entrypoint_log "$ME: Running envsubst on $template to $output_path"
    envsubst "$defined_envs" < "$template" > "$output_path"
  done

  # Print the first file with the stream suffix, this will be false if there are none
  if test -n "$(find "$template_dir" -name "*$stream_suffix" -print -quit)"; then
    mkdir -p "$stream_output_dir"
    if [ ! -w "$stream_output_dir" ]; then
      entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable"
      return 0
    fi
    add_stream_block
    find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do
      relative_path="${template#"$template_dir/"}"
      output_path="$stream_output_dir/${relative_path%"$stream_suffix"}"
      subdir=$(dirname "$relative_path")
      # create a subdirectory where the template file exists
      mkdir -p "$stream_output_dir/$subdir"
      entrypoint_log "$ME: Running envsubst on $template to $output_path"
      envsubst "$defined_envs" < "$template" > "$output_path"
    done
  fi
}
75 |
76 | auto_envsubst
77 |
78 | exit 0
79 |
--------------------------------------------------------------------------------
/stable/debian/30-tune-worker-processes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=2:ts=2:sts=2:et
3 |
4 | set -eu
5 |
6 | LC_ALL=C
7 | ME=$(basename "$0")
8 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
9 |
10 | [ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0
11 |
12 | touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; }
13 |
# Integer ceiling of $1 / $2 (for non-negative numerator, positive divisor).
ceildiv() {
  num=$1 div=$2
  echo $(( (num + div - 1) / div ))
}
19 |
# Count the CPUs listed in a cpuset file ($1 = directory, $2 = file name).
# The file holds comma-separated entries that are either single CPU ids
# ("7") or inclusive ranges ("0-3"). Prints the count; returns 1 if the
# file does not exist.
get_cpuset() {
  cpusetroot=$1
  cpusetfile=$2
  ncpu=0
  [ -f "$cpusetroot/$cpusetfile" ] || return 1
  for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do
    case "$token" in
      *-*)
        # inclusive range, e.g. "0-3" -> 4 CPUs; count via seq like the
        # rest of the entrypoint tooling does
        count=$( seq "${token%-*}" "${token#*-}" | wc -l )
        ncpu=$(( ncpu + count ))
        ;;
      *)
        ncpu=$(( ncpu + 1 ))
        ;;
    esac
  done
  echo "$ncpu"
}
38 |
# Derive a CPU count from a cgroup-v1 CFS quota ($1 = controller dir).
# Prints ceil(quota/period); returns 1 when the files are missing, the
# quota is unlimited (-1), the period is 0, or the result is not positive.
get_quota() {
  cpuroot=$1
  ncpu=0
  [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1
  [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1
  cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" )
  cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" )
  # -1 means "no quota configured"; a zero period would divide by zero
  if [ "$cfs_quota" = "-1" ] || [ "$cfs_period" = "0" ]; then
    return 1
  fi
  ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
  [ "$ncpu" -gt 0 ] || return 1
  echo "$ncpu"
}
52 |
# Derive a CPU count from a cgroup-v2 "cpu.max" file ($1 = cgroup dir),
# whose single line is "<quota> <period>". Prints ceil(quota/period);
# returns 1 when the file is missing, the quota is "max" (unlimited),
# the period is 0, or the result is not positive.
get_quota_v2() {
  cpuroot=$1
  ncpu=0
  [ -f "$cpuroot/cpu.max" ] || return 1
  cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" )
  cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" )
  if [ "$cfs_quota" = "max" ] || [ "$cfs_period" = "0" ]; then
    return 1
  fi
  ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
  [ "$ncpu" -gt 0 ] || return 1
  echo "$ncpu"
}
65 |
# Resolve the cgroup-v1 directory for controller $1 ("cpuset" or "cpu") by
# cross-referencing /proc/self/mountinfo (where the controller hierarchy is
# mounted) with /proc/self/cgroup (this process's path inside it).
# Prints the resolved path (possibly empty if not found); returns 1 when
# the /proc files are unreadable.
get_cgroup_v1_path() {
  needle=$1
  found=
  foundroot=
  mountpoint=

  [ -r "/proc/self/mountinfo" ] || return 1
  [ -r "/proc/self/cgroup" ] || return 1

  # pass 1: from mountinfo, grab "root mountpoint" (fields 4,5) of the
  # matching cgroup mount; the here-doc (instead of a pipe) keeps the loop
  # in the current shell so $found survives after the loop
  while IFS= read -r line; do
    case "$needle" in
      "cpuset")
        case "$line" in
          *cpuset*)
            found=$( echo "$line" | cut -d ' ' -f 4,5 )
            break
            ;;
        esac
        ;;
      "cpu")
        case "$line" in
          *cpuset*)
            ;;
          *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*)
            found=$( echo "$line" | cut -d ' ' -f 4,5 )
            break
            ;;
        esac
    esac
  done << __EOF__
$( grep -F -- '- cgroup ' /proc/self/mountinfo )
__EOF__

  # pass 2: from /proc/self/cgroup ("id:controller:path"), find the path of
  # the matching controller for this process
  while IFS= read -r line; do
    controller=$( echo "$line" | cut -d: -f 2 )
    case "$needle" in
      "cpuset")
        case "$controller" in
          cpuset)
            mountpoint=$( echo "$line" | cut -d: -f 3 )
            break
            ;;
        esac
        ;;
      "cpu")
        case "$controller" in
          cpu,cpuacct|cpuacct,cpu|cpuacct|cpu)
            mountpoint=$( echo "$line" | cut -d: -f 3 )
            break
            ;;
        esac
        ;;
    esac
  done << __EOF__
$( grep -F -- 'cpu' /proc/self/cgroup )
__EOF__

  # combine: if the mount root is "/", append the process's cgroup path;
  # if it already equals that path (container case), use the mountpoint as-is
  case "${found%% *}" in
    "/")
      foundroot="${found##* }$mountpoint"
      ;;
    "$mountpoint")
      foundroot="${found##* }"
      ;;
  esac
  echo "$foundroot"
}
133 |
# Resolve the unified cgroup-v2 directory by combining the cgroup2 mount
# from /proc/self/mountinfo with the "0::" entry of /proc/self/cgroup.
# Prints the path; returns 1 when the /proc files are unreadable or no
# cgroup2 mount exists.
get_cgroup_v2_path() {
  found=
  foundroot=
  mountpoint=

  [ -r "/proc/self/mountinfo" ] || return 1
  [ -r "/proc/self/cgroup" ] || return 1

  # "root mountpoint" (fields 4,5) of the cgroup2 mount; here-doc keeps the
  # loop in the current shell so $found is visible afterwards
  while IFS= read -r line; do
    found=$( echo "$line" | cut -d ' ' -f 4,5 )
  done << __EOF__
$( grep -F -- '- cgroup2 ' /proc/self/mountinfo )
__EOF__

  # this process's path in the unified hierarchy ("0::<path>")
  while IFS= read -r line; do
    mountpoint=$( echo "$line" | cut -d: -f 3 )
  done << __EOF__
$( grep -F -- '0::' /proc/self/cgroup )
__EOF__

  case "${found%% *}" in
    "")
      return 1
      ;;
    "/")
      foundroot="${found##* }$mountpoint"
      ;;
    "$mountpoint" | /../*)
      foundroot="${found##* }"
      ;;
  esac
  echo "$foundroot"
}
167 |
168 | ncpu_online=$( getconf _NPROCESSORS_ONLN )
169 | ncpu_cpuset=
170 | ncpu_quota=
171 | ncpu_cpuset_v2=
172 | ncpu_quota_v2=
173 |
174 | cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online
175 | cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online
176 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online
177 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online
178 |
179 | ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \
180 | "$ncpu_online" \
181 | "$ncpu_cpuset" \
182 | "$ncpu_quota" \
183 | "$ncpu_cpuset_v2" \
184 | "$ncpu_quota_v2" \
185 | | sort -n \
186 | | head -n 1 )
187 |
188 | sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf
189 |
--------------------------------------------------------------------------------
/stable/debian/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # vim:sw=4:ts=4:et
3 |
4 | set -e
5 |
# Print a log line unless NGINX_ENTRYPOINT_QUIET_LOGS suppresses logging.
entrypoint_log() {
    [ -n "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ] || echo "$@"
}
11 |
# Only run the /docker-entrypoint.d/ machinery when the container command is
# nginx itself; any other command is exec'd untouched at the bottom.
if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then
    # "find ... -print -quit | read v" succeeds only if find printed at least
    # one entry, i.e. the directory exists and is non-empty
    if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
        entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"

        entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
        # sort -V gives a stable version-aware ordering (10-... before 15-...)
        find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
            case "$f" in
                *.envsh)
                    if [ -x "$f" ]; then
                        entrypoint_log "$0: Sourcing $f";
                        # sourced (not executed) so variables it exports are
                        # visible to the later scripts in this loop
                        . "$f"
                    else
                        # warn on shell scripts without exec bit
                        entrypoint_log "$0: Ignoring $f, not executable";
                    fi
                    ;;
                *.sh)
                    if [ -x "$f" ]; then
                        entrypoint_log "$0: Launching $f";
                        "$f"
                    else
                        # warn on shell scripts without exec bit
                        entrypoint_log "$0: Ignoring $f, not executable";
                    fi
                    ;;
                *) entrypoint_log "$0: Ignoring $f";;
            esac
        done

        entrypoint_log "$0: Configuration complete; ready for start up"
    else
        entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
    fi
fi

# replace this shell with the requested command so it runs as PID 1
exec "$@"
48 |
--------------------------------------------------------------------------------
/sync-awsecr.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -eu

# Source image name on Docker Hub and the target public ECR registry.
image="nginx"
registry="public.ecr.aws/z9d2n7e1"

# extra alias tags to publish for each release channel
declare -A aliases
aliases=(
	[mainline]='1 1.27 latest'
	[stable]='1.28'
)

architectures=( amd64 arm64v8 )

self="$(basename "$BASH_SOURCE")"
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
base=debian

versions=( mainline stable )

# pulllist: concrete images to pull; taglist: alias tag -> concrete image
pulllist=()
declare -A taglist
taglist=()
24 |
# get the most recent commit which modified any of "$@"
# (prints the full commit hash to stdout)
fileCommit() {
	git log -1 --format='format:%H' HEAD -- "$@"
}
29 |
# get the most recent commit which modified "$1/Dockerfile" or any file COPY'd from "$1/Dockerfile"
# (runs in a subshell so the cd does not leak; COPY sources are extracted by
# scanning the committed Dockerfile, skipping the last COPY argument, which
# is the in-image destination)
dirCommit() {
	local dir="$1"; shift
	(
		cd "$dir"
		fileCommit \
			Dockerfile \
			$(git show HEAD:./Dockerfile | awk '
				toupper($1) == "COPY" {
					for (i = 2; i < NF; i++) {
						print $i
					}
				}
			')
	)
}
46 |
47 | # prints "$2$1$3$1...$N"
48 | join() {
49 | local sep="$1"; shift
50 | local out; printf -v out "${sep//%/%%}%s" "$@"
51 | echo "${out#$sep}"
52 | }
53 |
# Build the pull list: for each channel, the concrete versioned debian image
# plus every variant image derived from that same version.
for version in "${versions[@]}"; do
	commit="$(dirCommit "$version/$base")"
	# read the exact nginx version out of the committed Dockerfile's ENV line
	fullVersion="$(git show "$commit":"$version/$base/Dockerfile" | awk '$1 == "ENV" && $2 == "NGINX_VERSION" { print $3; exit }')"
	pulllist+=( "$image:$fullVersion" )
	for variant in perl alpine alpine-perl alpine-slim; do
		pulllist+=( "$image:$fullVersion-$variant" )
	done
done
62 |
# Compute every alias tag for each channel and record which concrete image
# it should point at.
for version in "${versions[@]}"; do
	commit="$(dirCommit "$version/$base")"

	fullVersion="$(git show "$commit":"$version/$base/Dockerfile" | awk '$1 == "ENV" && $2 == "NGINX_VERSION" { print $3; exit }')"

	versionAliases=( $fullVersion )
	if [ "$version" != "$fullVersion" ]; then
		versionAliases+=( $version )
	fi
	versionAliases+=( ${aliases[$version]:-} )

	# debian suffix tags, e.g. "1.28.0-bookworm"; "latest-<suite>" collapses
	# to just "<suite>"
	debianVersion="$(git show "$commit":"$version/$base/Dockerfile" | awk -F"[-:]" '$1 == "FROM debian" { print $2; exit }')"
	debianAliases=( ${versionAliases[@]/%/-$debianVersion} )
	debianAliases=( "${debianAliases[@]//latest-/}" )

	# ":1" skips the first alias (the concrete version), which is already on
	# the pull list and pushed directly
	for tag in ${versionAliases[@]:1} ${debianAliases[@]:1}; do
		taglist["$image:$tag"]="$image:$fullVersion"
	done

	for variant in debian-perl; do
		variantAliases=( "${versionAliases[@]/%/-perl}" )
		variantAliases+=( "${versionAliases[@]/%/-${variant/debian/$debianVersion}}" )
		variantAliases=( "${variantAliases[@]//latest-/}" )

		for tag in ${variantAliases[@]}; do
			if [ "$tag" != "${fullVersion}-perl" ]; then
				taglist["$image:$tag"]="$image:$fullVersion-perl"
			fi
		done
	done

	# alpine variants also get tags embedding the alpine release number
	commit="$(dirCommit "$version/alpine-slim")"
	alpineVersion="$(git show "$commit":"$version/alpine-slim/Dockerfile" | awk -F: '$1 == "FROM alpine" { print $2; exit }')"

	for variant in alpine alpine-perl alpine-slim; do
		commit="$(dirCommit "$version/$variant")"

		variantAliases=( "${versionAliases[@]/%/-$variant}" )
		variantAliases+=( "${versionAliases[@]/%/-${variant/alpine/alpine$alpineVersion}}" )
		variantAliases=( "${variantAliases[@]//latest-/}" )

		for tag in ${variantAliases[@]}; do
			if [ "$tag" != "${fullVersion}-$variant" ]; then
				taglist["$image:$tag"]="$image:${fullVersion}-$variant"
			fi
		done
	done

done
112 |
# Emit a shell script on stdout that pulls, re-tags, pushes, and manifests
# everything computed above. Array expansions are quoted (SC2068) so the
# emitted commands cannot be mangled by word-splitting or globbing.
echo "#!/bin/sh"
echo "set -ex"
echo
echo "export DOCKER_CLI_EXPERIMENTAL=enabled"
echo
echo "# pulling stuff"
for arch in "${architectures[@]}"; do
	case "$arch" in
		arm64v8)
			# Docker Hub's arch-prefixed repos say arm64v8, but
			# "docker pull --platform" expects aarch64
			parch="aarch64"
			;;
		*)
			parch=$arch
			;;
	esac
	for tag in "${pulllist[@]}"; do
		echo "docker pull --platform linux/$parch $arch/$tag";
	done
done

echo

echo "# tagging stuff"

for arch in "${architectures[@]}"; do
	for tag in "${pulllist[@]}"; do
		echo "docker tag $arch/$tag $registry/$tag-$arch"
	done
	for tag in "${!taglist[@]}"; do
		echo "docker tag $arch/${taglist[$tag]} $registry/$tag-$arch"
	done
done

echo "# pushing stuff"

for arch in "${architectures[@]}"; do
	for tag in "${pulllist[@]}"; do
		echo "docker push $registry/$tag-$arch"
	done
	for tag in "${!taglist[@]}"; do
		echo "docker push $registry/$tag-$arch"
	done
done

echo
echo "# manifesting stuff"
for tag in "${pulllist[@]}" "${!taglist[@]}"; do
	string="docker manifest create --amend $registry/$tag"
	for arch in "${architectures[@]}"; do
		string+=" $registry/$tag-$arch"
	done
	# quoted so the assembled command line is printed verbatim (SC2086)
	echo "$string"
done

echo
echo "# pushing manifests"
for tag in "${pulllist[@]}" "${!taglist[@]}"; do
	echo "docker manifest push --purge $registry/$tag"
done
172 |
--------------------------------------------------------------------------------