├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── bug_report.yml │ ├── config.yml │ ├── feature_request.yml │ └── security_report.yml ├── dependabot.yml ├── pull_request_template.md ├── scorecard.yml └── workflows │ ├── alpine-mainline.yml │ ├── alpine-stable.yml │ ├── debian-mainline.yml │ ├── debian-stable.yml │ ├── f5_cla.yml │ ├── image-cleanup.yml │ ├── ossf_scorecard.yml │ └── scripts │ └── delete-untagged-amazon-public-ecr-images.sh ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Dockerfile-alpine-perl.template ├── Dockerfile-alpine-slim.template ├── Dockerfile-alpine.template ├── Dockerfile-debian-perl.template ├── Dockerfile-debian.template ├── LICENSE ├── README.md ├── SECURITY.md ├── SUPPORT.md ├── entrypoint ├── 10-listen-on-ipv6-by-default.sh ├── 15-local-resolvers.envsh ├── 20-envsubst-on-templates.sh ├── 30-tune-worker-processes.sh └── docker-entrypoint.sh ├── mainline ├── alpine-perl │ └── Dockerfile ├── alpine-slim │ ├── 10-listen-on-ipv6-by-default.sh │ ├── 15-local-resolvers.envsh │ ├── 20-envsubst-on-templates.sh │ ├── 30-tune-worker-processes.sh │ ├── Dockerfile │ └── docker-entrypoint.sh ├── alpine │ └── Dockerfile ├── debian-perl │ └── Dockerfile └── debian │ ├── 10-listen-on-ipv6-by-default.sh │ ├── 15-local-resolvers.envsh │ ├── 20-envsubst-on-templates.sh │ ├── 30-tune-worker-processes.sh │ ├── Dockerfile │ └── docker-entrypoint.sh ├── stable ├── alpine-perl │ └── Dockerfile ├── alpine-slim │ ├── 10-listen-on-ipv6-by-default.sh │ ├── 15-local-resolvers.envsh │ ├── 20-envsubst-on-templates.sh │ ├── 30-tune-worker-processes.sh │ ├── Dockerfile │ └── docker-entrypoint.sh ├── alpine │ └── Dockerfile ├── debian-perl │ └── Dockerfile └── debian │ ├── 10-listen-on-ipv6-by-default.sh │ ├── 15-local-resolvers.envsh │ ├── 20-envsubst-on-templates.sh │ ├── 30-tune-worker-processes.sh │ ├── Dockerfile │ └── docker-entrypoint.sh └── update.sh /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | ##################### 2 | # Main global owner # 3 | ##################### 4 | 5 | * @nginx/docker-unprivileged 6 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: 🐛 Bug report 3 | description: Create a report to help us improve 4 | labels: bug 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: | 9 | Thanks for taking the time to fill out this bug report! 10 | 11 | Before you continue filling out this report, please take a moment to check that your bug has not been [already reported on GitHub][issue search] 🙌 12 | 13 | Remember to redact any sensitive information such as authentication credentials and/or license keys! 14 | 15 | **Note:** If you are seeking community support or have a question, please consider starting a new thread via [GitHub discussions][discussions] or the [NGINX Community forum][forum]. 16 | 17 | [issue search]: ../search?q=is%3Aissue&type=issues 18 | 19 | [discussions]: ../discussions 20 | [forum]: https://community.nginx.org 21 | 22 | - type: textarea 23 | id: overview 24 | attributes: 25 | label: Bug Overview 26 | description: A clear and concise overview of the bug. 27 | placeholder: When I do "X" with the NGINX unprivileged Docker image, "Y" happens instead of "Z". 
28 | validations: 29 | required: true 30 | 31 | - type: textarea 32 | id: behavior 33 | attributes: 34 | label: Expected Behavior 35 | description: A clear and concise description of what you expected to happen. 36 | placeholder: When I do "X" with the NGINX unprivileged Docker image, I expect "Z" to happen. 37 | validations: 38 | required: true 39 | 40 | - type: textarea 41 | id: steps 42 | attributes: 43 | label: Steps to Reproduce the Bug 44 | description: Detail the series of steps required to reproduce the bug. Deploy NGINX Unprivileged Docker image, View output/logs/configuration on '...', See error. 45 | placeholder: When I run the NGINX Docker unprivileged image using [...], the image fails with an error message. If I check the terminal outputs and/or logs, I see the following error info. 46 | validations: 47 | required: true 48 | 49 | - type: textarea 50 | id: environment 51 | attributes: 52 | label: Environment Details 53 | description: Please provide details about your environment. 54 | value: | 55 | - Version of Docker and method of installation: [e.g. Docker Desktop / Docker Server] 56 | - Version/tag of the NGINX Docker unprivileged image or specific commit: [e.g. 1.4.3/commit hash] 57 | - Target deployment platform: [e.g. OpenShift/Kubernetes/Docker Compose/local cluster/etc...] 58 | - Target OS: [e.g. RHEL 9/Ubuntu 24.04/etc...] 59 | validations: 60 | required: true 61 | 62 | - type: textarea 63 | id: context 64 | attributes: 65 | label: Additional Context 66 | description: Add any other context about the problem here. 67 | placeholder: Feel free to add any other context/information/screenshots/etc... that you think might be relevant to this issue in here. 68 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | blank_issues_enabled: false 3 | contact_links: 4 | - name: 💬 Talk to the NGINX community! 5 | url: https://community.nginx.org 6 | about: A community forum for NGINX users, developers, and contributors 7 | - name: 📝 Code of Conduct 8 | url: https://www.contributor-covenant.org/version/2/1/code_of_conduct 9 | about: NGINX follows the Contributor Covenant Code of Conduct to ensure a safe and inclusive community 10 | - name: 💼 For commercial & enterprise users 11 | url: https://www.f5.com/products/nginx 12 | about: F5 offers a wide range of NGINX products for commercial & enterprise users 13 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: ✨ Feature request 3 | description: Suggest an idea for this project 4 | labels: enhancement 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: | 9 | Thanks for taking the time to fill out this feature request! 10 | 11 | Before you continue filling out this request, please take a moment to check that your feature has not been [already requested on GitHub][issue search] 🙌 12 | 13 | **Note:** If you are seeking community support or have a question, please consider starting a new thread via [GitHub discussions][discussions] or the [NGINX Community forum][forum]. 
14 | 15 | [issue search]: ../search?q=is%3Aissue&type=issues 16 | 17 | [discussions]: ../discussions 18 | [forum]: https://community.nginx.org 19 | 20 | - type: textarea 21 | id: overview 22 | attributes: 23 | label: Feature Overview 24 | description: A clear and concise description of what the feature request is. 25 | placeholder: I would like the NGINX Docker unprivileged image to be able to do "X". 26 | validations: 27 | required: true 28 | 29 | - type: textarea 30 | id: alternatives 31 | attributes: 32 | label: Alternatives Considered 33 | description: Detail any potential alternative solutions/workarounds you've used or considered. 34 | placeholder: I have done/might be able to do "X" using the NGINX Docker unprivileged image by doing "Y". 35 | 36 | - type: textarea 37 | id: context 38 | attributes: 39 | label: Additional Context 40 | description: Add any other context about the problem here. 41 | placeholder: Feel free to add any other context/information/screenshots/etc... that you think might be relevant to this feature request here. 42 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/security_report.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: ⚠️ Security advisory 3 | description: Warn of a security advisory 4 | labels: security 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: | 9 | Thanks for taking the time to fill out this security advisory! 10 | 11 | Before you continue filling out this request, please take a moment to check that your advisory complies with the guidance in the [security documentation][security] and has not been [already reported on GitHub][issue search] 🙌 12 | 13 | **Note:** If you are seeking community support or have a question, please consider starting a new thread via [GitHub discussions][discussions] or the [NGINX Community forum][forum]. 14 | 15 | [security]: https://github.com/nginx/docker-nginx-unprivileged/blob/main/SECURITY.md 16 | [issue search]: ../search?q=is%3Aissue&type=issues 17 | 18 | [discussions]: ../discussions 19 | [forum]: https://community.nginx.org 20 | 21 | - type: textarea 22 | id: overview 23 | attributes: 24 | label: Security advisory overview 25 | description: A clear and concise description of what the security advisory is. 26 | placeholder: The NGINX Docker unprivileged image is vulnerable to CVE "X". 27 | validations: 28 | required: true 29 | 30 | - type: textarea 31 | id: context 32 | attributes: 33 | label: Additional Context 34 | description: Add any other context about the problem here. 35 | placeholder: Feel free to add any other context/information/screenshots/etc... that you think might be relevant to this security advisory here. 36 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2 3 | updates: 4 | - package-ecosystem: github-actions 5 | directory: / 6 | schedule: 7 | interval: weekly 8 | day: monday 9 | time: "00:00" 10 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ### Proposed changes 2 | 3 | Describe the use case and detail of the change. 
If this PR addresses an issue on GitHub, make sure to include a link to that issue using one of the [supported keywords](https://docs.github.com/en/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue) here in this description (not in the title of the PR). 4 | 5 | ### Checklist 6 | 7 | Before creating a PR, run through this checklist and mark each as complete: 8 | 9 | - [ ] I have read the [contributing guidelines](/CONTRIBUTING.md) 10 | - [ ] I have signed the [F5 Contributor License Agreement (CLA)](https://github.com/f5/f5-cla/blob/main/docs/f5_cla.md) 11 | - [ ] I have run the [`update.sh`](/update.sh) script and ensured all entrypoint/Dockerfile template changes have been applied to the relevant image entrypoint scripts & Dockerfiles 12 | - [ ] I have tested that the NGINX Docker unprivileged image builds and runs correctly on all supported architectures on an unprivileged environment (check out the [`README`](/README.md) for more details) 13 | - [ ] I have updated any relevant documentation ([`README.md`](/README.md)) 14 | -------------------------------------------------------------------------------- /.github/scorecard.yml: -------------------------------------------------------------------------------- 1 | --- 2 | annotations: 3 | - checks: 4 | - fuzzing 5 | - sast 6 | reasons: 7 | - reason: not-applicable 8 | -------------------------------------------------------------------------------- /.github/workflows/debian-stable.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Debian Stable 3 | on: 4 | pull_request: 5 | schedule: 6 | - cron: "0 0 * * 1" 7 | workflow_dispatch: 8 | jobs: 9 | version: 10 | name: Fetch NGINX stable version 11 | runs-on: ubuntu-24.04 12 | outputs: 13 | major: ${{ steps.nginx_version.outputs.major }} 14 | minor: ${{ steps.nginx_version.outputs.minor }} 15 | patch: ${{ steps.nginx_version.outputs.patch }} 16 | distro: ${{ steps.distro_version.outputs.release }} 17 | steps: 18 | - name: Check out the codebase 19 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 20 | 21 | - name: Parse NGINX stable version 22 | id: nginx_version 23 | run: | 24 | echo "major=$(cat update.sh | grep -m1 '\[stable\]=' | cut -d"'" -f2 | cut -d"." -f1)" >> "$GITHUB_OUTPUT" 25 | echo "minor=$(cat update.sh | grep -m1 '\[stable\]=' | cut -d"'" -f2 | cut -d"." -f2)" >> "$GITHUB_OUTPUT" 26 | echo "patch=$(cat update.sh | grep -m1 '\[stable\]=' | cut -d"'" -f2 | cut -d"." 
-f3)" >> "$GITHUB_OUTPUT" 27 | 28 | - name: Parse Alpine version 29 | id: distro_version 30 | run: | 31 | echo "release=$(cat update.sh | grep -m6 '\[stable\]=' | tail -n1 | cut -d"'" -f2)" >> "$GITHUB_OUTPUT" 32 | 33 | core: 34 | name: Build Debian NGINX stable Docker image 35 | needs: version 36 | runs-on: ubuntu-24.04 37 | strategy: 38 | fail-fast: false 39 | steps: 40 | - name: Check out the codebase 41 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 42 | 43 | - name: Set up QEMU 44 | uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0 45 | 46 | - name: Set up Docker Buildx 47 | uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0 48 | 49 | - name: Configure AWS credentials 50 | if: ${{ github.event_name != 'pull_request' }} 51 | uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 52 | with: 53 | aws-region: ${{ secrets.AWS_REGION }} 54 | aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} 55 | aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 56 | 57 | - name: Login to Amazon ECR Public Gallery 58 | if: ${{ github.event_name != 'pull_request' }} 59 | uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0 60 | with: 61 | registry: public.ecr.aws 62 | 63 | - name: Login to Docker Hub 64 | if: ${{ github.event_name != 'pull_request' }} 65 | uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0 66 | with: 67 | username: ${{ secrets.DOCKERHUB_USERNAME }} 68 | password: ${{ secrets.DOCKERHUB_TOKEN }} 69 | 70 | - name: Login to GitHub Container Registry 71 | if: ${{ github.event_name != 'pull_request' }} 72 | uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0 73 | with: 74 | registry: ghcr.io 75 | username: ${{ github.actor }} 76 | password: ${{ secrets.GITHUB_TOKEN }} 77 | 78 | - name: Login to Quay 79 | if: ${{ github.event_name != 'pull_request' }} 80 | uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0 81 | with: 82 | registry: quay.io 83 | username: ${{ secrets.QUAY_USERNAME }} 84 | password: ${{ secrets.QUAY_TOKEN }} 85 | 86 | - name: Extract metadata (annotations, labels, tags) for Docker 87 | id: meta 88 | uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0 89 | with: 90 | images: | 91 | docker.io/nginxinc/nginx-unprivileged 92 | ghcr.io/nginx/nginx-unprivileged 93 | public.ecr.aws/nginx/nginx-unprivileged 94 | quay.io/nginx/nginx-unprivileged 95 | tags: | 96 | type=raw,value=${{ needs.version.outputs.major }}.${{ needs.version.outputs.minor }}.${{ needs.version.outputs.patch }} 97 | type=raw,value=${{ needs.version.outputs.major }}.${{ needs.version.outputs.minor }}.${{ needs.version.outputs.patch }}-${{ needs.version.outputs.distro }} 98 | type=raw,value=${{ needs.version.outputs.major }}.${{ needs.version.outputs.minor }} 99 | type=raw,value=${{ needs.version.outputs.major }}.${{ needs.version.outputs.minor }}-${{ needs.version.outputs.distro }} 100 | type=raw,value=stable 101 | type=raw,value=stable-${{ needs.version.outputs.distro }} 102 | env: 103 | DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index 104 | 105 | - name: Build and push NGINX stable Debian image to Amazon ECR Public Gallery, Docker Hub, GitHub Container Registry, and Quay 106 | id: build 107 | uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0 108 | with: 109 | platforms: linux/amd64, linux/arm/v7, linux/arm64, linux/386, 
linux/mips64le, linux/ppc64le, linux/s390x 110 | # platforms: linux/amd64, linux/arm/v5, linux/arm/v7, linux/arm64, linux/386, linux/mips64le, linux/ppc64le, linux/s390x 111 | context: "{{ defaultContext }}:stable/debian" 112 | labels: ${{ steps.meta.outputs.labels }} 113 | annotations: ${{ steps.meta.outputs.annotations }} 114 | tags: ${{ steps.meta.outputs.tags }} 115 | push: ${{ github.event_name != 'pull_request' }} 116 | # cache-from: type=gha,scope=stable-debian 117 | # cache-to: type=gha,mode=min,scope=stable-debian 118 | 119 | - name: Sign Docker Hub Manifest 120 | if: ${{ github.event_name != 'pull_request' }} 121 | run: | 122 | set -ex 123 | sudo apt update 124 | sudo apt install -y notary 125 | mkdir -p ~/.docker/trust/private 126 | echo "$DOCKER_CONTENT_TRUST_REPOSITORY_KEY" > ~/.docker/trust/private/$DOCKER_CONTENT_TRUST_REPOSITORY_KEY_ID.key 127 | chmod 0400 ~/.docker/trust/private/$DOCKER_CONTENT_TRUST_REPOSITORY_KEY_ID.key 128 | docker trust key load ~/.docker/trust/private/$DOCKER_CONTENT_TRUST_REPOSITORY_KEY_ID.key --name nginx 129 | DIGEST=$(printf '${{ steps.build.outputs.metadata }}' | jq -r '."containerimage.descriptor".digest' | cut -d ':' -f2) 130 | SIZE=$(printf '${{ steps.build.outputs.metadata }}' | jq -r '."containerimage.descriptor".size') 131 | export NOTARY_AUTH=$(printf "${{ secrets.DOCKERHUB_USERNAME }}:${{ secrets.DOCKERHUB_TOKEN }}" | base64 -w0) 132 | notary -d ~/.docker/trust/ -s https://notary.docker.io addhash docker.io/nginxinc/nginx-unprivileged ${{ needs.version.outputs.major }}.${{ needs.version.outputs.minor }}.${{ needs.version.outputs.patch }} $SIZE --sha256 $DIGEST --publish --verbose 133 | notary -d ~/.docker/trust/ -s https://notary.docker.io addhash docker.io/nginxinc/nginx-unprivileged ${{ needs.version.outputs.major }}.${{ needs.version.outputs.minor }}.${{ needs.version.outputs.patch }}-${{ needs.version.outputs.distro }} $SIZE --sha256 $DIGEST --publish --verbose 134 | notary -d ~/.docker/trust/ -s https://notary.docker.io addhash docker.io/nginxinc/nginx-unprivileged ${{ needs.version.outputs.major }}.${{ needs.version.outputs.minor }} $SIZE --sha256 $DIGEST --publish --verbose 135 | notary -d ~/.docker/trust/ -s https://notary.docker.io addhash docker.io/nginxinc/nginx-unprivileged ${{ needs.version.outputs.major }}.${{ needs.version.outputs.minor }}-${{ needs.version.outputs.distro }} $SIZE --sha256 $DIGEST --publish --verbose 136 | notary -d ~/.docker/trust/ -s https://notary.docker.io addhash docker.io/nginxinc/nginx-unprivileged stable $SIZE --sha256 $DIGEST --publish --verbose 137 | notary -d ~/.docker/trust/ -s https://notary.docker.io addhash docker.io/nginxinc/nginx-unprivileged stable-${{ needs.version.outputs.distro }} $SIZE --sha256 $DIGEST --publish --verbose 138 | env: 139 | DOCKER_CONTENT_TRUST_REPOSITORY_KEY: ${{ secrets.DOCKER_CONTENT_TRUST_REPOSITORY_KEY }} 140 | DOCKER_CONTENT_TRUST_REPOSITORY_KEY_ID: ${{ secrets.DOCKER_CONTENT_TRUST_REPOSITORY_KEY_ID }} 141 | DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE: ${{ secrets.DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE }} 142 | NOTARY_TARGETS_PASSPHRASE: ${{ secrets.DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE }} 143 | 144 | perl: 145 | name: Build Debian NGINX stable perl Docker image 146 | needs: [version, core] 147 | runs-on: ubuntu-24.04 148 | strategy: 149 | fail-fast: false 150 | steps: 151 | - name: Check out the codebase 152 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 153 | 154 | - name: Set up QEMU 155 | uses: 
docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0 156 | 157 | - name: Set up Docker Buildx 158 | uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0 159 | 160 | - name: Configure AWS credentials 161 | if: ${{ github.event_name != 'pull_request' }} 162 | uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 163 | with: 164 | aws-region: ${{ secrets.AWS_REGION }} 165 | aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} 166 | aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 167 | 168 | - name: Login to Amazon ECR Public Gallery 169 | if: ${{ github.event_name != 'pull_request' }} 170 | uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0 171 | with: 172 | registry: public.ecr.aws 173 | 174 | - name: Login to Docker Hub 175 | if: ${{ github.event_name != 'pull_request' }} 176 | uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0 177 | with: 178 | username: ${{ secrets.DOCKERHUB_USERNAME }} 179 | password: ${{ secrets.DOCKERHUB_TOKEN }} 180 | 181 | - name: Login to GitHub Container Registry 182 | if: ${{ github.event_name != 'pull_request' }} 183 | uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0 184 | with: 185 | registry: ghcr.io 186 | username: ${{ github.actor }} 187 | password: ${{ secrets.GITHUB_TOKEN }} 188 | 189 | - name: Login to Quay 190 | if: ${{ github.event_name != 'pull_request' }} 191 | uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0 192 | with: 193 | registry: quay.io 194 | username: ${{ secrets.QUAY_USERNAME }} 195 | password: ${{ secrets.QUAY_TOKEN }} 196 | 197 | - name: Extract metadata (annotations, labels, tags) for Docker 198 | id: meta 199 | uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0 200 | with: 201 | images: | 202 | docker.io/nginxinc/nginx-unprivileged 203 | ghcr.io/nginx/nginx-unprivileged 204 | public.ecr.aws/nginx/nginx-unprivileged 205 | quay.io/nginx/nginx-unprivileged 206 | tags: | 207 | type=raw,value=${{ needs.version.outputs.major }}.${{ needs.version.outputs.minor }}.${{ needs.version.outputs.patch }}-perl 208 | type=raw,value=${{ needs.version.outputs.major }}.${{ needs.version.outputs.minor }}.${{ needs.version.outputs.patch }}-${{ needs.version.outputs.distro }}-perl 209 | type=raw,value=${{ needs.version.outputs.major }}.${{ needs.version.outputs.minor }}-perl 210 | type=raw,value=${{ needs.version.outputs.major }}.${{ needs.version.outputs.minor }}-${{ needs.version.outputs.distro }}-perl 211 | type=raw,value=stable-perl 212 | type=raw,value=stable-${{ needs.version.outputs.distro }}-perl 213 | env: 214 | DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index 215 | 216 | - name: Build and push NGINX stable perl Debian image to Amazon ECR Public Gallery, Docker Hub, GitHub Container Registry, and Quay 217 | id: build 218 | uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0 219 | with: 220 | platforms: linux/amd64, linux/arm/v7, linux/arm64, linux/386, linux/mips64le, linux/ppc64le, linux/s390x 221 | # platforms: linux/amd64, linux/arm/v5, linux/arm/v7, linux/arm64, linux/386, linux/mips64le, linux/ppc64le, linux/s390x 222 | context: "{{ defaultContext }}:stable/debian-perl" 223 | labels: ${{ steps.meta.outputs.labels }} 224 | annotations: ${{ steps.meta.outputs.annotations }} 225 | tags: ${{ steps.meta.outputs.tags }} 226 | push: ${{ github.event_name != 'pull_request' }} 227 | # 
cache-from: type=gha,scope=stable-debian-perl 228 |           # cache-to: type=gha,mode=min,scope=stable-debian-perl 229 | 230 |       - name: Sign Docker Hub Manifest 231 |         if: ${{ github.event_name != 'pull_request' }} 232 |         run: | 233 |           set -ex 234 |           sudo apt update 235 |           sudo apt install -y notary 236 |           mkdir -p ~/.docker/trust/private 237 |           echo "$DOCKER_CONTENT_TRUST_REPOSITORY_KEY" > ~/.docker/trust/private/$DOCKER_CONTENT_TRUST_REPOSITORY_KEY_ID.key 238 |           chmod 0400 ~/.docker/trust/private/$DOCKER_CONTENT_TRUST_REPOSITORY_KEY_ID.key 239 |           docker trust key load ~/.docker/trust/private/$DOCKER_CONTENT_TRUST_REPOSITORY_KEY_ID.key --name nginx 240 |           DIGEST=$(printf '${{ steps.build.outputs.metadata }}' | jq -r '."containerimage.descriptor".digest' | cut -d ':' -f2) 241 |           SIZE=$(printf '${{ steps.build.outputs.metadata }}' | jq -r '."containerimage.descriptor".size') 242 |           export NOTARY_AUTH=$(printf "${{ secrets.DOCKERHUB_USERNAME }}:${{ secrets.DOCKERHUB_TOKEN }}" | base64 -w0) 243 |           notary -d ~/.docker/trust/ -s https://notary.docker.io addhash docker.io/nginxinc/nginx-unprivileged ${{ needs.version.outputs.major }}.${{ needs.version.outputs.minor }}.${{ needs.version.outputs.patch }}-perl $SIZE --sha256 $DIGEST --publish --verbose 244 |           notary -d ~/.docker/trust/ -s https://notary.docker.io addhash docker.io/nginxinc/nginx-unprivileged ${{ needs.version.outputs.major }}.${{ needs.version.outputs.minor }}.${{ needs.version.outputs.patch }}-${{ needs.version.outputs.distro }}-perl $SIZE --sha256 $DIGEST --publish --verbose 245 |           notary -d ~/.docker/trust/ -s https://notary.docker.io addhash docker.io/nginxinc/nginx-unprivileged ${{ needs.version.outputs.major }}.${{ needs.version.outputs.minor }}-perl $SIZE --sha256 $DIGEST --publish --verbose 246 |           notary -d ~/.docker/trust/ -s https://notary.docker.io addhash docker.io/nginxinc/nginx-unprivileged ${{ needs.version.outputs.major }}.${{ needs.version.outputs.minor }}-${{ needs.version.outputs.distro }}-perl $SIZE --sha256 $DIGEST --publish --verbose 247 |           notary -d ~/.docker/trust/ -s https://notary.docker.io addhash docker.io/nginxinc/nginx-unprivileged stable-perl $SIZE --sha256 $DIGEST --publish --verbose 248 |           notary -d ~/.docker/trust/ -s https://notary.docker.io addhash docker.io/nginxinc/nginx-unprivileged stable-${{ needs.version.outputs.distro }}-perl $SIZE --sha256 $DIGEST --publish --verbose 249 |         env: 250 |           DOCKER_CONTENT_TRUST_REPOSITORY_KEY: ${{ secrets.DOCKER_CONTENT_TRUST_REPOSITORY_KEY }} 251 |           DOCKER_CONTENT_TRUST_REPOSITORY_KEY_ID: ${{ secrets.DOCKER_CONTENT_TRUST_REPOSITORY_KEY_ID }} 252 |           DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE: ${{ secrets.DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE }} 253 |           NOTARY_TARGETS_PASSPHRASE: ${{ secrets.DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE }} 254 | -------------------------------------------------------------------------------- /.github/workflows/f5_cla.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: F5 CLA 3 | on: 4 |   issue_comment: 5 |     types: [created] 6 |   pull_request_target: 7 |     types: [opened, closed, synchronize] 8 | permissions: read-all 9 | jobs: 10 |   f5-cla: 11 |     name: F5 CLA 12 |     runs-on: ubuntu-24.04 13 |     permissions: 14 |       actions: write 15 |       pull-requests: write 16 |       statuses: write 17 |     steps: 18 |       - name: Run F5 Contributor License Agreement (CLA) assistant 19 |         if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have hereby read the F5 CLA and agree to its terms') || github.event_name ==
'pull_request_target' 20 | uses: contributor-assistant/github-action@ca4a40a7d1004f18d9960b404b97e5f30a505a08 # v2.6.1 21 | with: 22 | # Any pull request targeting the following branch will trigger a CLA check. 23 | # NOTE: You might need to edit this value to 'master'. 24 | branch: main 25 | # Path to the CLA document. 26 | path-to-document: https://github.com/f5/f5-cla/blob/main/docs/f5_cla.md 27 | # Custom CLA messages. 28 | custom-notsigned-prcomment: '🎉 Thank you for your contribution! It appears you have not yet signed the [F5 Contributor License Agreement (CLA)](https://github.com/f5/f5-cla/blob/main/docs/f5_cla.md), which is required for your changes to be incorporated into an F5 Open Source Software (OSS) project. Please kindly read the [F5 CLA](https://github.com/f5/f5-cla/blob/main/docs/f5_cla.md) and reply on a new comment with the following text to agree:' 29 | custom-pr-sign-comment: 'I have hereby read the F5 CLA and agree to its terms' 30 | custom-allsigned-prcomment: '✅ All required contributors have signed the F5 CLA for this PR. Thank you!' 31 | # Remote repository storing CLA signatures. 32 | remote-organization-name: f5 33 | remote-repository-name: f5-cla-data 34 | path-to-signatures: signatures/signatures.json 35 | # Comma separated list of usernames for maintainers or any other individuals who should not be prompted for a CLA. 36 | # NOTE: You will want to edit the usernames to suit your project needs. 37 | allowlist: bot* 38 | # Do not lock PRs after a merge. 39 | lock-pullrequest-aftermerge: false 40 | env: 41 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 42 | PERSONAL_ACCESS_TOKEN: ${{ secrets.F5_CLA_TOKEN }} 43 | -------------------------------------------------------------------------------- /.github/workflows/image-cleanup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Image Cleanup 3 | on: 4 | workflow_dispatch: 5 | workflow_run: 6 | workflows: [Alpine Mainline, Alpine Stable, Debian Mainline, Debian Stable] 7 | types: [completed] 8 | jobs: 9 | cleanup: 10 | name: Delete untagged NGINX Unprivileged Docker images on the Amazon ECR Public Gallery and the GitHub Container Registry 11 | if: ${{ github.event_name == 'workflow_dispatch' || github.event.workflow_run.conclusion == 'success' }} 12 | runs-on: ubuntu-24.04 13 | strategy: 14 | fail-fast: false 15 | steps: 16 | - name: Check out the codebase 17 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 18 | 19 | - name: Configure AWS credentials 20 | uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 21 | with: 22 | aws-region: ${{ secrets.AWS_REGION }} 23 | aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} 24 | aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 25 | 26 | - name: Delete untagged NGINX Unprivileged Docker images on the Amazon ECR Public Gallery 27 | run: | 28 | .github/workflows/scripts/delete-untagged-amazon-public-ecr-images.sh 29 | 30 | - name: Delete untagged NGINX Unprivileged Docker images on the GitHub Container Registry 31 | uses: dataaxiom/ghcr-cleanup-action@cd0cdb900b5dbf3a6f2cc869f0dbb0b8211f50c4 # v1.0.16 32 | with: 33 | package: nginx-unprivileged 34 | delete-ghost-images: true 35 | delete-untagged: true 36 | delete-partial-images: true 37 | delete-orphaned-images: true 38 | older-than: 2 years 39 | token: ${{ secrets.GITHUB_TOKEN }} 40 | validate: true 41 | -------------------------------------------------------------------------------- 
/.github/workflows/ossf_scorecard.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This workflow uses actions that are not certified by GitHub. They are provided by a third-party and are governed by separate terms of service, privacy policy, and support documentation. 3 | name: OSSF Scorecard 4 | on: 5 | # For Branch-Protection check. Only the default branch is supported. See https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection. 6 | branch_protection_rule: 7 | # To guarantee Maintained check is occasionally updated. See https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained. 8 | schedule: 9 | - cron: "0 0 * * 1" 10 | push: 11 | branches: [main] 12 | workflow_dispatch: 13 | # Declare default permissions as read only. 14 | permissions: read-all 15 | jobs: 16 | analysis: 17 | name: Scorecard analysis 18 | runs-on: ubuntu-24.04 19 | # Delete the conditional below if you are using the OSSF Scorecard on a private repository. 20 | if: ${{ github.event.repository.private == false }} 21 | permissions: 22 | # Needed if using Code Scanning alerts. 23 | security-events: write 24 | # Needed for GitHub OIDC token if publish_results is true. 25 | id-token: write 26 | steps: 27 | - name: Check out the codebase 28 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 29 | with: 30 | persist-credentials: false 31 | 32 | - name: Run analysis 33 | uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 34 | with: 35 | results_file: results.sarif 36 | results_format: sarif 37 | # Publish the results for public repositories to enable scorecard badges. For more details, see https://github.com/ossf/scorecard-action#publishing-results. 38 | publish_results: true 39 | 40 | # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF format to the repository Actions tab. 41 | - name: Upload artifact 42 | uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 43 | with: 44 | name: SARIF file 45 | path: results.sarif 46 | retention-days: 5 47 | 48 | # Upload the results to GitHub's code scanning dashboard. 49 | - name: Upload SARIF results to code scanning 50 | uses: github/codeql-action/upload-sarif@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18 51 | with: 52 | sarif_file: results.sarif 53 | -------------------------------------------------------------------------------- /.github/workflows/scripts/delete-untagged-amazon-public-ecr-images.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # vim:sw=2:ts=2:sts=2:et 3 | # Inspired by https://github.com/zeek/zeek/blob/master/ci/public-ecr-cleanup.sh 4 | 5 | set -eu 6 | 7 | REPOSITORY_NAME=nginx-unprivileged 8 | BATCH_DELETE_SIZE=100 # The max delete size allowed by the 'batch-delete-image' AWS CLI command is 100 9 | CUTOFF_DATE=$(date -d '2 years ago' +%Y-%m-%d) 10 | 11 | function batch_delete { 12 | while read -r batch; do 13 | if [ -z "${batch}" ]; then 14 | break 15 | fi 16 | 17 | echo "Deleting ${batch}" 18 | aws ecr-public batch-delete-image --repository-name "${REPOSITORY_NAME}" --image-ids ${batch} 19 | 20 | done < <(xargs -L ${BATCH_DELETE_SIZE} <<<"$1") 21 | } 22 | 23 | # Find untagged manifest lists and delete them first as 24 | # otherwise any referenced untagged images can not be deleted. 
25 | IMAGE_DIGESTS=$(aws ecr-public describe-images \ 26 | --repository-name "${REPOSITORY_NAME}" \ 27 | --query 'imageDetails[?!imageTags && (contains(imageManifestMediaType, `manifest.list.v2`) || contains(imageManifestMediaType, `image.index.v1`)) && imagePushedAt < `'$CUTOFF_DATE'`].{imageDigest: join(`=`, [`imageDigest`, imageDigest])}' \ 28 | --output text) 29 | 30 | batch_delete "${IMAGE_DIGESTS}" 31 | 32 | # Find untagged images and delete them. 33 | IMAGE_DIGESTS=$(aws ecr-public describe-images \ 34 | --repository-name "${REPOSITORY_NAME}" \ 35 | --query 'imageDetails[?!imageTags && imagePushedAt < `'$CUTOFF_DATE'` ].{imageDigest: join(`=`, [`imageDigest`, imageDigest])}' \ 36 | --output text) 37 | 38 | batch_delete "${IMAGE_DIGESTS}" 39 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Any private crt and keys # 2 | ############################ 3 | *.crt 4 | *.key 5 | *~ 6 | \#* 7 | 8 | # OS Specific # 9 | ############### 10 | Thumbs.db 11 | .DS_Store 12 | .vscode 13 | 14 | # Logs # 15 | ######## 16 | *.log 17 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation. 6 | 7 | We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. 8 | 9 | ## Our Standards 10 | 11 | Examples of behavior that contributes to a positive environment for our community include: 12 | 13 | - Demonstrating empathy and kindness toward other people. 14 | - Being respectful of differing opinions, viewpoints, and experiences. 15 | - Giving and gracefully accepting constructive feedback. 16 | - Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience. 17 | - Focusing on what is best not just for us as individuals, but for the overall community. 18 | 19 | Examples of unacceptable behavior include: 20 | 21 | - The use of sexualized language or imagery, and sexual attention or advances of any kind. 22 | - Trolling, insulting or derogatory comments, and personal or political attacks. 23 | - Public or private harassment. 24 | - Publishing others' private information, such as a physical or email address, without their explicit permission. 25 | - Other conduct which could reasonably be considered inappropriate in a professional setting. 26 | 27 | ## Enforcement Responsibilities 28 | 29 | Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. 
30 | 31 | Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. 32 | 33 | ## Scope 34 | 35 | This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official email address, posting via an official social media account, or acting as an appointed representative at an online or offline event. 36 | 37 | ## Enforcement 38 | 39 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at . All complaints will be reviewed and investigated promptly and fairly. 40 | 41 | All community leaders are obligated to respect the privacy and security of the reporter of any incident. 42 | 43 | ## Enforcement Guidelines 44 | 45 | Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: 46 | 47 | ### 1. Correction 48 | 49 | **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. 50 | 51 | **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. 52 | 53 | ### 2. Warning 54 | 55 | **Community Impact**: A violation through a single incident or series of actions. 56 | 57 | **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. 58 | 59 | ### 3. Temporary Ban 60 | 61 | **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. 62 | 63 | **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. 64 | 65 | ### 4. Permanent Ban 66 | 67 | **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. 68 | 69 | **Consequence**: A permanent ban from any sort of public interaction within the community. 70 | 71 | ## Attribution 72 | 73 | This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 2.1, available at . 74 | 75 | Community Impact Guidelines were inspired by 76 | [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/inclusion). 77 | 78 | For answers to common questions about this code of conduct, see the FAQ at . Translations are available at . 
79 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | The following is a set of guidelines for contributing to the NGINX Docker unprivileged image. We really appreciate that you are considering contributing! 4 | 5 | #### Table Of Contents 6 | 7 | - [Getting Started](#getting-started) 8 | - [Contributing](#contributing) 9 | - [Code Guidelines](#code-guidelines) 10 | 11 | ## Getting Started 12 | 13 | Look at the upstream Docker image [how to use this image guide](https://hub.docker.com/_/nginx/) to get the NGINX Docker unprivileged image up and running. 14 | 15 | ## Contributing 16 | 17 | ### Report a Bug 18 | 19 | To report a bug, open an issue on GitHub with the label `bug` using the available [bug report issue form](/.github/ISSUE_TEMPLATE/bug_report.yml). Please ensure the bug has not already been reported. **If the bug is a potential security vulnerability, please report it using our [security policy](/SECURITY.md).** 20 | 21 | ### Suggest a Feature or Enhancement 22 | 23 | To suggest a feature or enhancement, please create an issue on GitHub with the label `enhancement` using the available [feature request issue form](/.github/ISSUE_TEMPLATE/feature_request.yml). Please ensure the feature or enhancement has not already been suggested. 24 | 25 | ### Open a Pull Request (PR) 26 | 27 | - Fork the repo, create a branch, implement your changes, test that the corresponding Docker images can be built and run as intended, and submit a PR when your changes are **tested** and ready for review. 28 | - Fill in the [PR template](/.github/pull_request_template.md). 29 | - This repository is a mirror image of the upstream [NGINX Docker image](https://github.com/nginx/docker-nginx) with minor changes in order to support running NGINX in an unprivileged environment. As such, only two types of PRs will be considered: 30 | 31 |   1. PRs that incorporate changes made to the upstream image that have not yet been ported to this image (e.g. there's a new NGINX release). 32 |   2. PRs that add a critical feature or a nice-to-have enhancement for running these images on an unprivileged environment (e.g. allowing users to specify the UID/GID of the image user). 33 | 34 | **Note:** If you'd like to implement a new feature, please consider creating a [feature request issue](/.github/ISSUE_TEMPLATE/feature_request.yml) first to start a discussion about the feature. 35 | 36 | #### F5 Contributor License Agreement (CLA) 37 | 38 | F5 requires all contributors to agree to the terms of the F5 CLA (available [here](https://github.com/f5/f5-cla/blob/main/docs/f5_cla.md)) before any of their changes can be incorporated into an F5 Open Source repository (even contributions to the F5 CLA itself!). 39 | 40 | If you have not yet agreed to the F5 CLA terms and submit a PR to this repository, a bot will prompt you to view and agree to the F5 CLA. You will have to agree to the F5 CLA terms through a comment in the PR before any of your changes can be merged. Your agreement signature will be safely stored by F5 and no longer be required in future PRs. 41 | 42 | ## Code Guidelines 43 | 44 | ### Docker Guidelines 45 | 46 | - Update any entrypoint scripts via the scripts contained in the [`/entrypoint`](/entrypoint) directory. 47 | - Update any Dockerfiles via the Dockerfile templates in the root directory (e.g.
[`Dockerfile-alpine.template`](/Dockerfile-alpine.template)). 48 | - Run the [`./update.sh`](/update.sh) script to apply all entrypoint/Dockerfile template changes to the relevant image entrypoints & Dockerfiles. 49 | 50 | ### Git Guidelines 51 | 52 | - Keep a clean, concise and meaningful git commit history on your branch (within reason), rebasing locally and squashing before submitting a PR. 53 | - If possible and/or relevant, use the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) format when writing a commit message, so that changelogs can be automatically generated. 54 | - Follow the guidelines of writing a good commit message as described here and summarized in the next few points: 55 | - In the subject line, use the present tense ("Add feature" not "Added feature"). 56 | - In the subject line, use the imperative mood ("Move cursor to..." not "Moves cursor to..."). 57 | - Limit the subject line to 72 characters or less. 58 | - Reference issues and pull requests liberally after the subject line. 59 | - Add more detailed description in the body of the git message (`git commit -a` to give you more space and time in your text editor to write a good message instead of `git commit -am`). 60 | -------------------------------------------------------------------------------- /Dockerfile-alpine-perl.template: -------------------------------------------------------------------------------- 1 | ARG IMAGE=nginxinc/nginx-unprivileged:%%NGINX_VERSION%%-alpine 2 | FROM $IMAGE 3 | 4 | ARG UID=101 5 | ARG GID=101 6 | 7 | USER root 8 | 9 | RUN set -x \ 10 | && apkArch="$(cat /etc/apk/arch)" \ 11 | && nginxPackages="%%PACKAGES%% 12 | " \ 13 | # install prerequisites for public key and pkg-oss checks 14 | && apk add --no-cache --virtual .checksum-deps \ 15 | openssl \ 16 | && case "$apkArch" in \ 17 | x86_64|aarch64) \ 18 | # arches officially built by upstream 19 | apk add -X "%%PACKAGEREPO%%v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \ 20 | ;; \ 21 | *) \ 22 | # we're on an architecture upstream doesn't officially build for 23 | # let's build binaries from the published packaging sources 24 | set -x \ 25 | && tempDir="$(mktemp -d)" \ 26 | && chown nobody:nobody $tempDir \ 27 | && apk add --no-cache --virtual .build-deps \ 28 | gcc \ 29 | libc-dev \ 30 | make \ 31 | openssl-dev \ 32 | pcre2-dev \ 33 | zlib-dev \ 34 | linux-headers \ 35 | perl-dev \ 36 | bash \ 37 | alpine-sdk \ 38 | findutils \ 39 | curl \ 40 | && su nobody -s /bin/sh -c " \ 41 | export HOME=${tempDir} \ 42 | && cd ${tempDir} \ 43 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/%%REVISION%%.tar.gz \ 44 | && PKGOSSCHECKSUM=\"%%PKGOSSCHECKSUM%% *%%REVISION%%.tar.gz\" \ 45 | && if [ \"\$(openssl sha512 -r %%REVISION%%.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \ 46 | echo \"pkg-oss tarball checksum verification succeeded!\"; \ 47 | else \ 48 | echo \"pkg-oss tarball checksum verification failed!\"; \ 49 | exit 1; \ 50 | fi \ 51 | && tar xzvf %%REVISION%%.tar.gz \ 52 | && cd pkg-oss-%%REVISION%% \ 53 | && cd alpine \ 54 | && make %%BUILDTARGET%% \ 55 | && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \ 56 | && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \ 57 | " \ 58 | && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \ 59 | && apk del --no-network .build-deps \ 60 | && apk add -X ${tempDir}/packages/alpine/ --no-cache 
$nginxPackages \ 61 | ;; \ 62 | esac \ 63 | # remove checksum deps 64 | && apk del --no-network .checksum-deps \ 65 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps) 66 | && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \ 67 | && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi 68 | 69 | USER $UID 70 | -------------------------------------------------------------------------------- /Dockerfile-alpine-slim.template: -------------------------------------------------------------------------------- 1 | ARG IMAGE=alpine:%%ALPINE_VERSION%% 2 | FROM $IMAGE 3 | 4 | LABEL maintainer="NGINX Docker Maintainers " 5 | 6 | ENV NGINX_VERSION=%%NGINX_VERSION%% 7 | ENV PKG_RELEASE=%%PKG_RELEASE%% 8 | ENV DYNPKG_RELEASE=%%DYNPKG_RELEASE%% 9 | 10 | ARG UID=101 11 | ARG GID=101 12 | 13 | RUN set -x \ 14 | # create nginx user/group first, to be consistent throughout docker variants 15 | && addgroup -g $GID -S nginx || true \ 16 | && adduser -S -D -H -u $UID -h /var/cache/nginx -s /sbin/nologin -G nginx -g nginx nginx || true \ 17 | && apkArch="$(cat /etc/apk/arch)" \ 18 | && nginxPackages="%%PACKAGES%% 19 | " \ 20 | # install prerequisites for public key and pkg-oss checks 21 | && apk add --no-cache --virtual .checksum-deps \ 22 | openssl \ 23 | && case "$apkArch" in \ 24 | x86_64|aarch64) \ 25 | # arches officially built by upstream 26 | set -x \ 27 | && KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \ 28 | && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \ 29 | && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \ 30 | echo "key verification succeeded!"; \ 31 | mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \ 32 | else \ 33 | echo "key verification failed!"; \ 34 | exit 1; \ 35 | fi \ 36 | && apk add -X "%%PACKAGEREPO%%v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \ 37 | ;; \ 38 | *) \ 39 | # we're on an architecture upstream doesn't officially build for 40 | # let's build binaries from the published packaging sources 41 | set -x \ 42 | && tempDir="$(mktemp -d)" \ 43 | && chown nobody:nobody $tempDir \ 44 | && apk add --no-cache --virtual .build-deps \ 45 | gcc \ 46 | libc-dev \ 47 | make \ 48 | openssl-dev \ 49 | pcre2-dev \ 50 | zlib-dev \ 51 | linux-headers \ 52 | bash \ 53 | alpine-sdk \ 54 | findutils \ 55 | curl \ 56 | && su nobody -s /bin/sh -c " \ 57 | export HOME=${tempDir} \ 58 | && cd ${tempDir} \ 59 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/%%REVISION%%.tar.gz \ 60 | && PKGOSSCHECKSUM=\"%%PKGOSSCHECKSUM%% *%%REVISION%%.tar.gz\" \ 61 | && if [ \"\$(openssl sha512 -r %%REVISION%%.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \ 62 | echo \"pkg-oss tarball checksum verification succeeded!\"; \ 63 | else \ 64 | echo \"pkg-oss tarball checksum verification failed!\"; \ 65 | exit 1; \ 66 | fi \ 67 | && tar xzvf %%REVISION%%.tar.gz \ 68 | && cd pkg-oss-%%REVISION%% \ 69 | && cd alpine \ 70 | && make %%BUILDTARGET%% \ 71 | && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \ 72 | && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \ 73 | " \ 74 | && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \ 75 | && apk del --no-network .build-deps \ 76 | && apk add -X 
${tempDir}/packages/alpine/ --no-cache $nginxPackages \ 77 | ;; \ 78 | esac \ 79 | # remove checksum deps 80 | && apk del --no-network .checksum-deps \ 81 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps) 82 | && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \ 83 | && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \ 84 | # Add `envsubst` for templating environment variables 85 | && apk add --no-cache gettext-envsubst \ 86 | # Bring in tzdata so users could set the timezones through the environment 87 | # variables 88 | && apk add --no-cache tzdata \ 89 | # forward request and error logs to docker log collector 90 | && ln -sf /dev/stdout /var/log/nginx/access.log \ 91 | && ln -sf /dev/stderr /var/log/nginx/error.log \ 92 | # create a docker-entrypoint.d directory 93 | && mkdir /docker-entrypoint.d 94 | 95 | # implement changes required to run NGINX as an unprivileged user 96 | RUN sed -i 's,listen 80;,listen 8080;,' /etc/nginx/conf.d/default.conf \ 97 | && sed -i '/user nginx;/d' /etc/nginx/nginx.conf \ 98 | && sed -i 's,\(/var\)\{0\,1\}/run/nginx.pid,/tmp/nginx.pid,' /etc/nginx/nginx.conf \ 99 | && sed -i "/^http {/a \ proxy_temp_path /tmp/proxy_temp;\n client_body_temp_path /tmp/client_temp;\n fastcgi_temp_path /tmp/fastcgi_temp;\n uwsgi_temp_path /tmp/uwsgi_temp;\n scgi_temp_path /tmp/scgi_temp;\n" /etc/nginx/nginx.conf \ 100 | # nginx user must own the cache and etc directory to write cache and tweak the nginx config 101 | && chown -R $UID:0 /var/cache/nginx \ 102 | && chmod -R g+w /var/cache/nginx \ 103 | && chown -R $UID:0 /etc/nginx \ 104 | && chmod -R g+w /etc/nginx 105 | 106 | COPY docker-entrypoint.sh / 107 | COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d 108 | COPY 15-local-resolvers.envsh /docker-entrypoint.d 109 | COPY 20-envsubst-on-templates.sh /docker-entrypoint.d 110 | COPY 30-tune-worker-processes.sh /docker-entrypoint.d 111 | ENTRYPOINT ["/docker-entrypoint.sh"] 112 | 113 | EXPOSE 8080 114 | 115 | STOPSIGNAL SIGQUIT 116 | 117 | USER $UID 118 | 119 | CMD ["nginx", "-g", "daemon off;"] 120 | -------------------------------------------------------------------------------- /Dockerfile-alpine.template: -------------------------------------------------------------------------------- 1 | ARG IMAGE=nginxinc/nginx-unprivileged:%%NGINX_VERSION%%-alpine-slim 2 | FROM $IMAGE 3 | 4 | ENV NJS_VERSION=%%NJS_VERSION%% 5 | ENV NJS_RELEASE=%%NJS_RELEASE%% 6 | 7 | ARG UID=101 8 | ARG GID=101 9 | 10 | USER root 11 | 12 | RUN set -x \ 13 | && apkArch="$(cat /etc/apk/arch)" \ 14 | && nginxPackages="%%PACKAGES%% 15 | " \ 16 | # install prerequisites for public key and pkg-oss checks 17 | && apk add --no-cache --virtual .checksum-deps \ 18 | openssl \ 19 | && case "$apkArch" in \ 20 | x86_64|aarch64) \ 21 | # arches officially built by upstream 22 | apk add -X "%%PACKAGEREPO%%v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \ 23 | ;; \ 24 | *) \ 25 | # we're on an architecture upstream doesn't officially build for 26 | # let's build binaries from the published packaging sources 27 | set -x \ 28 | && tempDir="$(mktemp -d)" \ 29 | && chown nobody:nobody $tempDir \ 30 | && apk add --no-cache --virtual .build-deps \ 31 | gcc \ 32 | libc-dev \ 33 | make \ 34 | openssl-dev \ 35 | pcre2-dev \ 36 | zlib-dev \ 37 | linux-headers \ 38 | libxslt-dev \ 39 | gd-dev \ 40 | geoip-dev \ 41 | libedit-dev \ 42 | bash \ 43 | alpine-sdk \ 44 | findutils \ 45 | curl \ 46 | 
&& su nobody -s /bin/sh -c " \ 47 | export HOME=${tempDir} \ 48 | && cd ${tempDir} \ 49 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/%%REVISION%%.tar.gz \ 50 | && PKGOSSCHECKSUM=\"%%PKGOSSCHECKSUM%% *%%REVISION%%.tar.gz\" \ 51 | && if [ \"\$(openssl sha512 -r %%REVISION%%.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \ 52 | echo \"pkg-oss tarball checksum verification succeeded!\"; \ 53 | else \ 54 | echo \"pkg-oss tarball checksum verification failed!\"; \ 55 | exit 1; \ 56 | fi \ 57 | && tar xzvf %%REVISION%%.tar.gz \ 58 | && cd pkg-oss-%%REVISION%% \ 59 | && cd alpine \ 60 | && make %%BUILDTARGET%% \ 61 | && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \ 62 | && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \ 63 | " \ 64 | && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \ 65 | && apk del --no-network .build-deps \ 66 | && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \ 67 | ;; \ 68 | esac \ 69 | # remove checksum deps 70 | && apk del --no-network .checksum-deps \ 71 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps) 72 | && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \ 73 | && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \ 74 | # Bring in curl and ca-certificates to make registering on DNS SD easier 75 | && apk add --no-cache curl ca-certificates 76 | 77 | USER $UID 78 | -------------------------------------------------------------------------------- /Dockerfile-debian-perl.template: -------------------------------------------------------------------------------- 1 | ARG IMAGE=nginxinc/nginx-unprivileged:%%NGINX_VERSION%% 2 | FROM $IMAGE 3 | 4 | ARG UID=101 5 | ARG GID=101 6 | 7 | USER root 8 | 9 | RUN set -x; \ 10 | NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \ 11 | dpkgArch="$(dpkg --print-architecture)" \ 12 | && nginxPackages="%%PACKAGES%% 13 | " \ 14 | && case "$dpkgArch" in \ 15 | amd64|arm64) \ 16 | # arches officialy built by upstream 17 | echo "deb [signed-by=$NGINX_GPGKEY_PATH] %%PACKAGEREPO%% %%DEBIAN_VERSION%% nginx" >> /etc/apt/sources.list.d/nginx.list \ 18 | && apt-get update \ 19 | ;; \ 20 | *) \ 21 | # we're on an architecture upstream doesn't officially build for 22 | # let's build binaries from the published packaging sources 23 | # new directory for storing sources and .deb files 24 | tempDir="$(mktemp -d)" \ 25 | && chmod 777 "$tempDir" \ 26 | # (777 to ensure APT's "_apt" user can access it too) 27 | \ 28 | # save list of currently-installed packages so build dependencies can be cleanly removed later 29 | && savedAptMark="$(apt-mark showmanual)" \ 30 | \ 31 | # build .deb files from upstream's packaging sources 32 | && apt-get update \ 33 | && apt-get install --no-install-recommends --no-install-suggests -y \ 34 | curl \ 35 | devscripts \ 36 | equivs \ 37 | git \ 38 | libxml2-utils \ 39 | lsb-release \ 40 | xsltproc \ 41 | && ( \ 42 | cd "$tempDir" \ 43 | && REVISION="%%REVISION%%" \ 44 | && REVISION=${REVISION%~*} \ 45 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \ 46 | && PKGOSSCHECKSUM="%%PKGOSSCHECKSUM%% *${REVISION}.tar.gz" \ 47 | && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \ 48 | echo "pkg-oss tarball checksum verification succeeded!"; \ 49 | else \ 50 | echo "pkg-oss tarball checksum 
verification failed!"; \ 51 | exit 1; \ 52 | fi \ 53 | && tar xzvf ${REVISION}.tar.gz \ 54 | && cd pkg-oss-${REVISION} \ 55 | && cd debian \ 56 | && for target in %%BUILDTARGET%%; do \ 57 | make rules-$target; \ 58 | mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \ 59 | debuild-$target/nginx-$NGINX_VERSION/debian/control; \ 60 | done \ 61 | && make %%BUILDTARGET%% \ 62 | ) \ 63 | # we don't remove APT lists here because they get re-downloaded and removed later 64 | \ 65 | # reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies 66 | # (which is done after we install the built packages so we don't have to redownload any overlapping dependencies) 67 | && apt-mark showmanual | xargs apt-mark auto > /dev/null \ 68 | && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \ 69 | \ 70 | # create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be) 71 | && ls -lAFh "$tempDir" \ 72 | && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \ 73 | && grep '^Package: ' "$tempDir/Packages" \ 74 | && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \ 75 | # work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes") 76 | # Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied) 77 | # ... 78 | # E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied) 79 | && apt-get -o Acquire::GzipIndexes=false update \ 80 | ;; \ 81 | esac \ 82 | \ 83 | && apt-get install --no-install-recommends --no-install-suggests -y \ 84 | $nginxPackages \ 85 | gettext-base \ 86 | curl \ 87 | && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \ 88 | \ 89 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps) 90 | && if [ -n "$tempDir" ]; then \ 91 | apt-get purge -y --auto-remove \ 92 | && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \ 93 | fi 94 | 95 | USER $UID 96 | -------------------------------------------------------------------------------- /Dockerfile-debian.template: -------------------------------------------------------------------------------- 1 | ARG IMAGE=debian:%%DEBIAN_VERSION%%-slim 2 | FROM $IMAGE 3 | 4 | LABEL maintainer="NGINX Docker Maintainers " 5 | 6 | ENV NGINX_VERSION=%%NGINX_VERSION%% 7 | ENV NJS_VERSION=%%NJS_VERSION%% 8 | ENV NJS_RELEASE=%%NJS_RELEASE%% 9 | ENV PKG_RELEASE=%%PKG_RELEASE%% 10 | ENV DYNPKG_RELEASE=%%DYNPKG_RELEASE%% 11 | 12 | ARG UID=101 13 | ARG GID=101 14 | 15 | RUN set -x \ 16 | # create nginx user/group first, to be consistent throughout docker variants 17 | && groupadd --system --gid $GID nginx || true \ 18 | && useradd --system --gid nginx --no-create-home --home /nonexistent --comment "nginx user" --shell /bin/false --uid $UID nginx || true \ 19 | && apt-get update \ 20 | && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates \ 21 | && \ 22 | NGINX_GPGKEYS="573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 8540A6F18833A80E9C1653A42FD21310B49F6B46 9E9BE90EACBCDE69FE9B204CBCDCD8A38D88A2B3"; \ 23 | NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \ 24 | export GNUPGHOME="$(mktemp -d)"; \ 
25 | found=''; \ 26 | for NGINX_GPGKEY in $NGINX_GPGKEYS; do \ 27 | for server in \ 28 | hkp://keyserver.ubuntu.com:80 \ 29 | pgp.mit.edu \ 30 | ; do \ 31 | echo "Fetching GPG key $NGINX_GPGKEY from $server"; \ 32 | gpg1 --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \ 33 | done; \ 34 | test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \ 35 | done; \ 36 | gpg1 --export "$NGINX_GPGKEYS" > "$NGINX_GPGKEY_PATH" ; \ 37 | rm -rf "$GNUPGHOME"; \ 38 | apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \ 39 | && dpkgArch="$(dpkg --print-architecture)" \ 40 | && nginxPackages="%%PACKAGES%% 41 | " \ 42 | && case "$dpkgArch" in \ 43 | amd64|arm64) \ 44 | # arches officialy built by upstream 45 | echo "deb [signed-by=$NGINX_GPGKEY_PATH] %%PACKAGEREPO%% %%DEBIAN_VERSION%% nginx" >> /etc/apt/sources.list.d/nginx.list \ 46 | && apt-get update \ 47 | ;; \ 48 | *) \ 49 | # we're on an architecture upstream doesn't officially build for 50 | # let's build binaries from the published packaging sources 51 | # new directory for storing sources and .deb files 52 | tempDir="$(mktemp -d)" \ 53 | && chmod 777 "$tempDir" \ 54 | # (777 to ensure APT's "_apt" user can access it too) 55 | \ 56 | # save list of currently-installed packages so build dependencies can be cleanly removed later 57 | && savedAptMark="$(apt-mark showmanual)" \ 58 | \ 59 | # build .deb files from upstream's packaging sources 60 | && apt-get update \ 61 | && apt-get install --no-install-recommends --no-install-suggests -y \ 62 | curl \ 63 | devscripts \ 64 | equivs \ 65 | git \ 66 | libxml2-utils \ 67 | lsb-release \ 68 | xsltproc \ 69 | && ( \ 70 | cd "$tempDir" \ 71 | && REVISION="%%REVISION%%" \ 72 | && REVISION=${REVISION%~*} \ 73 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \ 74 | && PKGOSSCHECKSUM="%%PKGOSSCHECKSUM%% *${REVISION}.tar.gz" \ 75 | && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \ 76 | echo "pkg-oss tarball checksum verification succeeded!"; \ 77 | else \ 78 | echo "pkg-oss tarball checksum verification failed!"; \ 79 | exit 1; \ 80 | fi \ 81 | && tar xzvf ${REVISION}.tar.gz \ 82 | && cd pkg-oss-${REVISION} \ 83 | && cd debian \ 84 | && for target in %%BUILDTARGET%%; do \ 85 | make rules-$target; \ 86 | mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \ 87 | debuild-$target/nginx-$NGINX_VERSION/debian/control; \ 88 | done \ 89 | && make %%BUILDTARGET%% \ 90 | ) \ 91 | # we don't remove APT lists here because they get re-downloaded and removed later 92 | \ 93 | # reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies 94 | # (which is done after we install the built packages so we don't have to redownload any overlapping dependencies) 95 | && apt-mark showmanual | xargs apt-mark auto > /dev/null \ 96 | && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \ 97 | \ 98 | # create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be) 99 | && ls -lAFh "$tempDir" \ 100 | && ( cd "$tempDir" && dpkg-scanpackages . 
> Packages ) \ 101 | && grep '^Package: ' "$tempDir/Packages" \ 102 | && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \ 103 | # work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes") 104 | # Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied) 105 | # ... 106 | # E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied) 107 | && apt-get -o Acquire::GzipIndexes=false update \ 108 | ;; \ 109 | esac \ 110 | \ 111 | && apt-get install --no-install-recommends --no-install-suggests -y \ 112 | $nginxPackages \ 113 | gettext-base \ 114 | curl \ 115 | && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \ 116 | \ 117 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps) 118 | && if [ -n "$tempDir" ]; then \ 119 | apt-get purge -y --auto-remove \ 120 | && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \ 121 | fi \ 122 | # forward request and error logs to docker log collector 123 | && ln -sf /dev/stdout /var/log/nginx/access.log \ 124 | && ln -sf /dev/stderr /var/log/nginx/error.log \ 125 | # create a docker-entrypoint.d directory 126 | && mkdir /docker-entrypoint.d 127 | 128 | # implement changes required to run NGINX as an unprivileged user 129 | RUN sed -i 's,listen 80;,listen 8080;,' /etc/nginx/conf.d/default.conf \ 130 | && sed -i '/user nginx;/d' /etc/nginx/nginx.conf \ 131 | && sed -i 's,\(/var\)\{0\,1\}/run/nginx.pid,/tmp/nginx.pid,' /etc/nginx/nginx.conf \ 132 | && sed -i "/^http {/a \ proxy_temp_path /tmp/proxy_temp;\n client_body_temp_path /tmp/client_temp;\n fastcgi_temp_path /tmp/fastcgi_temp;\n uwsgi_temp_path /tmp/uwsgi_temp;\n scgi_temp_path /tmp/scgi_temp;\n" /etc/nginx/nginx.conf \ 133 | # nginx user must own the cache and etc directory to write cache and tweak the nginx config 134 | && chown -R $UID:0 /var/cache/nginx \ 135 | && chmod -R g+w /var/cache/nginx \ 136 | && chown -R $UID:0 /etc/nginx \ 137 | && chmod -R g+w /etc/nginx 138 | 139 | COPY docker-entrypoint.sh / 140 | COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d 141 | COPY 15-local-resolvers.envsh /docker-entrypoint.d 142 | COPY 20-envsubst-on-templates.sh /docker-entrypoint.d 143 | COPY 30-tune-worker-processes.sh /docker-entrypoint.d 144 | ENTRYPOINT ["/docker-entrypoint.sh"] 145 | 146 | EXPOSE 8080 147 | 148 | STOPSIGNAL SIGQUIT 149 | 150 | USER $UID 151 | 152 | CMD ["nginx", "-g", "daemon off;"] 153 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/nginx/docker-nginx-unprivileged/badge)](https://securityscorecards.dev/viewer/?uri=github.com/nginx/docker-nginx-unprivileged) 2 | [![Project Status: Active – The project has reached a stable, usable state and is being actively developed.](https://www.repostatus.org/badges/latest/active.svg)](https://www.repostatus.org/#active) 3 | [![Community Support](https://badgen.net/badge/support/community/cyan?icon=awesome)](/SUPPORT.md) 4 | [![Community Forum](https://img.shields.io/badge/community-forum-009639?logo=discourse&link=https%3A%2F%2Fcommunity.nginx.org)](https://community.nginx.org) 5 | [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/license/apache-2-0) 6 | [![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg)](/CODE_OF_CONDUCT.md) 7 | 8 | # NGINX Unprivileged Docker Image 9 | 10 | This repo contains a series of Dockerfiles to create an NGINX Docker image that runs NGINX as a non-root, unprivileged user. Notable differences with respect to the official [NGINX Docker](https://github.com/nginx/docker-nginx) image include: 11 | 12 | - The default NGINX listen port is now `8080` instead of `80` (this is no longer necessary as of Docker `20.10` but it's still required in other container runtimes) 13 | - The default NGINX user directive in `/etc/nginx/nginx.conf` has been removed 14 | - The default NGINX PID has been moved from `/var/run/nginx.pid` (prior to NGINX 1.27.5) and `/run/nginx.pid` (NGINX 1.27.5 and later) to `/tmp/nginx.pid` 15 | - The default `*_temp_path` directives have been changed to point to `/tmp/*` 16 | 17 | Check out the [docs](https://hub.docker.com/_/nginx) for the upstream Docker NGINX image for a detailed explanation of how to use this image. 18 | 19 | ## Supported Image Registries and Platforms 20 | 21 | ### Image Registries 22 | 23 | You can find pre-built images in each of the following registries: 24 | 25 | - Amazon ECR - 26 | - Docker Hub - 27 | - GitHub Container Registry - 28 | - **Note**: For releases prior to NGINX 1.27.4 (mainline branch) and 1.26.3 (stable branch), use the old registry link 29 | - Quay - 30 | 31 | ### Image Builds and Retention Policy 32 | 33 | #### Image Builds 34 | 35 | New images are built whenever there is a new NGINX release or a critical CVE is found and fixed (check the [security documentation](/SECURITY.md) for more details). New images are also built and pushed to all registries every Monday night. Whenever a new image is built, the current NGINX mainline and stable tags get switched to the latest build, and the image that gets replaced becomes untagged. If you wish to point your builds to a specific image over time, use the specific image digest instead of the tag.
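For example, a minimal sketch of pinning by digest (the `1.27.5-alpine` tag is only an illustration, and `sha256:<digest>` is a placeholder for the digest your registry actually reports):

```bash
# Pull a tagged image and note the "Digest: sha256:..." line that Docker prints
docker pull nginxinc/nginx-unprivileged:1.27.5-alpine

# Reference that immutable digest instead of the moving tag (e.g. in a FROM line
# or a deployment manifest) so later rebuilds of the tag don't change your image
docker pull nginxinc/nginx-unprivileged@sha256:<digest>
```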
36 | 37 | #### Image Retention Policy 38 | 39 | Untagged images on Amazon ECR and the GitHub Container Registry are cleaned up every two years. Untagged images on Docker Hub are not cleaned up at this time (this might change with the incoming storage changes). Untagged images on Quay are continuously removed due to its built-in garbage collector. The last built tag of every release is kept indefinitely in each of the aforementioned registries. 40 | 41 | ### Architectures 42 | 43 | Most images are built for the `amd64`, `arm32v5` (for Debian), `arm32v6` (for Alpine), `arm32v7`, `arm64v8`, `i386`, `mips64le` (for Debian), `ppc64le` and `s390x` architectures. 44 | 45 | ## Troubleshooting Tips 46 | 47 | - If you wish to use a different user ID and/or group ID when running the Docker Unprivileged image, rebuild the image using the following Docker build arguments: 48 | 49 | ```bash 50 | docker build --build-arg UID=<your UID> --build-arg GID=<your GID> -t nginx-unprivileged . 51 | ``` 52 | 53 | - If you override the default `nginx.conf` file, you may encounter various types of error messages: 54 | - To fix `nginx: [emerg] open() "/var/run/nginx.pid" failed (13: Permission denied)`, you have to specify a valid `pid` location by adding the line `pid /tmp/nginx.pid;` at the top level of your config. NOTE: NGINX 1.27.5 will complain about permissions for `/run/nginx.pid` due to a policy change for this path. 55 | - To fix `nginx: [emerg] mkdir() "/var/cache/nginx/client_temp" failed (30: Read-only file system)`, you have to specify a valid location for the various NGINX temporary paths by adding these lines within the `http` context: 56 | 57 | ```nginx 58 | http { 59 | client_body_temp_path /tmp/client_temp; 60 | proxy_temp_path /tmp/proxy_temp_path; 61 | fastcgi_temp_path /tmp/fastcgi_temp; 62 | uwsgi_temp_path /tmp/uwsgi_temp; 63 | scgi_temp_path /tmp/scgi_temp; 64 | ... 65 | } 66 | ``` 67 | 68 | ## On Reporting Issues and Opening PRs 69 | 70 | Whilst issues and PRs are welcome, please do note that: 71 | 72 | 1. Issues related to security vulnerabilities will be promptly closed unless they are accompanied by solid reasoning as to why the vulnerability poses a real security threat to this image. Check out the [`security documentation`](/SECURITY.md) for more details. 73 | 2. These images are unprivileged ports of the upstream [Docker NGINX](https://github.com/nginx/docker-nginx) images. Any changes that do not specifically involve the changes made to run NGINX on an unprivileged system should be reported in the [Docker NGINX](https://github.com/nginx/docker-nginx) upstream repo. They will not get addressed here. 74 | 3. Following on from point 2, base images (e.g. Alpine x.x or Debian x) in the [Docker NGINX](https://github.com/nginx/docker-nginx) upstream repo get updated when a new version of NGINX is released, never within the same release version. Similarly, new NGINX releases usually make their way to the [Docker NGINX](https://github.com/nginx/docker-nginx) image a couple of days after their standard release. Please refrain from opening an issue or PR here if the upstream repo hasn't been updated -- it will be closed. 75 | 76 | ## Contributing 77 | 78 | Please see the [contributing guide](/CONTRIBUTING.md) for guidelines on how to best contribute to this project.
79 | 80 | ## License 81 | 82 | [Apache License, Version 2.0](/LICENSE) 83 | 84 | © [F5, Inc.](https://www.f5.com/) 2018 - 2025 85 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Latest Versions 4 | 5 | We advise users to run or update to the most recent release of the NGINX Docker unprivileged image. Older versions of the NGINX Docker unprivileged image may not have all enhancements and/or bug fixes applied to them. 6 | 7 | ## Reporting a Vulnerability 8 | 9 | The F5 Security Incident Response Team (F5 SIRT) offers two methods to easily report potential security vulnerabilities: 10 | 11 | - If you’re an F5 customer with an active support contract, please contact [F5 Technical Support](https://www.f5.com/support). 12 | - If you aren’t an F5 customer, please report any potential or current instances of security vulnerabilities in any F5 product to the F5 Security Incident Response Team at . 13 | 14 | For more information, please read the F5 SIRT vulnerability reporting guidelines available at [https://www.f5.com/support/report-a-vulnerability](https://www.f5.com/support/report-a-vulnerability). 15 | 16 | ## CVEs to be considered 17 | 18 | If you find a security vulnerability that directly affects a direct NGINX library dependency, we encourage you to open an issue detailing the vulnerability. ***Only vulnerabilities related to direct NGINX library dependencies will be considered. Other security vulnerabilities will be addressed by the weekly Monday night build and as such will be ignored/promptly closed.*** 19 | 20 | For reference, the direct NGINX library dependencies are: 21 | 22 | - For Debian: 23 | - `libc6` 24 | - `libcrypt1` 25 | - `libpcre2` 26 | - `libssl` 27 | - `zlib1g` 28 | 29 | - For Alpine Linux: 30 | - `libc` 31 | - `libcrypto` 32 | - `libpcre2` 33 | - `libssl` 34 | - `libz` 35 | -------------------------------------------------------------------------------- /SUPPORT.md: -------------------------------------------------------------------------------- 1 | # Support 2 | 3 | ## Ask a Question 4 | 5 | We use GitHub for tracking bugs and feature requests related to all Docker NGINX unprivileged images (including all variants and container registries). 6 | 7 | Don't know how something in this project works? Curious if this project can achieve your desired functionality? Please open an issue on GitHub with the label `question`. Alternatively, start a GitHub discussion! 8 | 9 | ## NGINX Specific Questions and/or Issues 10 | 11 | This isn't the right place to get support for NGINX-specific questions, but the following resources are available. Thanks for your understanding! 12 | 13 | ### Community Forum 14 | 15 | We have a community [forum](https://community.nginx.org/)! If you have any questions and/or issues, try checking out the [`Troubleshooting`](https://community.nginx.org/c/troubleshooting/8) and [`How do I...?`](https://community.nginx.org/c/how-do-i/9) categories. Both fellow community members and NGINXers might be able to help you! :) 16 | 17 | ### Documentation 18 | 19 | For a comprehensive list of all NGINX directives, check out . 20 | 21 | For a comprehensive list of administration and deployment guides for all NGINX products, check out . 22 | 23 | ### Mailing List 24 | 25 | Want to get in touch with the NGINX development team directly? Try using the relevant mailing list found at !
26 | 27 | ## Contributing 28 | 29 | Please see the [contributing guide](/CONTRIBUTING.md) for guidelines on how to best contribute to this project. 30 | 31 | ## Commercial Support 32 | 33 | Commercial support for this project may be available. Please get in touch with [NGINX sales](https://www.f5.com/products/get-f5/) or check your contract details for more info! 34 | 35 | ## Community Support 36 | 37 | Community support is offered on a best effort basis through either GitHub issues/PRs/discussions or through any of our active communities. 38 | -------------------------------------------------------------------------------- /entrypoint/10-listen-on-ipv6-by-default.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:sw=4:ts=4:et 3 | 4 | set -e 5 | 6 | entrypoint_log() { 7 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then 8 | echo "$@" 9 | fi 10 | } 11 | 12 | ME=$(basename "$0") 13 | DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf" 14 | 15 | # check if we have ipv6 available 16 | if [ ! -f "/proc/net/if_inet6" ]; then 17 | entrypoint_log "$ME: info: ipv6 not available" 18 | exit 0 19 | fi 20 | 21 | if [ ! -f "/$DEFAULT_CONF_FILE" ]; then 22 | entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist" 23 | exit 0 24 | fi 25 | 26 | # check if the file can be modified, e.g. not on a r/o filesystem 27 | touch /$DEFAULT_CONF_FILE 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; } 28 | 29 | # check if the file is already modified, e.g. on a container restart 30 | grep -q "listen \[::]\:8080;" /$DEFAULT_CONF_FILE && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; } 31 | 32 | if [ -f "/etc/os-release" ]; then 33 | . 
/etc/os-release 34 | else 35 | entrypoint_log "$ME: info: can not guess the operating system" 36 | exit 0 37 | fi 38 | 39 | entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE" 40 | 41 | case "$ID" in 42 | "debian") 43 | CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3) 44 | echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || { 45 | entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version" 46 | exit 0 47 | } 48 | ;; 49 | "alpine") 50 | CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2) 51 | echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || { 52 | entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version" 53 | exit 0 54 | } 55 | ;; 56 | *) 57 | entrypoint_log "$ME: info: Unsupported distribution" 58 | exit 0 59 | ;; 60 | esac 61 | 62 | # enable ipv6 on default.conf listen sockets 63 | sed -i -E 's,listen 8080;,listen 8080;\n listen [::]:8080;,' /$DEFAULT_CONF_FILE 64 | 65 | entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE" 66 | 67 | exit 0 68 | -------------------------------------------------------------------------------- /entrypoint/15-local-resolvers.envsh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:sw=2:ts=2:sts=2:et 3 | 4 | set -eu 5 | 6 | LC_ALL=C 7 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 8 | 9 | [ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0 10 | 11 | NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {if ($2 ~ ":") {print "["$2"]"} else {print $2}}' /etc/resolv.conf) 12 | 13 | NGINX_LOCAL_RESOLVERS="${NGINX_LOCAL_RESOLVERS% }" 14 | 15 | export NGINX_LOCAL_RESOLVERS 16 | -------------------------------------------------------------------------------- /entrypoint/20-envsubst-on-templates.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | ME=$(basename "$0") 6 | 7 | entrypoint_log() { 8 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then 9 | echo "$@" 10 | fi 11 | } 12 | 13 | add_stream_block() { 14 | local conffile="/etc/nginx/nginx.conf" 15 | 16 | if grep -q -E "\s*stream\s*\{" "$conffile"; then 17 | entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates" 18 | else 19 | # check if the file can be modified, e.g. 
not on a r/o filesystem 20 | touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; } 21 | entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf" 22 | cat << END >> "$conffile" 23 | # added by "$ME" on "$(date)" 24 | stream { 25 | include $stream_output_dir/*.conf; 26 | } 27 | END 28 | fi 29 | } 30 | 31 | auto_envsubst() { 32 | local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}" 33 | local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}" 34 | local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}" 35 | local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}" 36 | local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}" 37 | local filter="${NGINX_ENVSUBST_FILTER:-}" 38 | 39 | local template defined_envs relative_path output_path subdir 40 | defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null )) 41 | [ -d "$template_dir" ] || return 0 42 | if [ ! -w "$output_dir" ]; then 43 | entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable" 44 | return 0 45 | fi 46 | find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do 47 | relative_path="${template#"$template_dir/"}" 48 | output_path="$output_dir/${relative_path%"$suffix"}" 49 | subdir=$(dirname "$relative_path") 50 | # create a subdirectory where the template file exists 51 | mkdir -p "$output_dir/$subdir" 52 | entrypoint_log "$ME: Running envsubst on $template to $output_path" 53 | envsubst "$defined_envs" < "$template" > "$output_path" 54 | done 55 | 56 | # Print the first file with the stream suffix, this will be false if there are none 57 | if test -n "$(find "$template_dir" -name "*$stream_suffix" -print -quit)"; then 58 | mkdir -p "$stream_output_dir" 59 | if [ ! 
-w "$stream_output_dir" ]; then 60 | entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable" 61 | return 0 62 | fi 63 | add_stream_block 64 | find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do 65 | relative_path="${template#"$template_dir/"}" 66 | output_path="$stream_output_dir/${relative_path%"$stream_suffix"}" 67 | subdir=$(dirname "$relative_path") 68 | # create a subdirectory where the template file exists 69 | mkdir -p "$stream_output_dir/$subdir" 70 | entrypoint_log "$ME: Running envsubst on $template to $output_path" 71 | envsubst "$defined_envs" < "$template" > "$output_path" 72 | done 73 | fi 74 | } 75 | 76 | auto_envsubst 77 | 78 | exit 0 79 | -------------------------------------------------------------------------------- /entrypoint/30-tune-worker-processes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:sw=2:ts=2:sts=2:et 3 | 4 | set -eu 5 | 6 | LC_ALL=C 7 | ME=$(basename "$0") 8 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 9 | 10 | [ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0 11 | 12 | touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; } 13 | 14 | ceildiv() { 15 | num=$1 16 | div=$2 17 | echo $(( (num + div - 1) / div )) 18 | } 19 | 20 | get_cpuset() { 21 | cpusetroot=$1 22 | cpusetfile=$2 23 | ncpu=0 24 | [ -f "$cpusetroot/$cpusetfile" ] || return 1 25 | for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do 26 | case "$token" in 27 | *-*) 28 | count=$( seq $(echo "$token" | tr '-' ' ') | wc -l ) 29 | ncpu=$(( ncpu+count )) 30 | ;; 31 | *) 32 | ncpu=$(( ncpu+1 )) 33 | ;; 34 | esac 35 | done 36 | echo "$ncpu" 37 | } 38 | 39 | get_quota() { 40 | cpuroot=$1 41 | ncpu=0 42 | [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1 43 | [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1 44 | cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" ) 45 | cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" ) 46 | [ "$cfs_quota" = "-1" ] && return 1 47 | [ "$cfs_period" = "0" ] && return 1 48 | ncpu=$( ceildiv "$cfs_quota" "$cfs_period" ) 49 | [ "$ncpu" -gt 0 ] || return 1 50 | echo "$ncpu" 51 | } 52 | 53 | get_quota_v2() { 54 | cpuroot=$1 55 | ncpu=0 56 | [ -f "$cpuroot/cpu.max" ] || return 1 57 | cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" ) 58 | cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" ) 59 | [ "$cfs_quota" = "max" ] && return 1 60 | [ "$cfs_period" = "0" ] && return 1 61 | ncpu=$( ceildiv "$cfs_quota" "$cfs_period" ) 62 | [ "$ncpu" -gt 0 ] || return 1 63 | echo "$ncpu" 64 | } 65 | 66 | get_cgroup_v1_path() { 67 | needle=$1 68 | found= 69 | foundroot= 70 | mountpoint= 71 | 72 | [ -r "/proc/self/mountinfo" ] || return 1 73 | [ -r "/proc/self/cgroup" ] || return 1 74 | 75 | while IFS= read -r line; do 76 | case "$needle" in 77 | "cpuset") 78 | case "$line" in 79 | *cpuset*) 80 | found=$( echo "$line" | cut -d ' ' -f 4,5 ) 81 | break 82 | ;; 83 | esac 84 | ;; 85 | "cpu") 86 | case "$line" in 87 | *cpuset*) 88 | ;; 89 | *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*) 90 | found=$( echo "$line" | cut -d ' ' -f 4,5 ) 91 | break 92 | ;; 93 | esac 94 | esac 95 | done << __EOF__ 96 | $( grep -F -- '- cgroup ' /proc/self/mountinfo ) 97 | __EOF__ 98 | 99 | while IFS= read -r line; do 100 | controller=$( echo "$line" | cut -d: -f 2 ) 101 | case "$needle" in 102 | "cpuset") 103 | case "$controller" in 104 | cpuset) 105 | mountpoint=$( 
echo "$line" | cut -d: -f 3 ) 106 | break 107 | ;; 108 | esac 109 | ;; 110 | "cpu") 111 | case "$controller" in 112 | cpu,cpuacct|cpuacct,cpu|cpuacct|cpu) 113 | mountpoint=$( echo "$line" | cut -d: -f 3 ) 114 | break 115 | ;; 116 | esac 117 | ;; 118 | esac 119 | done << __EOF__ 120 | $( grep -F -- 'cpu' /proc/self/cgroup ) 121 | __EOF__ 122 | 123 | case "${found%% *}" in 124 | "/") 125 | foundroot="${found##* }$mountpoint" 126 | ;; 127 | "$mountpoint") 128 | foundroot="${found##* }" 129 | ;; 130 | esac 131 | echo "$foundroot" 132 | } 133 | 134 | get_cgroup_v2_path() { 135 | found= 136 | foundroot= 137 | mountpoint= 138 | 139 | [ -r "/proc/self/mountinfo" ] || return 1 140 | [ -r "/proc/self/cgroup" ] || return 1 141 | 142 | while IFS= read -r line; do 143 | found=$( echo "$line" | cut -d ' ' -f 4,5 ) 144 | done << __EOF__ 145 | $( grep -F -- '- cgroup2 ' /proc/self/mountinfo ) 146 | __EOF__ 147 | 148 | while IFS= read -r line; do 149 | mountpoint=$( echo "$line" | cut -d: -f 3 ) 150 | done << __EOF__ 151 | $( grep -F -- '0::' /proc/self/cgroup ) 152 | __EOF__ 153 | 154 | case "${found%% *}" in 155 | "") 156 | return 1 157 | ;; 158 | "/") 159 | foundroot="${found##* }$mountpoint" 160 | ;; 161 | "$mountpoint" | /../*) 162 | foundroot="${found##* }" 163 | ;; 164 | esac 165 | echo "$foundroot" 166 | } 167 | 168 | ncpu_online=$( getconf _NPROCESSORS_ONLN ) 169 | ncpu_cpuset= 170 | ncpu_quota= 171 | ncpu_cpuset_v2= 172 | ncpu_quota_v2= 173 | 174 | cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online 175 | cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online 176 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online 177 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online 178 | 179 | ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \ 180 | "$ncpu_online" \ 181 | "$ncpu_cpuset" \ 182 | "$ncpu_quota" \ 183 | "$ncpu_cpuset_v2" \ 184 | "$ncpu_quota_v2" \ 185 | | sort -n \ 186 | | head -n 1 ) 187 | 188 | sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf 189 | -------------------------------------------------------------------------------- /entrypoint/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:sw=4:ts=4:et 3 | 4 | set -e 5 | 6 | entrypoint_log() { 7 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then 8 | echo "$@" 9 | fi 10 | } 11 | 12 | if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then 13 | if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then 14 | entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration" 15 | 16 | entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/" 17 | find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do 18 | case "$f" in 19 | *.envsh) 20 | if [ -x "$f" ]; then 21 | entrypoint_log "$0: Sourcing $f"; 22 | . 
"$f" 23 | else 24 | # warn on shell scripts without exec bit 25 | entrypoint_log "$0: Ignoring $f, not executable"; 26 | fi 27 | ;; 28 | *.sh) 29 | if [ -x "$f" ]; then 30 | entrypoint_log "$0: Launching $f"; 31 | "$f" 32 | else 33 | # warn on shell scripts without exec bit 34 | entrypoint_log "$0: Ignoring $f, not executable"; 35 | fi 36 | ;; 37 | *) entrypoint_log "$0: Ignoring $f";; 38 | esac 39 | done 40 | 41 | entrypoint_log "$0: Configuration complete; ready for start up" 42 | else 43 | entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration" 44 | fi 45 | fi 46 | 47 | exec "$@" 48 | -------------------------------------------------------------------------------- /mainline/alpine-perl/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh" 3 | # 4 | # PLEASE DO NOT EDIT IT DIRECTLY. 5 | # 6 | ARG IMAGE=nginxinc/nginx-unprivileged:1.27.5-alpine 7 | FROM $IMAGE 8 | 9 | ARG UID=101 10 | ARG GID=101 11 | 12 | USER root 13 | 14 | RUN set -x \ 15 | && apkArch="$(cat /etc/apk/arch)" \ 16 | && nginxPackages=" \ 17 | nginx=${NGINX_VERSION}-r${PKG_RELEASE} \ 18 | nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \ 19 | nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \ 20 | nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \ 21 | nginx-module-perl=${NGINX_VERSION}-r${DYNPKG_RELEASE} \ 22 | nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \ 23 | " \ 24 | # install prerequisites for public key and pkg-oss checks 25 | && apk add --no-cache --virtual .checksum-deps \ 26 | openssl \ 27 | && case "$apkArch" in \ 28 | x86_64|aarch64) \ 29 | # arches officially built by upstream 30 | apk add -X "https://nginx.org/packages/mainline/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \ 31 | ;; \ 32 | *) \ 33 | # we're on an architecture upstream doesn't officially build for 34 | # let's build binaries from the published packaging sources 35 | set -x \ 36 | && tempDir="$(mktemp -d)" \ 37 | && chown nobody:nobody $tempDir \ 38 | && apk add --no-cache --virtual .build-deps \ 39 | gcc \ 40 | libc-dev \ 41 | make \ 42 | openssl-dev \ 43 | pcre2-dev \ 44 | zlib-dev \ 45 | linux-headers \ 46 | perl-dev \ 47 | bash \ 48 | alpine-sdk \ 49 | findutils \ 50 | curl \ 51 | && su nobody -s /bin/sh -c " \ 52 | export HOME=${tempDir} \ 53 | && cd ${tempDir} \ 54 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \ 55 | && PKGOSSCHECKSUM=\"c773d98b567bd585c17f55702bf3e4c7d82b676bfbde395270e90a704dca3c758dfe0380b3f01770542b4fd9bed1f1149af4ce28bfc54a27a96df6b700ac1745 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \ 56 | && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \ 57 | echo \"pkg-oss tarball checksum verification succeeded!\"; \ 58 | else \ 59 | echo \"pkg-oss tarball checksum verification failed!\"; \ 60 | exit 1; \ 61 | fi \ 62 | && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \ 63 | && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \ 64 | && cd alpine \ 65 | && make module-perl \ 66 | && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \ 67 | && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \ 68 | " \ 69 | && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \ 70 | && apk del 
--no-network .build-deps \ 71 | && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \ 72 | ;; \ 73 | esac \ 74 | # remove checksum deps 75 | && apk del --no-network .checksum-deps \ 76 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps) 77 | && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \ 78 | && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi 79 | 80 | USER $UID 81 | -------------------------------------------------------------------------------- /mainline/alpine-slim/10-listen-on-ipv6-by-default.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:sw=4:ts=4:et 3 | 4 | set -e 5 | 6 | entrypoint_log() { 7 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then 8 | echo "$@" 9 | fi 10 | } 11 | 12 | ME=$(basename "$0") 13 | DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf" 14 | 15 | # check if we have ipv6 available 16 | if [ ! -f "/proc/net/if_inet6" ]; then 17 | entrypoint_log "$ME: info: ipv6 not available" 18 | exit 0 19 | fi 20 | 21 | if [ ! -f "/$DEFAULT_CONF_FILE" ]; then 22 | entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist" 23 | exit 0 24 | fi 25 | 26 | # check if the file can be modified, e.g. not on a r/o filesystem 27 | touch /$DEFAULT_CONF_FILE 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; } 28 | 29 | # check if the file is already modified, e.g. on a container restart 30 | grep -q "listen \[::]\:8080;" /$DEFAULT_CONF_FILE && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; } 31 | 32 | if [ -f "/etc/os-release" ]; then 33 | . /etc/os-release 34 | else 35 | entrypoint_log "$ME: info: can not guess the operating system" 36 | exit 0 37 | fi 38 | 39 | entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE" 40 | 41 | case "$ID" in 42 | "debian") 43 | CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3) 44 | echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || { 45 | entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version" 46 | exit 0 47 | } 48 | ;; 49 | "alpine") 50 | CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2) 51 | echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || { 52 | entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version" 53 | exit 0 54 | } 55 | ;; 56 | *) 57 | entrypoint_log "$ME: info: Unsupported distribution" 58 | exit 0 59 | ;; 60 | esac 61 | 62 | # enable ipv6 on default.conf listen sockets 63 | sed -i -E 's,listen 8080;,listen 8080;\n listen [::]:8080;,' /$DEFAULT_CONF_FILE 64 | 65 | entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE" 66 | 67 | exit 0 68 | -------------------------------------------------------------------------------- /mainline/alpine-slim/15-local-resolvers.envsh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:sw=2:ts=2:sts=2:et 3 | 4 | set -eu 5 | 6 | LC_ALL=C 7 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 8 | 9 | [ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0 10 | 11 | NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {if ($2 ~ ":") {print "["$2"]"} else {print $2}}' /etc/resolv.conf) 12 | 13 | 
NGINX_LOCAL_RESOLVERS="${NGINX_LOCAL_RESOLVERS% }" 14 | 15 | export NGINX_LOCAL_RESOLVERS 16 | -------------------------------------------------------------------------------- /mainline/alpine-slim/20-envsubst-on-templates.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | ME=$(basename "$0") 6 | 7 | entrypoint_log() { 8 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then 9 | echo "$@" 10 | fi 11 | } 12 | 13 | add_stream_block() { 14 | local conffile="/etc/nginx/nginx.conf" 15 | 16 | if grep -q -E "\s*stream\s*\{" "$conffile"; then 17 | entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates" 18 | else 19 | # check if the file can be modified, e.g. not on a r/o filesystem 20 | touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; } 21 | entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf" 22 | cat << END >> "$conffile" 23 | # added by "$ME" on "$(date)" 24 | stream { 25 | include $stream_output_dir/*.conf; 26 | } 27 | END 28 | fi 29 | } 30 | 31 | auto_envsubst() { 32 | local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}" 33 | local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}" 34 | local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}" 35 | local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}" 36 | local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}" 37 | local filter="${NGINX_ENVSUBST_FILTER:-}" 38 | 39 | local template defined_envs relative_path output_path subdir 40 | defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null )) 41 | [ -d "$template_dir" ] || return 0 42 | if [ ! -w "$output_dir" ]; then 43 | entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable" 44 | return 0 45 | fi 46 | find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do 47 | relative_path="${template#"$template_dir/"}" 48 | output_path="$output_dir/${relative_path%"$suffix"}" 49 | subdir=$(dirname "$relative_path") 50 | # create a subdirectory where the template file exists 51 | mkdir -p "$output_dir/$subdir" 52 | entrypoint_log "$ME: Running envsubst on $template to $output_path" 53 | envsubst "$defined_envs" < "$template" > "$output_path" 54 | done 55 | 56 | # Print the first file with the stream suffix, this will be false if there are none 57 | if test -n "$(find "$template_dir" -name "*$stream_suffix" -print -quit)"; then 58 | mkdir -p "$stream_output_dir" 59 | if [ ! 
-w "$stream_output_dir" ]; then 60 | entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable" 61 | return 0 62 | fi 63 | add_stream_block 64 | find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do 65 | relative_path="${template#"$template_dir/"}" 66 | output_path="$stream_output_dir/${relative_path%"$stream_suffix"}" 67 | subdir=$(dirname "$relative_path") 68 | # create a subdirectory where the template file exists 69 | mkdir -p "$stream_output_dir/$subdir" 70 | entrypoint_log "$ME: Running envsubst on $template to $output_path" 71 | envsubst "$defined_envs" < "$template" > "$output_path" 72 | done 73 | fi 74 | } 75 | 76 | auto_envsubst 77 | 78 | exit 0 79 | -------------------------------------------------------------------------------- /mainline/alpine-slim/30-tune-worker-processes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:sw=2:ts=2:sts=2:et 3 | 4 | set -eu 5 | 6 | LC_ALL=C 7 | ME=$(basename "$0") 8 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 9 | 10 | [ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0 11 | 12 | touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; } 13 | 14 | ceildiv() { 15 | num=$1 16 | div=$2 17 | echo $(( (num + div - 1) / div )) 18 | } 19 | 20 | get_cpuset() { 21 | cpusetroot=$1 22 | cpusetfile=$2 23 | ncpu=0 24 | [ -f "$cpusetroot/$cpusetfile" ] || return 1 25 | for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do 26 | case "$token" in 27 | *-*) 28 | count=$( seq $(echo "$token" | tr '-' ' ') | wc -l ) 29 | ncpu=$(( ncpu+count )) 30 | ;; 31 | *) 32 | ncpu=$(( ncpu+1 )) 33 | ;; 34 | esac 35 | done 36 | echo "$ncpu" 37 | } 38 | 39 | get_quota() { 40 | cpuroot=$1 41 | ncpu=0 42 | [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1 43 | [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1 44 | cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" ) 45 | cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" ) 46 | [ "$cfs_quota" = "-1" ] && return 1 47 | [ "$cfs_period" = "0" ] && return 1 48 | ncpu=$( ceildiv "$cfs_quota" "$cfs_period" ) 49 | [ "$ncpu" -gt 0 ] || return 1 50 | echo "$ncpu" 51 | } 52 | 53 | get_quota_v2() { 54 | cpuroot=$1 55 | ncpu=0 56 | [ -f "$cpuroot/cpu.max" ] || return 1 57 | cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" ) 58 | cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" ) 59 | [ "$cfs_quota" = "max" ] && return 1 60 | [ "$cfs_period" = "0" ] && return 1 61 | ncpu=$( ceildiv "$cfs_quota" "$cfs_period" ) 62 | [ "$ncpu" -gt 0 ] || return 1 63 | echo "$ncpu" 64 | } 65 | 66 | get_cgroup_v1_path() { 67 | needle=$1 68 | found= 69 | foundroot= 70 | mountpoint= 71 | 72 | [ -r "/proc/self/mountinfo" ] || return 1 73 | [ -r "/proc/self/cgroup" ] || return 1 74 | 75 | while IFS= read -r line; do 76 | case "$needle" in 77 | "cpuset") 78 | case "$line" in 79 | *cpuset*) 80 | found=$( echo "$line" | cut -d ' ' -f 4,5 ) 81 | break 82 | ;; 83 | esac 84 | ;; 85 | "cpu") 86 | case "$line" in 87 | *cpuset*) 88 | ;; 89 | *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*) 90 | found=$( echo "$line" | cut -d ' ' -f 4,5 ) 91 | break 92 | ;; 93 | esac 94 | esac 95 | done << __EOF__ 96 | $( grep -F -- '- cgroup ' /proc/self/mountinfo ) 97 | __EOF__ 98 | 99 | while IFS= read -r line; do 100 | controller=$( echo "$line" | cut -d: -f 2 ) 101 | case "$needle" in 102 | "cpuset") 103 | case "$controller" in 104 | cpuset) 105 | 
mountpoint=$( echo "$line" | cut -d: -f 3 ) 106 | break 107 | ;; 108 | esac 109 | ;; 110 | "cpu") 111 | case "$controller" in 112 | cpu,cpuacct|cpuacct,cpu|cpuacct|cpu) 113 | mountpoint=$( echo "$line" | cut -d: -f 3 ) 114 | break 115 | ;; 116 | esac 117 | ;; 118 | esac 119 | done << __EOF__ 120 | $( grep -F -- 'cpu' /proc/self/cgroup ) 121 | __EOF__ 122 | 123 | case "${found%% *}" in 124 | "/") 125 | foundroot="${found##* }$mountpoint" 126 | ;; 127 | "$mountpoint") 128 | foundroot="${found##* }" 129 | ;; 130 | esac 131 | echo "$foundroot" 132 | } 133 | 134 | get_cgroup_v2_path() { 135 | found= 136 | foundroot= 137 | mountpoint= 138 | 139 | [ -r "/proc/self/mountinfo" ] || return 1 140 | [ -r "/proc/self/cgroup" ] || return 1 141 | 142 | while IFS= read -r line; do 143 | found=$( echo "$line" | cut -d ' ' -f 4,5 ) 144 | done << __EOF__ 145 | $( grep -F -- '- cgroup2 ' /proc/self/mountinfo ) 146 | __EOF__ 147 | 148 | while IFS= read -r line; do 149 | mountpoint=$( echo "$line" | cut -d: -f 3 ) 150 | done << __EOF__ 151 | $( grep -F -- '0::' /proc/self/cgroup ) 152 | __EOF__ 153 | 154 | case "${found%% *}" in 155 | "") 156 | return 1 157 | ;; 158 | "/") 159 | foundroot="${found##* }$mountpoint" 160 | ;; 161 | "$mountpoint" | /../*) 162 | foundroot="${found##* }" 163 | ;; 164 | esac 165 | echo "$foundroot" 166 | } 167 | 168 | ncpu_online=$( getconf _NPROCESSORS_ONLN ) 169 | ncpu_cpuset= 170 | ncpu_quota= 171 | ncpu_cpuset_v2= 172 | ncpu_quota_v2= 173 | 174 | cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online 175 | cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online 176 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online 177 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online 178 | 179 | ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \ 180 | "$ncpu_online" \ 181 | "$ncpu_cpuset" \ 182 | "$ncpu_quota" \ 183 | "$ncpu_cpuset_v2" \ 184 | "$ncpu_quota_v2" \ 185 | | sort -n \ 186 | | head -n 1 ) 187 | 188 | sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf 189 | -------------------------------------------------------------------------------- /mainline/alpine-slim/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh" 3 | # 4 | # PLEASE DO NOT EDIT IT DIRECTLY. 
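#
# Slim variant: only the core nginx package is installed on Alpine, and the
# stock configuration is patched below so the server can run as a non-root
# UID (101 by default): listen on 8080 instead of 80, PID file and temp paths
# moved to /tmp, /etc/nginx and /var/cache/nginx made group-writable.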
5 | # 6 | ARG IMAGE=alpine:3.21 7 | FROM $IMAGE 8 | 9 | LABEL maintainer="NGINX Docker Maintainers " 10 | 11 | ENV NGINX_VERSION=1.27.5 12 | ENV PKG_RELEASE=1 13 | ENV DYNPKG_RELEASE=1 14 | 15 | ARG UID=101 16 | ARG GID=101 17 | 18 | RUN set -x \ 19 | # create nginx user/group first, to be consistent throughout docker variants 20 | && addgroup -g $GID -S nginx || true \ 21 | && adduser -S -D -H -u $UID -h /var/cache/nginx -s /sbin/nologin -G nginx -g nginx nginx || true \ 22 | && apkArch="$(cat /etc/apk/arch)" \ 23 | && nginxPackages=" \ 24 | nginx=${NGINX_VERSION}-r${PKG_RELEASE} \ 25 | " \ 26 | # install prerequisites for public key and pkg-oss checks 27 | && apk add --no-cache --virtual .checksum-deps \ 28 | openssl \ 29 | && case "$apkArch" in \ 30 | x86_64|aarch64) \ 31 | # arches officially built by upstream 32 | set -x \ 33 | && KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \ 34 | && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \ 35 | && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \ 36 | echo "key verification succeeded!"; \ 37 | mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \ 38 | else \ 39 | echo "key verification failed!"; \ 40 | exit 1; \ 41 | fi \ 42 | && apk add -X "https://nginx.org/packages/mainline/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \ 43 | ;; \ 44 | *) \ 45 | # we're on an architecture upstream doesn't officially build for 46 | # let's build binaries from the published packaging sources 47 | set -x \ 48 | && tempDir="$(mktemp -d)" \ 49 | && chown nobody:nobody $tempDir \ 50 | && apk add --no-cache --virtual .build-deps \ 51 | gcc \ 52 | libc-dev \ 53 | make \ 54 | openssl-dev \ 55 | pcre2-dev \ 56 | zlib-dev \ 57 | linux-headers \ 58 | bash \ 59 | alpine-sdk \ 60 | findutils \ 61 | curl \ 62 | && su nobody -s /bin/sh -c " \ 63 | export HOME=${tempDir} \ 64 | && cd ${tempDir} \ 65 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \ 66 | && PKGOSSCHECKSUM=\"c773d98b567bd585c17f55702bf3e4c7d82b676bfbde395270e90a704dca3c758dfe0380b3f01770542b4fd9bed1f1149af4ce28bfc54a27a96df6b700ac1745 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \ 67 | && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \ 68 | echo \"pkg-oss tarball checksum verification succeeded!\"; \ 69 | else \ 70 | echo \"pkg-oss tarball checksum verification failed!\"; \ 71 | exit 1; \ 72 | fi \ 73 | && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \ 74 | && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \ 75 | && cd alpine \ 76 | && make base \ 77 | && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \ 78 | && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \ 79 | " \ 80 | && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \ 81 | && apk del --no-network .build-deps \ 82 | && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \ 83 | ;; \ 84 | esac \ 85 | # remove checksum deps 86 | && apk del --no-network .checksum-deps \ 87 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps) 88 | && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \ 89 | && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f 
/etc/apk/keys/abuild-key.rsa.pub; fi \ 90 | # Add `envsubst` for templating environment variables 91 | && apk add --no-cache gettext-envsubst \ 92 | # Bring in tzdata so users could set the timezones through the environment 93 | # variables 94 | && apk add --no-cache tzdata \ 95 | # forward request and error logs to docker log collector 96 | && ln -sf /dev/stdout /var/log/nginx/access.log \ 97 | && ln -sf /dev/stderr /var/log/nginx/error.log \ 98 | # create a docker-entrypoint.d directory 99 | && mkdir /docker-entrypoint.d 100 | 101 | # implement changes required to run NGINX as an unprivileged user 102 | RUN sed -i 's,listen 80;,listen 8080;,' /etc/nginx/conf.d/default.conf \ 103 | && sed -i '/user nginx;/d' /etc/nginx/nginx.conf \ 104 | && sed -i 's,\(/var\)\{0\,1\}/run/nginx.pid,/tmp/nginx.pid,' /etc/nginx/nginx.conf \ 105 | && sed -i "/^http {/a \ proxy_temp_path /tmp/proxy_temp;\n client_body_temp_path /tmp/client_temp;\n fastcgi_temp_path /tmp/fastcgi_temp;\n uwsgi_temp_path /tmp/uwsgi_temp;\n scgi_temp_path /tmp/scgi_temp;\n" /etc/nginx/nginx.conf \ 106 | # nginx user must own the cache and etc directory to write cache and tweak the nginx config 107 | && chown -R $UID:0 /var/cache/nginx \ 108 | && chmod -R g+w /var/cache/nginx \ 109 | && chown -R $UID:0 /etc/nginx \ 110 | && chmod -R g+w /etc/nginx 111 | 112 | COPY docker-entrypoint.sh / 113 | COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d 114 | COPY 15-local-resolvers.envsh /docker-entrypoint.d 115 | COPY 20-envsubst-on-templates.sh /docker-entrypoint.d 116 | COPY 30-tune-worker-processes.sh /docker-entrypoint.d 117 | ENTRYPOINT ["/docker-entrypoint.sh"] 118 | 119 | EXPOSE 8080 120 | 121 | STOPSIGNAL SIGQUIT 122 | 123 | USER $UID 124 | 125 | CMD ["nginx", "-g", "daemon off;"] 126 | -------------------------------------------------------------------------------- /mainline/alpine-slim/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:sw=4:ts=4:et 3 | 4 | set -e 5 | 6 | entrypoint_log() { 7 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then 8 | echo "$@" 9 | fi 10 | } 11 | 12 | if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then 13 | if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then 14 | entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration" 15 | 16 | entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/" 17 | find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do 18 | case "$f" in 19 | *.envsh) 20 | if [ -x "$f" ]; then 21 | entrypoint_log "$0: Sourcing $f"; 22 | . 
"$f" 23 | else 24 | # warn on shell scripts without exec bit 25 | entrypoint_log "$0: Ignoring $f, not executable"; 26 | fi 27 | ;; 28 | *.sh) 29 | if [ -x "$f" ]; then 30 | entrypoint_log "$0: Launching $f"; 31 | "$f" 32 | else 33 | # warn on shell scripts without exec bit 34 | entrypoint_log "$0: Ignoring $f, not executable"; 35 | fi 36 | ;; 37 | *) entrypoint_log "$0: Ignoring $f";; 38 | esac 39 | done 40 | 41 | entrypoint_log "$0: Configuration complete; ready for start up" 42 | else 43 | entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration" 44 | fi 45 | fi 46 | 47 | exec "$@" 48 | -------------------------------------------------------------------------------- /mainline/alpine/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh" 3 | # 4 | # PLEASE DO NOT EDIT IT DIRECTLY. 5 | # 6 | ARG IMAGE=nginxinc/nginx-unprivileged:1.27.5-alpine-slim 7 | FROM $IMAGE 8 | 9 | ENV NJS_VERSION=0.8.10 10 | ENV NJS_RELEASE=1 11 | 12 | ARG UID=101 13 | ARG GID=101 14 | 15 | USER root 16 | 17 | RUN set -x \ 18 | && apkArch="$(cat /etc/apk/arch)" \ 19 | && nginxPackages=" \ 20 | nginx=${NGINX_VERSION}-r${PKG_RELEASE} \ 21 | nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \ 22 | nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \ 23 | nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \ 24 | nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \ 25 | " \ 26 | # install prerequisites for public key and pkg-oss checks 27 | && apk add --no-cache --virtual .checksum-deps \ 28 | openssl \ 29 | && case "$apkArch" in \ 30 | x86_64|aarch64) \ 31 | # arches officially built by upstream 32 | apk add -X "https://nginx.org/packages/mainline/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \ 33 | ;; \ 34 | *) \ 35 | # we're on an architecture upstream doesn't officially build for 36 | # let's build binaries from the published packaging sources 37 | set -x \ 38 | && tempDir="$(mktemp -d)" \ 39 | && chown nobody:nobody $tempDir \ 40 | && apk add --no-cache --virtual .build-deps \ 41 | gcc \ 42 | libc-dev \ 43 | make \ 44 | openssl-dev \ 45 | pcre2-dev \ 46 | zlib-dev \ 47 | linux-headers \ 48 | libxslt-dev \ 49 | gd-dev \ 50 | geoip-dev \ 51 | libedit-dev \ 52 | bash \ 53 | alpine-sdk \ 54 | findutils \ 55 | curl \ 56 | && su nobody -s /bin/sh -c " \ 57 | export HOME=${tempDir} \ 58 | && cd ${tempDir} \ 59 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \ 60 | && PKGOSSCHECKSUM=\"c773d98b567bd585c17f55702bf3e4c7d82b676bfbde395270e90a704dca3c758dfe0380b3f01770542b4fd9bed1f1149af4ce28bfc54a27a96df6b700ac1745 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \ 61 | && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \ 62 | echo \"pkg-oss tarball checksum verification succeeded!\"; \ 63 | else \ 64 | echo \"pkg-oss tarball checksum verification failed!\"; \ 65 | exit 1; \ 66 | fi \ 67 | && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \ 68 | && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \ 69 | && cd alpine \ 70 | && make module-geoip module-image-filter module-njs module-xslt \ 71 | && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \ 72 | && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \ 
73 | " \ 74 | && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \ 75 | && apk del --no-network .build-deps \ 76 | && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \ 77 | ;; \ 78 | esac \ 79 | # remove checksum deps 80 | && apk del --no-network .checksum-deps \ 81 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps) 82 | && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \ 83 | && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \ 84 | # Bring in curl and ca-certificates to make registering on DNS SD easier 85 | && apk add --no-cache curl ca-certificates 86 | 87 | USER $UID 88 | -------------------------------------------------------------------------------- /mainline/debian-perl/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh" 3 | # 4 | # PLEASE DO NOT EDIT IT DIRECTLY. 5 | # 6 | ARG IMAGE=nginxinc/nginx-unprivileged:1.27.5 7 | FROM $IMAGE 8 | 9 | ARG UID=101 10 | ARG GID=101 11 | 12 | USER root 13 | 14 | RUN set -x; \ 15 | NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \ 16 | dpkgArch="$(dpkg --print-architecture)" \ 17 | && nginxPackages=" \ 18 | nginx=${NGINX_VERSION}-${PKG_RELEASE} \ 19 | nginx-module-xslt=${NGINX_VERSION}-${DYNPKG_RELEASE} \ 20 | nginx-module-geoip=${NGINX_VERSION}-${DYNPKG_RELEASE} \ 21 | nginx-module-image-filter=${NGINX_VERSION}-${DYNPKG_RELEASE} \ 22 | nginx-module-perl=${NGINX_VERSION}-${DYNPKG_RELEASE} \ 23 | nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_RELEASE} \ 24 | " \ 25 | && case "$dpkgArch" in \ 26 | amd64|arm64) \ 27 | # arches officialy built by upstream 28 | echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/mainline/debian/ bookworm nginx" >> /etc/apt/sources.list.d/nginx.list \ 29 | && apt-get update \ 30 | ;; \ 31 | *) \ 32 | # we're on an architecture upstream doesn't officially build for 33 | # let's build binaries from the published packaging sources 34 | # new directory for storing sources and .deb files 35 | tempDir="$(mktemp -d)" \ 36 | && chmod 777 "$tempDir" \ 37 | # (777 to ensure APT's "_apt" user can access it too) 38 | \ 39 | # save list of currently-installed packages so build dependencies can be cleanly removed later 40 | && savedAptMark="$(apt-mark showmanual)" \ 41 | \ 42 | # build .deb files from upstream's packaging sources 43 | && apt-get update \ 44 | && apt-get install --no-install-recommends --no-install-suggests -y \ 45 | curl \ 46 | devscripts \ 47 | equivs \ 48 | git \ 49 | libxml2-utils \ 50 | lsb-release \ 51 | xsltproc \ 52 | && ( \ 53 | cd "$tempDir" \ 54 | && REVISION="${NGINX_VERSION}-${PKG_RELEASE}" \ 55 | && REVISION=${REVISION%~*} \ 56 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \ 57 | && PKGOSSCHECKSUM="c773d98b567bd585c17f55702bf3e4c7d82b676bfbde395270e90a704dca3c758dfe0380b3f01770542b4fd9bed1f1149af4ce28bfc54a27a96df6b700ac1745 *${REVISION}.tar.gz" \ 58 | && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \ 59 | echo "pkg-oss tarball checksum verification succeeded!"; \ 60 | else \ 61 | echo "pkg-oss tarball checksum verification failed!"; \ 62 | exit 1; \ 63 | fi \ 64 | && tar xzvf ${REVISION}.tar.gz \ 65 | && cd pkg-oss-${REVISION} \ 66 | && cd debian \ 67 | && for target in module-perl; do \ 68 | make rules-$target; \ 69 | mk-build-deps --install --tool="apt-get -o 
Debug::pkgProblemResolver=yes --no-install-recommends --yes" \ 70 | debuild-$target/nginx-$NGINX_VERSION/debian/control; \ 71 | done \ 72 | && make module-perl \ 73 | ) \ 74 | # we don't remove APT lists here because they get re-downloaded and removed later 75 | \ 76 | # reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies 77 | # (which is done after we install the built packages so we don't have to redownload any overlapping dependencies) 78 | && apt-mark showmanual | xargs apt-mark auto > /dev/null \ 79 | && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \ 80 | \ 81 | # create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be) 82 | && ls -lAFh "$tempDir" \ 83 | && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \ 84 | && grep '^Package: ' "$tempDir/Packages" \ 85 | && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \ 86 | # work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes") 87 | # Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied) 88 | # ... 89 | # E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied) 90 | && apt-get -o Acquire::GzipIndexes=false update \ 91 | ;; \ 92 | esac \ 93 | \ 94 | && apt-get install --no-install-recommends --no-install-suggests -y \ 95 | $nginxPackages \ 96 | gettext-base \ 97 | curl \ 98 | && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \ 99 | \ 100 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps) 101 | && if [ -n "$tempDir" ]; then \ 102 | apt-get purge -y --auto-remove \ 103 | && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \ 104 | fi 105 | 106 | USER $UID 107 | -------------------------------------------------------------------------------- /mainline/debian/10-listen-on-ipv6-by-default.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:sw=4:ts=4:et 3 | 4 | set -e 5 | 6 | entrypoint_log() { 7 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then 8 | echo "$@" 9 | fi 10 | } 11 | 12 | ME=$(basename "$0") 13 | DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf" 14 | 15 | # check if we have ipv6 available 16 | if [ ! -f "/proc/net/if_inet6" ]; then 17 | entrypoint_log "$ME: info: ipv6 not available" 18 | exit 0 19 | fi 20 | 21 | if [ ! -f "/$DEFAULT_CONF_FILE" ]; then 22 | entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist" 23 | exit 0 24 | fi 25 | 26 | # check if the file can be modified, e.g. not on a r/o filesystem 27 | touch /$DEFAULT_CONF_FILE 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; } 28 | 29 | # check if the file is already modified, e.g. on a container restart 30 | grep -q "listen \[::]\:8080;" /$DEFAULT_CONF_FILE && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; } 31 | 32 | if [ -f "/etc/os-release" ]; then 33 | . 
/etc/os-release 34 | else 35 | entrypoint_log "$ME: info: can not guess the operating system" 36 | exit 0 37 | fi 38 | 39 | entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE" 40 | 41 | case "$ID" in 42 | "debian") 43 | CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3) 44 | echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || { 45 | entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version" 46 | exit 0 47 | } 48 | ;; 49 | "alpine") 50 | CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2) 51 | echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || { 52 | entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version" 53 | exit 0 54 | } 55 | ;; 56 | *) 57 | entrypoint_log "$ME: info: Unsupported distribution" 58 | exit 0 59 | ;; 60 | esac 61 | 62 | # enable ipv6 on default.conf listen sockets 63 | sed -i -E 's,listen 8080;,listen 8080;\n listen [::]:8080;,' /$DEFAULT_CONF_FILE 64 | 65 | entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE" 66 | 67 | exit 0 68 | -------------------------------------------------------------------------------- /mainline/debian/15-local-resolvers.envsh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:sw=2:ts=2:sts=2:et 3 | 4 | set -eu 5 | 6 | LC_ALL=C 7 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 8 | 9 | [ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0 10 | 11 | NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {if ($2 ~ ":") {print "["$2"]"} else {print $2}}' /etc/resolv.conf) 12 | 13 | NGINX_LOCAL_RESOLVERS="${NGINX_LOCAL_RESOLVERS% }" 14 | 15 | export NGINX_LOCAL_RESOLVERS 16 | -------------------------------------------------------------------------------- /mainline/debian/20-envsubst-on-templates.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | ME=$(basename "$0") 6 | 7 | entrypoint_log() { 8 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then 9 | echo "$@" 10 | fi 11 | } 12 | 13 | add_stream_block() { 14 | local conffile="/etc/nginx/nginx.conf" 15 | 16 | if grep -q -E "\s*stream\s*\{" "$conffile"; then 17 | entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates" 18 | else 19 | # check if the file can be modified, e.g. 
not on a r/o filesystem 20 | touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; } 21 | entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf" 22 | cat << END >> "$conffile" 23 | # added by "$ME" on "$(date)" 24 | stream { 25 | include $stream_output_dir/*.conf; 26 | } 27 | END 28 | fi 29 | } 30 | 31 | auto_envsubst() { 32 | local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}" 33 | local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}" 34 | local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}" 35 | local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}" 36 | local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}" 37 | local filter="${NGINX_ENVSUBST_FILTER:-}" 38 | 39 | local template defined_envs relative_path output_path subdir 40 | defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null )) 41 | [ -d "$template_dir" ] || return 0 42 | if [ ! -w "$output_dir" ]; then 43 | entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable" 44 | return 0 45 | fi 46 | find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do 47 | relative_path="${template#"$template_dir/"}" 48 | output_path="$output_dir/${relative_path%"$suffix"}" 49 | subdir=$(dirname "$relative_path") 50 | # create a subdirectory where the template file exists 51 | mkdir -p "$output_dir/$subdir" 52 | entrypoint_log "$ME: Running envsubst on $template to $output_path" 53 | envsubst "$defined_envs" < "$template" > "$output_path" 54 | done 55 | 56 | # Print the first file with the stream suffix, this will be false if there are none 57 | if test -n "$(find "$template_dir" -name "*$stream_suffix" -print -quit)"; then 58 | mkdir -p "$stream_output_dir" 59 | if [ ! 
-w "$stream_output_dir" ]; then 60 | entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable" 61 | return 0 62 | fi 63 | add_stream_block 64 | find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do 65 | relative_path="${template#"$template_dir/"}" 66 | output_path="$stream_output_dir/${relative_path%"$stream_suffix"}" 67 | subdir=$(dirname "$relative_path") 68 | # create a subdirectory where the template file exists 69 | mkdir -p "$stream_output_dir/$subdir" 70 | entrypoint_log "$ME: Running envsubst on $template to $output_path" 71 | envsubst "$defined_envs" < "$template" > "$output_path" 72 | done 73 | fi 74 | } 75 | 76 | auto_envsubst 77 | 78 | exit 0 79 | -------------------------------------------------------------------------------- /mainline/debian/30-tune-worker-processes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:sw=2:ts=2:sts=2:et 3 | 4 | set -eu 5 | 6 | LC_ALL=C 7 | ME=$(basename "$0") 8 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 9 | 10 | [ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0 11 | 12 | touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; } 13 | 14 | ceildiv() { 15 | num=$1 16 | div=$2 17 | echo $(( (num + div - 1) / div )) 18 | } 19 | 20 | get_cpuset() { 21 | cpusetroot=$1 22 | cpusetfile=$2 23 | ncpu=0 24 | [ -f "$cpusetroot/$cpusetfile" ] || return 1 25 | for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do 26 | case "$token" in 27 | *-*) 28 | count=$( seq $(echo "$token" | tr '-' ' ') | wc -l ) 29 | ncpu=$(( ncpu+count )) 30 | ;; 31 | *) 32 | ncpu=$(( ncpu+1 )) 33 | ;; 34 | esac 35 | done 36 | echo "$ncpu" 37 | } 38 | 39 | get_quota() { 40 | cpuroot=$1 41 | ncpu=0 42 | [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1 43 | [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1 44 | cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" ) 45 | cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" ) 46 | [ "$cfs_quota" = "-1" ] && return 1 47 | [ "$cfs_period" = "0" ] && return 1 48 | ncpu=$( ceildiv "$cfs_quota" "$cfs_period" ) 49 | [ "$ncpu" -gt 0 ] || return 1 50 | echo "$ncpu" 51 | } 52 | 53 | get_quota_v2() { 54 | cpuroot=$1 55 | ncpu=0 56 | [ -f "$cpuroot/cpu.max" ] || return 1 57 | cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" ) 58 | cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" ) 59 | [ "$cfs_quota" = "max" ] && return 1 60 | [ "$cfs_period" = "0" ] && return 1 61 | ncpu=$( ceildiv "$cfs_quota" "$cfs_period" ) 62 | [ "$ncpu" -gt 0 ] || return 1 63 | echo "$ncpu" 64 | } 65 | 66 | get_cgroup_v1_path() { 67 | needle=$1 68 | found= 69 | foundroot= 70 | mountpoint= 71 | 72 | [ -r "/proc/self/mountinfo" ] || return 1 73 | [ -r "/proc/self/cgroup" ] || return 1 74 | 75 | while IFS= read -r line; do 76 | case "$needle" in 77 | "cpuset") 78 | case "$line" in 79 | *cpuset*) 80 | found=$( echo "$line" | cut -d ' ' -f 4,5 ) 81 | break 82 | ;; 83 | esac 84 | ;; 85 | "cpu") 86 | case "$line" in 87 | *cpuset*) 88 | ;; 89 | *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*) 90 | found=$( echo "$line" | cut -d ' ' -f 4,5 ) 91 | break 92 | ;; 93 | esac 94 | esac 95 | done << __EOF__ 96 | $( grep -F -- '- cgroup ' /proc/self/mountinfo ) 97 | __EOF__ 98 | 99 | while IFS= read -r line; do 100 | controller=$( echo "$line" | cut -d: -f 2 ) 101 | case "$needle" in 102 | "cpuset") 103 | case "$controller" in 104 | cpuset) 105 | 
mountpoint=$( echo "$line" | cut -d: -f 3 ) 106 | break 107 | ;; 108 | esac 109 | ;; 110 | "cpu") 111 | case "$controller" in 112 | cpu,cpuacct|cpuacct,cpu|cpuacct|cpu) 113 | mountpoint=$( echo "$line" | cut -d: -f 3 ) 114 | break 115 | ;; 116 | esac 117 | ;; 118 | esac 119 | done << __EOF__ 120 | $( grep -F -- 'cpu' /proc/self/cgroup ) 121 | __EOF__ 122 | 123 | case "${found%% *}" in 124 | "/") 125 | foundroot="${found##* }$mountpoint" 126 | ;; 127 | "$mountpoint") 128 | foundroot="${found##* }" 129 | ;; 130 | esac 131 | echo "$foundroot" 132 | } 133 | 134 | get_cgroup_v2_path() { 135 | found= 136 | foundroot= 137 | mountpoint= 138 | 139 | [ -r "/proc/self/mountinfo" ] || return 1 140 | [ -r "/proc/self/cgroup" ] || return 1 141 | 142 | while IFS= read -r line; do 143 | found=$( echo "$line" | cut -d ' ' -f 4,5 ) 144 | done << __EOF__ 145 | $( grep -F -- '- cgroup2 ' /proc/self/mountinfo ) 146 | __EOF__ 147 | 148 | while IFS= read -r line; do 149 | mountpoint=$( echo "$line" | cut -d: -f 3 ) 150 | done << __EOF__ 151 | $( grep -F -- '0::' /proc/self/cgroup ) 152 | __EOF__ 153 | 154 | case "${found%% *}" in 155 | "") 156 | return 1 157 | ;; 158 | "/") 159 | foundroot="${found##* }$mountpoint" 160 | ;; 161 | "$mountpoint" | /../*) 162 | foundroot="${found##* }" 163 | ;; 164 | esac 165 | echo "$foundroot" 166 | } 167 | 168 | ncpu_online=$( getconf _NPROCESSORS_ONLN ) 169 | ncpu_cpuset= 170 | ncpu_quota= 171 | ncpu_cpuset_v2= 172 | ncpu_quota_v2= 173 | 174 | cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online 175 | cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online 176 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online 177 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online 178 | 179 | ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \ 180 | "$ncpu_online" \ 181 | "$ncpu_cpuset" \ 182 | "$ncpu_quota" \ 183 | "$ncpu_cpuset_v2" \ 184 | "$ncpu_quota_v2" \ 185 | | sort -n \ 186 | | head -n 1 ) 187 | 188 | sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf 189 | -------------------------------------------------------------------------------- /mainline/debian/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh" 3 | # 4 | # PLEASE DO NOT EDIT IT DIRECTLY. 
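#
# Debian (bookworm) flavour: nginx and its dynamic modules come from the
# official nginx.org apt repository (or are rebuilt from the pkg-oss sources
# on architectures upstream does not publish), and the same run-as-non-root
# adjustments as the Alpine variants are applied near the end of the file.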
5 | # 6 | ARG IMAGE=debian:bookworm-slim 7 | FROM $IMAGE 8 | 9 | LABEL maintainer="NGINX Docker Maintainers " 10 | 11 | ENV NGINX_VERSION=1.27.5 12 | ENV NJS_VERSION=0.8.10 13 | ENV NJS_RELEASE=1~bookworm 14 | ENV PKG_RELEASE=1~bookworm 15 | ENV DYNPKG_RELEASE=1~bookworm 16 | 17 | ARG UID=101 18 | ARG GID=101 19 | 20 | RUN set -x \ 21 | # create nginx user/group first, to be consistent throughout docker variants 22 | && groupadd --system --gid $GID nginx || true \ 23 | && useradd --system --gid nginx --no-create-home --home /nonexistent --comment "nginx user" --shell /bin/false --uid $UID nginx || true \ 24 | && apt-get update \ 25 | && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates \ 26 | && \ 27 | NGINX_GPGKEYS="573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 8540A6F18833A80E9C1653A42FD21310B49F6B46 9E9BE90EACBCDE69FE9B204CBCDCD8A38D88A2B3"; \ 28 | NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \ 29 | export GNUPGHOME="$(mktemp -d)"; \ 30 | found=''; \ 31 | for NGINX_GPGKEY in $NGINX_GPGKEYS; do \ 32 | for server in \ 33 | hkp://keyserver.ubuntu.com:80 \ 34 | pgp.mit.edu \ 35 | ; do \ 36 | echo "Fetching GPG key $NGINX_GPGKEY from $server"; \ 37 | gpg1 --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \ 38 | done; \ 39 | test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \ 40 | done; \ 41 | gpg1 --export "$NGINX_GPGKEYS" > "$NGINX_GPGKEY_PATH" ; \ 42 | rm -rf "$GNUPGHOME"; \ 43 | apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \ 44 | && dpkgArch="$(dpkg --print-architecture)" \ 45 | && nginxPackages=" \ 46 | nginx=${NGINX_VERSION}-${PKG_RELEASE} \ 47 | nginx-module-xslt=${NGINX_VERSION}-${DYNPKG_RELEASE} \ 48 | nginx-module-geoip=${NGINX_VERSION}-${DYNPKG_RELEASE} \ 49 | nginx-module-image-filter=${NGINX_VERSION}-${DYNPKG_RELEASE} \ 50 | nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_RELEASE} \ 51 | " \ 52 | && case "$dpkgArch" in \ 53 | amd64|arm64) \ 54 | # arches officialy built by upstream 55 | echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/mainline/debian/ bookworm nginx" >> /etc/apt/sources.list.d/nginx.list \ 56 | && apt-get update \ 57 | ;; \ 58 | *) \ 59 | # we're on an architecture upstream doesn't officially build for 60 | # let's build binaries from the published packaging sources 61 | # new directory for storing sources and .deb files 62 | tempDir="$(mktemp -d)" \ 63 | && chmod 777 "$tempDir" \ 64 | # (777 to ensure APT's "_apt" user can access it too) 65 | \ 66 | # save list of currently-installed packages so build dependencies can be cleanly removed later 67 | && savedAptMark="$(apt-mark showmanual)" \ 68 | \ 69 | # build .deb files from upstream's packaging sources 70 | && apt-get update \ 71 | && apt-get install --no-install-recommends --no-install-suggests -y \ 72 | curl \ 73 | devscripts \ 74 | equivs \ 75 | git \ 76 | libxml2-utils \ 77 | lsb-release \ 78 | xsltproc \ 79 | && ( \ 80 | cd "$tempDir" \ 81 | && REVISION="${NGINX_VERSION}-${PKG_RELEASE}" \ 82 | && REVISION=${REVISION%~*} \ 83 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \ 84 | && PKGOSSCHECKSUM="c773d98b567bd585c17f55702bf3e4c7d82b676bfbde395270e90a704dca3c758dfe0380b3f01770542b4fd9bed1f1149af4ce28bfc54a27a96df6b700ac1745 *${REVISION}.tar.gz" \ 85 | && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \ 86 | echo "pkg-oss tarball checksum verification 
succeeded!"; \ 87 | else \ 88 | echo "pkg-oss tarball checksum verification failed!"; \ 89 | exit 1; \ 90 | fi \ 91 | && tar xzvf ${REVISION}.tar.gz \ 92 | && cd pkg-oss-${REVISION} \ 93 | && cd debian \ 94 | && for target in base module-geoip module-image-filter module-njs module-xslt; do \ 95 | make rules-$target; \ 96 | mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \ 97 | debuild-$target/nginx-$NGINX_VERSION/debian/control; \ 98 | done \ 99 | && make base module-geoip module-image-filter module-njs module-xslt \ 100 | ) \ 101 | # we don't remove APT lists here because they get re-downloaded and removed later 102 | \ 103 | # reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies 104 | # (which is done after we install the built packages so we don't have to redownload any overlapping dependencies) 105 | && apt-mark showmanual | xargs apt-mark auto > /dev/null \ 106 | && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \ 107 | \ 108 | # create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be) 109 | && ls -lAFh "$tempDir" \ 110 | && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \ 111 | && grep '^Package: ' "$tempDir/Packages" \ 112 | && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \ 113 | # work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes") 114 | # Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied) 115 | # ... 116 | # E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied) 117 | && apt-get -o Acquire::GzipIndexes=false update \ 118 | ;; \ 119 | esac \ 120 | \ 121 | && apt-get install --no-install-recommends --no-install-suggests -y \ 122 | $nginxPackages \ 123 | gettext-base \ 124 | curl \ 125 | && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \ 126 | \ 127 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps) 128 | && if [ -n "$tempDir" ]; then \ 129 | apt-get purge -y --auto-remove \ 130 | && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \ 131 | fi \ 132 | # forward request and error logs to docker log collector 133 | && ln -sf /dev/stdout /var/log/nginx/access.log \ 134 | && ln -sf /dev/stderr /var/log/nginx/error.log \ 135 | # create a docker-entrypoint.d directory 136 | && mkdir /docker-entrypoint.d 137 | 138 | # implement changes required to run NGINX as an unprivileged user 139 | RUN sed -i 's,listen 80;,listen 8080;,' /etc/nginx/conf.d/default.conf \ 140 | && sed -i '/user nginx;/d' /etc/nginx/nginx.conf \ 141 | && sed -i 's,\(/var\)\{0\,1\}/run/nginx.pid,/tmp/nginx.pid,' /etc/nginx/nginx.conf \ 142 | && sed -i "/^http {/a \ proxy_temp_path /tmp/proxy_temp;\n client_body_temp_path /tmp/client_temp;\n fastcgi_temp_path /tmp/fastcgi_temp;\n uwsgi_temp_path /tmp/uwsgi_temp;\n scgi_temp_path /tmp/scgi_temp;\n" /etc/nginx/nginx.conf \ 143 | # nginx user must own the cache and etc directory to write cache and tweak the nginx config 144 | && chown -R $UID:0 /var/cache/nginx \ 145 | && chmod -R g+w /var/cache/nginx \ 146 | && chown -R $UID:0 /etc/nginx \ 147 | && chmod -R g+w /etc/nginx 148 | 149 | 
COPY docker-entrypoint.sh / 150 | COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d 151 | COPY 15-local-resolvers.envsh /docker-entrypoint.d 152 | COPY 20-envsubst-on-templates.sh /docker-entrypoint.d 153 | COPY 30-tune-worker-processes.sh /docker-entrypoint.d 154 | ENTRYPOINT ["/docker-entrypoint.sh"] 155 | 156 | EXPOSE 8080 157 | 158 | STOPSIGNAL SIGQUIT 159 | 160 | USER $UID 161 | 162 | CMD ["nginx", "-g", "daemon off;"] 163 | -------------------------------------------------------------------------------- /mainline/debian/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:sw=4:ts=4:et 3 | 4 | set -e 5 | 6 | entrypoint_log() { 7 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then 8 | echo "$@" 9 | fi 10 | } 11 | 12 | if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then 13 | if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then 14 | entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration" 15 | 16 | entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/" 17 | find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do 18 | case "$f" in 19 | *.envsh) 20 | if [ -x "$f" ]; then 21 | entrypoint_log "$0: Sourcing $f"; 22 | . "$f" 23 | else 24 | # warn on shell scripts without exec bit 25 | entrypoint_log "$0: Ignoring $f, not executable"; 26 | fi 27 | ;; 28 | *.sh) 29 | if [ -x "$f" ]; then 30 | entrypoint_log "$0: Launching $f"; 31 | "$f" 32 | else 33 | # warn on shell scripts without exec bit 34 | entrypoint_log "$0: Ignoring $f, not executable"; 35 | fi 36 | ;; 37 | *) entrypoint_log "$0: Ignoring $f";; 38 | esac 39 | done 40 | 41 | entrypoint_log "$0: Configuration complete; ready for start up" 42 | else 43 | entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration" 44 | fi 45 | fi 46 | 47 | exec "$@" 48 | -------------------------------------------------------------------------------- /stable/alpine-perl/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh" 3 | # 4 | # PLEASE DO NOT EDIT IT DIRECTLY. 
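#
# Adds the nginx-module-perl dynamic module on top of the stable Alpine image,
# switching to root only for package installation and dropping back to $UID
# at the end.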
5 | # 6 | ARG IMAGE=nginxinc/nginx-unprivileged:1.28.0-alpine 7 | FROM $IMAGE 8 | 9 | ARG UID=101 10 | ARG GID=101 11 | 12 | USER root 13 | 14 | RUN set -x \ 15 | && apkArch="$(cat /etc/apk/arch)" \ 16 | && nginxPackages=" \ 17 | nginx=${NGINX_VERSION}-r${PKG_RELEASE} \ 18 | nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \ 19 | nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \ 20 | nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \ 21 | nginx-module-perl=${NGINX_VERSION}-r${DYNPKG_RELEASE} \ 22 | nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \ 23 | " \ 24 | # install prerequisites for public key and pkg-oss checks 25 | && apk add --no-cache --virtual .checksum-deps \ 26 | openssl \ 27 | && case "$apkArch" in \ 28 | x86_64|aarch64) \ 29 | # arches officially built by upstream 30 | apk add -X "https://nginx.org/packages/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \ 31 | ;; \ 32 | *) \ 33 | # we're on an architecture upstream doesn't officially build for 34 | # let's build binaries from the published packaging sources 35 | set -x \ 36 | && tempDir="$(mktemp -d)" \ 37 | && chown nobody:nobody $tempDir \ 38 | && apk add --no-cache --virtual .build-deps \ 39 | gcc \ 40 | libc-dev \ 41 | make \ 42 | openssl-dev \ 43 | pcre2-dev \ 44 | zlib-dev \ 45 | linux-headers \ 46 | perl-dev \ 47 | bash \ 48 | alpine-sdk \ 49 | findutils \ 50 | curl \ 51 | && su nobody -s /bin/sh -c " \ 52 | export HOME=${tempDir} \ 53 | && cd ${tempDir} \ 54 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \ 55 | && PKGOSSCHECKSUM=\"517bc18954ccf4efddd51986584ca1f37966833ad342a297e1fe58fd0faf14c5a4dabcb23519dca433878a2927a95d6bea05a6749ee2fa67a33bf24cdc41b1e4 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \ 56 | && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \ 57 | echo \"pkg-oss tarball checksum verification succeeded!\"; \ 58 | else \ 59 | echo \"pkg-oss tarball checksum verification failed!\"; \ 60 | exit 1; \ 61 | fi \ 62 | && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \ 63 | && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \ 64 | && cd alpine \ 65 | && make module-perl \ 66 | && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \ 67 | && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \ 68 | " \ 69 | && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \ 70 | && apk del --no-network .build-deps \ 71 | && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \ 72 | ;; \ 73 | esac \ 74 | # remove checksum deps 75 | && apk del --no-network .checksum-deps \ 76 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps) 77 | && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \ 78 | && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi 79 | 80 | USER $UID 81 | -------------------------------------------------------------------------------- /stable/alpine-slim/10-listen-on-ipv6-by-default.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:sw=4:ts=4:et 3 | 4 | set -e 5 | 6 | entrypoint_log() { 7 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then 8 | echo "$@" 9 | fi 10 | } 11 | 12 | ME=$(basename "$0") 13 | 
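# Note: the path below has no leading slash, which lets it be grepped as-is
# in the package manifests further down (apk lists files relative to /);
# "/$DEFAULT_CONF_FILE" is used whenever the file itself is read or modified.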
DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf" 14 | 15 | # check if we have ipv6 available 16 | if [ ! -f "/proc/net/if_inet6" ]; then 17 | entrypoint_log "$ME: info: ipv6 not available" 18 | exit 0 19 | fi 20 | 21 | if [ ! -f "/$DEFAULT_CONF_FILE" ]; then 22 | entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist" 23 | exit 0 24 | fi 25 | 26 | # check if the file can be modified, e.g. not on a r/o filesystem 27 | touch /$DEFAULT_CONF_FILE 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; } 28 | 29 | # check if the file is already modified, e.g. on a container restart 30 | grep -q "listen \[::]\:8080;" /$DEFAULT_CONF_FILE && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; } 31 | 32 | if [ -f "/etc/os-release" ]; then 33 | . /etc/os-release 34 | else 35 | entrypoint_log "$ME: info: can not guess the operating system" 36 | exit 0 37 | fi 38 | 39 | entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE" 40 | 41 | case "$ID" in 42 | "debian") 43 | CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3) 44 | echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || { 45 | entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version" 46 | exit 0 47 | } 48 | ;; 49 | "alpine") 50 | CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2) 51 | echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || { 52 | entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version" 53 | exit 0 54 | } 55 | ;; 56 | *) 57 | entrypoint_log "$ME: info: Unsupported distribution" 58 | exit 0 59 | ;; 60 | esac 61 | 62 | # enable ipv6 on default.conf listen sockets 63 | sed -i -E 's,listen 8080;,listen 8080;\n listen [::]:8080;,' /$DEFAULT_CONF_FILE 64 | 65 | entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE" 66 | 67 | exit 0 68 | -------------------------------------------------------------------------------- /stable/alpine-slim/15-local-resolvers.envsh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:sw=2:ts=2:sts=2:et 3 | 4 | set -eu 5 | 6 | LC_ALL=C 7 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 8 | 9 | [ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0 10 | 11 | NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {if ($2 ~ ":") {print "["$2"]"} else {print $2}}' /etc/resolv.conf) 12 | 13 | NGINX_LOCAL_RESOLVERS="${NGINX_LOCAL_RESOLVERS% }" 14 | 15 | export NGINX_LOCAL_RESOLVERS 16 | -------------------------------------------------------------------------------- /stable/alpine-slim/20-envsubst-on-templates.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | ME=$(basename "$0") 6 | 7 | entrypoint_log() { 8 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then 9 | echo "$@" 10 | fi 11 | } 12 | 13 | add_stream_block() { 14 | local conffile="/etc/nginx/nginx.conf" 15 | 16 | if grep -q -E "\s*stream\s*\{" "$conffile"; then 17 | entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates" 18 | else 19 | # check if the file can be modified, e.g. 
not on a r/o filesystem 20 | touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; } 21 | entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf" 22 | cat << END >> "$conffile" 23 | # added by "$ME" on "$(date)" 24 | stream { 25 | include $stream_output_dir/*.conf; 26 | } 27 | END 28 | fi 29 | } 30 | 31 | auto_envsubst() { 32 | local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}" 33 | local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}" 34 | local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}" 35 | local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}" 36 | local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}" 37 | local filter="${NGINX_ENVSUBST_FILTER:-}" 38 | 39 | local template defined_envs relative_path output_path subdir 40 | defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null )) 41 | [ -d "$template_dir" ] || return 0 42 | if [ ! -w "$output_dir" ]; then 43 | entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable" 44 | return 0 45 | fi 46 | find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do 47 | relative_path="${template#"$template_dir/"}" 48 | output_path="$output_dir/${relative_path%"$suffix"}" 49 | subdir=$(dirname "$relative_path") 50 | # create a subdirectory where the template file exists 51 | mkdir -p "$output_dir/$subdir" 52 | entrypoint_log "$ME: Running envsubst on $template to $output_path" 53 | envsubst "$defined_envs" < "$template" > "$output_path" 54 | done 55 | 56 | # Print the first file with the stream suffix, this will be false if there are none 57 | if test -n "$(find "$template_dir" -name "*$stream_suffix" -print -quit)"; then 58 | mkdir -p "$stream_output_dir" 59 | if [ ! 
-w "$stream_output_dir" ]; then 60 | entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable" 61 | return 0 62 | fi 63 | add_stream_block 64 | find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do 65 | relative_path="${template#"$template_dir/"}" 66 | output_path="$stream_output_dir/${relative_path%"$stream_suffix"}" 67 | subdir=$(dirname "$relative_path") 68 | # create a subdirectory where the template file exists 69 | mkdir -p "$stream_output_dir/$subdir" 70 | entrypoint_log "$ME: Running envsubst on $template to $output_path" 71 | envsubst "$defined_envs" < "$template" > "$output_path" 72 | done 73 | fi 74 | } 75 | 76 | auto_envsubst 77 | 78 | exit 0 79 | -------------------------------------------------------------------------------- /stable/alpine-slim/30-tune-worker-processes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:sw=2:ts=2:sts=2:et 3 | 4 | set -eu 5 | 6 | LC_ALL=C 7 | ME=$(basename "$0") 8 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 9 | 10 | [ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0 11 | 12 | touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; } 13 | 14 | ceildiv() { 15 | num=$1 16 | div=$2 17 | echo $(( (num + div - 1) / div )) 18 | } 19 | 20 | get_cpuset() { 21 | cpusetroot=$1 22 | cpusetfile=$2 23 | ncpu=0 24 | [ -f "$cpusetroot/$cpusetfile" ] || return 1 25 | for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do 26 | case "$token" in 27 | *-*) 28 | count=$( seq $(echo "$token" | tr '-' ' ') | wc -l ) 29 | ncpu=$(( ncpu+count )) 30 | ;; 31 | *) 32 | ncpu=$(( ncpu+1 )) 33 | ;; 34 | esac 35 | done 36 | echo "$ncpu" 37 | } 38 | 39 | get_quota() { 40 | cpuroot=$1 41 | ncpu=0 42 | [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1 43 | [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1 44 | cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" ) 45 | cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" ) 46 | [ "$cfs_quota" = "-1" ] && return 1 47 | [ "$cfs_period" = "0" ] && return 1 48 | ncpu=$( ceildiv "$cfs_quota" "$cfs_period" ) 49 | [ "$ncpu" -gt 0 ] || return 1 50 | echo "$ncpu" 51 | } 52 | 53 | get_quota_v2() { 54 | cpuroot=$1 55 | ncpu=0 56 | [ -f "$cpuroot/cpu.max" ] || return 1 57 | cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" ) 58 | cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" ) 59 | [ "$cfs_quota" = "max" ] && return 1 60 | [ "$cfs_period" = "0" ] && return 1 61 | ncpu=$( ceildiv "$cfs_quota" "$cfs_period" ) 62 | [ "$ncpu" -gt 0 ] || return 1 63 | echo "$ncpu" 64 | } 65 | 66 | get_cgroup_v1_path() { 67 | needle=$1 68 | found= 69 | foundroot= 70 | mountpoint= 71 | 72 | [ -r "/proc/self/mountinfo" ] || return 1 73 | [ -r "/proc/self/cgroup" ] || return 1 74 | 75 | while IFS= read -r line; do 76 | case "$needle" in 77 | "cpuset") 78 | case "$line" in 79 | *cpuset*) 80 | found=$( echo "$line" | cut -d ' ' -f 4,5 ) 81 | break 82 | ;; 83 | esac 84 | ;; 85 | "cpu") 86 | case "$line" in 87 | *cpuset*) 88 | ;; 89 | *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*) 90 | found=$( echo "$line" | cut -d ' ' -f 4,5 ) 91 | break 92 | ;; 93 | esac 94 | esac 95 | done << __EOF__ 96 | $( grep -F -- '- cgroup ' /proc/self/mountinfo ) 97 | __EOF__ 98 | 99 | while IFS= read -r line; do 100 | controller=$( echo "$line" | cut -d: -f 2 ) 101 | case "$needle" in 102 | "cpuset") 103 | case "$controller" in 104 | cpuset) 105 | 
mountpoint=$( echo "$line" | cut -d: -f 3 ) 106 | break 107 | ;; 108 | esac 109 | ;; 110 | "cpu") 111 | case "$controller" in 112 | cpu,cpuacct|cpuacct,cpu|cpuacct|cpu) 113 | mountpoint=$( echo "$line" | cut -d: -f 3 ) 114 | break 115 | ;; 116 | esac 117 | ;; 118 | esac 119 | done << __EOF__ 120 | $( grep -F -- 'cpu' /proc/self/cgroup ) 121 | __EOF__ 122 | 123 | case "${found%% *}" in 124 | "/") 125 | foundroot="${found##* }$mountpoint" 126 | ;; 127 | "$mountpoint") 128 | foundroot="${found##* }" 129 | ;; 130 | esac 131 | echo "$foundroot" 132 | } 133 | 134 | get_cgroup_v2_path() { 135 | found= 136 | foundroot= 137 | mountpoint= 138 | 139 | [ -r "/proc/self/mountinfo" ] || return 1 140 | [ -r "/proc/self/cgroup" ] || return 1 141 | 142 | while IFS= read -r line; do 143 | found=$( echo "$line" | cut -d ' ' -f 4,5 ) 144 | done << __EOF__ 145 | $( grep -F -- '- cgroup2 ' /proc/self/mountinfo ) 146 | __EOF__ 147 | 148 | while IFS= read -r line; do 149 | mountpoint=$( echo "$line" | cut -d: -f 3 ) 150 | done << __EOF__ 151 | $( grep -F -- '0::' /proc/self/cgroup ) 152 | __EOF__ 153 | 154 | case "${found%% *}" in 155 | "") 156 | return 1 157 | ;; 158 | "/") 159 | foundroot="${found##* }$mountpoint" 160 | ;; 161 | "$mountpoint" | /../*) 162 | foundroot="${found##* }" 163 | ;; 164 | esac 165 | echo "$foundroot" 166 | } 167 | 168 | ncpu_online=$( getconf _NPROCESSORS_ONLN ) 169 | ncpu_cpuset= 170 | ncpu_quota= 171 | ncpu_cpuset_v2= 172 | ncpu_quota_v2= 173 | 174 | cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online 175 | cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online 176 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online 177 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online 178 | 179 | ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \ 180 | "$ncpu_online" \ 181 | "$ncpu_cpuset" \ 182 | "$ncpu_quota" \ 183 | "$ncpu_cpuset_v2" \ 184 | "$ncpu_quota_v2" \ 185 | | sort -n \ 186 | | head -n 1 ) 187 | 188 | sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf 189 | -------------------------------------------------------------------------------- /stable/alpine-slim/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh" 3 | # 4 | # PLEASE DO NOT EDIT IT DIRECTLY. 
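#
# Stable-branch counterpart of the alpine-slim image (nginx 1.28.x), built
# from nginx.org's stable Alpine package repository with the same
# unprivileged-user adjustments applied further down.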
5 | # 6 | ARG IMAGE=alpine:3.21 7 | FROM $IMAGE 8 | 9 | LABEL maintainer="NGINX Docker Maintainers " 10 | 11 | ENV NGINX_VERSION=1.28.0 12 | ENV PKG_RELEASE=1 13 | ENV DYNPKG_RELEASE=1 14 | 15 | ARG UID=101 16 | ARG GID=101 17 | 18 | RUN set -x \ 19 | # create nginx user/group first, to be consistent throughout docker variants 20 | && addgroup -g $GID -S nginx || true \ 21 | && adduser -S -D -H -u $UID -h /var/cache/nginx -s /sbin/nologin -G nginx -g nginx nginx || true \ 22 | && apkArch="$(cat /etc/apk/arch)" \ 23 | && nginxPackages=" \ 24 | nginx=${NGINX_VERSION}-r${PKG_RELEASE} \ 25 | " \ 26 | # install prerequisites for public key and pkg-oss checks 27 | && apk add --no-cache --virtual .checksum-deps \ 28 | openssl \ 29 | && case "$apkArch" in \ 30 | x86_64|aarch64) \ 31 | # arches officially built by upstream 32 | set -x \ 33 | && KEY_SHA512="e09fa32f0a0eab2b879ccbbc4d0e4fb9751486eedda75e35fac65802cc9faa266425edf83e261137a2f4d16281ce2c1a5f4502930fe75154723da014214f0655" \ 34 | && wget -O /tmp/nginx_signing.rsa.pub https://nginx.org/keys/nginx_signing.rsa.pub \ 35 | && if echo "$KEY_SHA512 */tmp/nginx_signing.rsa.pub" | sha512sum -c -; then \ 36 | echo "key verification succeeded!"; \ 37 | mv /tmp/nginx_signing.rsa.pub /etc/apk/keys/; \ 38 | else \ 39 | echo "key verification failed!"; \ 40 | exit 1; \ 41 | fi \ 42 | && apk add -X "https://nginx.org/packages/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \ 43 | ;; \ 44 | *) \ 45 | # we're on an architecture upstream doesn't officially build for 46 | # let's build binaries from the published packaging sources 47 | set -x \ 48 | && tempDir="$(mktemp -d)" \ 49 | && chown nobody:nobody $tempDir \ 50 | && apk add --no-cache --virtual .build-deps \ 51 | gcc \ 52 | libc-dev \ 53 | make \ 54 | openssl-dev \ 55 | pcre2-dev \ 56 | zlib-dev \ 57 | linux-headers \ 58 | bash \ 59 | alpine-sdk \ 60 | findutils \ 61 | curl \ 62 | && su nobody -s /bin/sh -c " \ 63 | export HOME=${tempDir} \ 64 | && cd ${tempDir} \ 65 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \ 66 | && PKGOSSCHECKSUM=\"517bc18954ccf4efddd51986584ca1f37966833ad342a297e1fe58fd0faf14c5a4dabcb23519dca433878a2927a95d6bea05a6749ee2fa67a33bf24cdc41b1e4 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \ 67 | && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \ 68 | echo \"pkg-oss tarball checksum verification succeeded!\"; \ 69 | else \ 70 | echo \"pkg-oss tarball checksum verification failed!\"; \ 71 | exit 1; \ 72 | fi \ 73 | && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \ 74 | && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \ 75 | && cd alpine \ 76 | && make base \ 77 | && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \ 78 | && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \ 79 | " \ 80 | && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \ 81 | && apk del --no-network .build-deps \ 82 | && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \ 83 | ;; \ 84 | esac \ 85 | # remove checksum deps 86 | && apk del --no-network .checksum-deps \ 87 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps) 88 | && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \ 89 | && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f 
/etc/apk/keys/abuild-key.rsa.pub; fi \ 90 | # Add `envsubst` for templating environment variables 91 | && apk add --no-cache gettext-envsubst \ 92 | # Bring in tzdata so users could set the timezones through the environment 93 | # variables 94 | && apk add --no-cache tzdata \ 95 | # forward request and error logs to docker log collector 96 | && ln -sf /dev/stdout /var/log/nginx/access.log \ 97 | && ln -sf /dev/stderr /var/log/nginx/error.log \ 98 | # create a docker-entrypoint.d directory 99 | && mkdir /docker-entrypoint.d 100 | 101 | # implement changes required to run NGINX as an unprivileged user 102 | RUN sed -i 's,listen 80;,listen 8080;,' /etc/nginx/conf.d/default.conf \ 103 | && sed -i '/user nginx;/d' /etc/nginx/nginx.conf \ 104 | && sed -i 's,\(/var\)\{0\,1\}/run/nginx.pid,/tmp/nginx.pid,' /etc/nginx/nginx.conf \ 105 | && sed -i "/^http {/a \ proxy_temp_path /tmp/proxy_temp;\n client_body_temp_path /tmp/client_temp;\n fastcgi_temp_path /tmp/fastcgi_temp;\n uwsgi_temp_path /tmp/uwsgi_temp;\n scgi_temp_path /tmp/scgi_temp;\n" /etc/nginx/nginx.conf \ 106 | # nginx user must own the cache and etc directory to write cache and tweak the nginx config 107 | && chown -R $UID:0 /var/cache/nginx \ 108 | && chmod -R g+w /var/cache/nginx \ 109 | && chown -R $UID:0 /etc/nginx \ 110 | && chmod -R g+w /etc/nginx 111 | 112 | COPY docker-entrypoint.sh / 113 | COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d 114 | COPY 15-local-resolvers.envsh /docker-entrypoint.d 115 | COPY 20-envsubst-on-templates.sh /docker-entrypoint.d 116 | COPY 30-tune-worker-processes.sh /docker-entrypoint.d 117 | ENTRYPOINT ["/docker-entrypoint.sh"] 118 | 119 | EXPOSE 8080 120 | 121 | STOPSIGNAL SIGQUIT 122 | 123 | USER $UID 124 | 125 | CMD ["nginx", "-g", "daemon off;"] 126 | -------------------------------------------------------------------------------- /stable/alpine-slim/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:sw=4:ts=4:et 3 | 4 | set -e 5 | 6 | entrypoint_log() { 7 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then 8 | echo "$@" 9 | fi 10 | } 11 | 12 | if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then 13 | if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then 14 | entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration" 15 | 16 | entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/" 17 | find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do 18 | case "$f" in 19 | *.envsh) 20 | if [ -x "$f" ]; then 21 | entrypoint_log "$0: Sourcing $f"; 22 | . 
"$f" 23 | else 24 | # warn on shell scripts without exec bit 25 | entrypoint_log "$0: Ignoring $f, not executable"; 26 | fi 27 | ;; 28 | *.sh) 29 | if [ -x "$f" ]; then 30 | entrypoint_log "$0: Launching $f"; 31 | "$f" 32 | else 33 | # warn on shell scripts without exec bit 34 | entrypoint_log "$0: Ignoring $f, not executable"; 35 | fi 36 | ;; 37 | *) entrypoint_log "$0: Ignoring $f";; 38 | esac 39 | done 40 | 41 | entrypoint_log "$0: Configuration complete; ready for start up" 42 | else 43 | entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration" 44 | fi 45 | fi 46 | 47 | exec "$@" 48 | -------------------------------------------------------------------------------- /stable/alpine/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh" 3 | # 4 | # PLEASE DO NOT EDIT IT DIRECTLY. 5 | # 6 | ARG IMAGE=nginxinc/nginx-unprivileged:1.28.0-alpine-slim 7 | FROM $IMAGE 8 | 9 | ENV NJS_VERSION=0.8.10 10 | ENV NJS_RELEASE=1 11 | 12 | ARG UID=101 13 | ARG GID=101 14 | 15 | USER root 16 | 17 | RUN set -x \ 18 | && apkArch="$(cat /etc/apk/arch)" \ 19 | && nginxPackages=" \ 20 | nginx=${NGINX_VERSION}-r${PKG_RELEASE} \ 21 | nginx-module-xslt=${NGINX_VERSION}-r${DYNPKG_RELEASE} \ 22 | nginx-module-geoip=${NGINX_VERSION}-r${DYNPKG_RELEASE} \ 23 | nginx-module-image-filter=${NGINX_VERSION}-r${DYNPKG_RELEASE} \ 24 | nginx-module-njs=${NGINX_VERSION}.${NJS_VERSION}-r${NJS_RELEASE} \ 25 | " \ 26 | # install prerequisites for public key and pkg-oss checks 27 | && apk add --no-cache --virtual .checksum-deps \ 28 | openssl \ 29 | && case "$apkArch" in \ 30 | x86_64|aarch64) \ 31 | # arches officially built by upstream 32 | apk add -X "https://nginx.org/packages/alpine/v$(egrep -o '^[0-9]+\.[0-9]+' /etc/alpine-release)/main" --no-cache $nginxPackages \ 33 | ;; \ 34 | *) \ 35 | # we're on an architecture upstream doesn't officially build for 36 | # let's build binaries from the published packaging sources 37 | set -x \ 38 | && tempDir="$(mktemp -d)" \ 39 | && chown nobody:nobody $tempDir \ 40 | && apk add --no-cache --virtual .build-deps \ 41 | gcc \ 42 | libc-dev \ 43 | make \ 44 | openssl-dev \ 45 | pcre2-dev \ 46 | zlib-dev \ 47 | linux-headers \ 48 | libxslt-dev \ 49 | gd-dev \ 50 | geoip-dev \ 51 | libedit-dev \ 52 | bash \ 53 | alpine-sdk \ 54 | findutils \ 55 | curl \ 56 | && su nobody -s /bin/sh -c " \ 57 | export HOME=${tempDir} \ 58 | && cd ${tempDir} \ 59 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \ 60 | && PKGOSSCHECKSUM=\"517bc18954ccf4efddd51986584ca1f37966833ad342a297e1fe58fd0faf14c5a4dabcb23519dca433878a2927a95d6bea05a6749ee2fa67a33bf24cdc41b1e4 *${NGINX_VERSION}-${PKG_RELEASE}.tar.gz\" \ 61 | && if [ \"\$(openssl sha512 -r ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz)\" = \"\$PKGOSSCHECKSUM\" ]; then \ 62 | echo \"pkg-oss tarball checksum verification succeeded!\"; \ 63 | else \ 64 | echo \"pkg-oss tarball checksum verification failed!\"; \ 65 | exit 1; \ 66 | fi \ 67 | && tar xzvf ${NGINX_VERSION}-${PKG_RELEASE}.tar.gz \ 68 | && cd pkg-oss-${NGINX_VERSION}-${PKG_RELEASE} \ 69 | && cd alpine \ 70 | && make module-geoip module-image-filter module-njs module-xslt \ 71 | && apk index --allow-untrusted -o ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz ${tempDir}/packages/alpine/${apkArch}/*.apk \ 72 | && abuild-sign -k ${tempDir}/.abuild/abuild-key.rsa ${tempDir}/packages/alpine/${apkArch}/APKINDEX.tar.gz \ 73 | " \ 74 
| && cp ${tempDir}/.abuild/abuild-key.rsa.pub /etc/apk/keys/ \ 75 | && apk del --no-network .build-deps \ 76 | && apk add -X ${tempDir}/packages/alpine/ --no-cache $nginxPackages \ 77 | ;; \ 78 | esac \ 79 | # remove checksum deps 80 | && apk del --no-network .checksum-deps \ 81 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps) 82 | && if [ -n "$tempDir" ]; then rm -rf "$tempDir"; fi \ 83 | && if [ -f "/etc/apk/keys/abuild-key.rsa.pub" ]; then rm -f /etc/apk/keys/abuild-key.rsa.pub; fi \ 84 | # Bring in curl and ca-certificates to make registering on DNS SD easier 85 | && apk add --no-cache curl ca-certificates 86 | 87 | USER $UID 88 | -------------------------------------------------------------------------------- /stable/debian-perl/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh" 3 | # 4 | # PLEASE DO NOT EDIT IT DIRECTLY. 5 | # 6 | ARG IMAGE=nginxinc/nginx-unprivileged:1.28.0 7 | FROM $IMAGE 8 | 9 | ARG UID=101 10 | ARG GID=101 11 | 12 | USER root 13 | 14 | RUN set -x; \ 15 | NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \ 16 | dpkgArch="$(dpkg --print-architecture)" \ 17 | && nginxPackages=" \ 18 | nginx=${NGINX_VERSION}-${PKG_RELEASE} \ 19 | nginx-module-xslt=${NGINX_VERSION}-${DYNPKG_RELEASE} \ 20 | nginx-module-geoip=${NGINX_VERSION}-${DYNPKG_RELEASE} \ 21 | nginx-module-image-filter=${NGINX_VERSION}-${DYNPKG_RELEASE} \ 22 | nginx-module-perl=${NGINX_VERSION}-${DYNPKG_RELEASE} \ 23 | nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_RELEASE} \ 24 | " \ 25 | && case "$dpkgArch" in \ 26 | amd64|arm64) \ 27 | # arches officialy built by upstream 28 | echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/debian/ bookworm nginx" >> /etc/apt/sources.list.d/nginx.list \ 29 | && apt-get update \ 30 | ;; \ 31 | *) \ 32 | # we're on an architecture upstream doesn't officially build for 33 | # let's build binaries from the published packaging sources 34 | # new directory for storing sources and .deb files 35 | tempDir="$(mktemp -d)" \ 36 | && chmod 777 "$tempDir" \ 37 | # (777 to ensure APT's "_apt" user can access it too) 38 | \ 39 | # save list of currently-installed packages so build dependencies can be cleanly removed later 40 | && savedAptMark="$(apt-mark showmanual)" \ 41 | \ 42 | # build .deb files from upstream's packaging sources 43 | && apt-get update \ 44 | && apt-get install --no-install-recommends --no-install-suggests -y \ 45 | curl \ 46 | devscripts \ 47 | equivs \ 48 | git \ 49 | libxml2-utils \ 50 | lsb-release \ 51 | xsltproc \ 52 | && ( \ 53 | cd "$tempDir" \ 54 | && REVISION="${NGINX_VERSION}-${PKG_RELEASE}" \ 55 | && REVISION=${REVISION%~*} \ 56 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \ 57 | && PKGOSSCHECKSUM="517bc18954ccf4efddd51986584ca1f37966833ad342a297e1fe58fd0faf14c5a4dabcb23519dca433878a2927a95d6bea05a6749ee2fa67a33bf24cdc41b1e4 *${REVISION}.tar.gz" \ 58 | && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \ 59 | echo "pkg-oss tarball checksum verification succeeded!"; \ 60 | else \ 61 | echo "pkg-oss tarball checksum verification failed!"; \ 62 | exit 1; \ 63 | fi \ 64 | && tar xzvf ${REVISION}.tar.gz \ 65 | && cd pkg-oss-${REVISION} \ 66 | && cd debian \ 67 | && for target in module-perl; do \ 68 | make rules-$target; \ 69 | mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes 
--no-install-recommends --yes" \ 70 | debuild-$target/nginx-$NGINX_VERSION/debian/control; \ 71 | done \ 72 | && make module-perl \ 73 | ) \ 74 | # we don't remove APT lists here because they get re-downloaded and removed later 75 | \ 76 | # reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies 77 | # (which is done after we install the built packages so we don't have to redownload any overlapping dependencies) 78 | && apt-mark showmanual | xargs apt-mark auto > /dev/null \ 79 | && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \ 80 | \ 81 | # create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be) 82 | && ls -lAFh "$tempDir" \ 83 | && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \ 84 | && grep '^Package: ' "$tempDir/Packages" \ 85 | && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \ 86 | # work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes") 87 | # Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied) 88 | # ... 89 | # E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied) 90 | && apt-get -o Acquire::GzipIndexes=false update \ 91 | ;; \ 92 | esac \ 93 | \ 94 | && apt-get install --no-install-recommends --no-install-suggests -y \ 95 | $nginxPackages \ 96 | gettext-base \ 97 | curl \ 98 | && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \ 99 | \ 100 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps) 101 | && if [ -n "$tempDir" ]; then \ 102 | apt-get purge -y --auto-remove \ 103 | && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \ 104 | fi 105 | 106 | USER $UID 107 | -------------------------------------------------------------------------------- /stable/debian/10-listen-on-ipv6-by-default.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:sw=4:ts=4:et 3 | 4 | set -e 5 | 6 | entrypoint_log() { 7 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then 8 | echo "$@" 9 | fi 10 | } 11 | 12 | ME=$(basename "$0") 13 | DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf" 14 | 15 | # check if we have ipv6 available 16 | if [ ! -f "/proc/net/if_inet6" ]; then 17 | entrypoint_log "$ME: info: ipv6 not available" 18 | exit 0 19 | fi 20 | 21 | if [ ! -f "/$DEFAULT_CONF_FILE" ]; then 22 | entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist" 23 | exit 0 24 | fi 25 | 26 | # check if the file can be modified, e.g. not on a r/o filesystem 27 | touch /$DEFAULT_CONF_FILE 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; } 28 | 29 | # check if the file is already modified, e.g. on a container restart 30 | grep -q "listen \[::]\:8080;" /$DEFAULT_CONF_FILE && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; } 31 | 32 | if [ -f "/etc/os-release" ]; then 33 | . 
/etc/os-release 34 | else 35 | entrypoint_log "$ME: info: can not guess the operating system" 36 | exit 0 37 | fi 38 | 39 | entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE" 40 | 41 | case "$ID" in 42 | "debian") 43 | CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3) 44 | echo "$CHECKSUM /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || { 45 | entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version" 46 | exit 0 47 | } 48 | ;; 49 | "alpine") 50 | CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2) 51 | echo "$CHECKSUM /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || { 52 | entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version" 53 | exit 0 54 | } 55 | ;; 56 | *) 57 | entrypoint_log "$ME: info: Unsupported distribution" 58 | exit 0 59 | ;; 60 | esac 61 | 62 | # enable ipv6 on default.conf listen sockets 63 | sed -i -E 's,listen 8080;,listen 8080;\n listen [::]:8080;,' /$DEFAULT_CONF_FILE 64 | 65 | entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE" 66 | 67 | exit 0 68 | -------------------------------------------------------------------------------- /stable/debian/15-local-resolvers.envsh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:sw=2:ts=2:sts=2:et 3 | 4 | set -eu 5 | 6 | LC_ALL=C 7 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 8 | 9 | [ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0 10 | 11 | NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {if ($2 ~ ":") {print "["$2"]"} else {print $2}}' /etc/resolv.conf) 12 | 13 | NGINX_LOCAL_RESOLVERS="${NGINX_LOCAL_RESOLVERS% }" 14 | 15 | export NGINX_LOCAL_RESOLVERS 16 | -------------------------------------------------------------------------------- /stable/debian/20-envsubst-on-templates.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | ME=$(basename "$0") 6 | 7 | entrypoint_log() { 8 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then 9 | echo "$@" 10 | fi 11 | } 12 | 13 | add_stream_block() { 14 | local conffile="/etc/nginx/nginx.conf" 15 | 16 | if grep -q -E "\s*stream\s*\{" "$conffile"; then 17 | entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates" 18 | else 19 | # check if the file can be modified, e.g. 
not on a r/o filesystem 20 | touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; } 21 | entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf" 22 | cat << END >> "$conffile" 23 | # added by "$ME" on "$(date)" 24 | stream { 25 | include $stream_output_dir/*.conf; 26 | } 27 | END 28 | fi 29 | } 30 | 31 | auto_envsubst() { 32 | local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}" 33 | local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}" 34 | local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}" 35 | local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}" 36 | local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}" 37 | local filter="${NGINX_ENVSUBST_FILTER:-}" 38 | 39 | local template defined_envs relative_path output_path subdir 40 | defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null )) 41 | [ -d "$template_dir" ] || return 0 42 | if [ ! -w "$output_dir" ]; then 43 | entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable" 44 | return 0 45 | fi 46 | find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do 47 | relative_path="${template#"$template_dir/"}" 48 | output_path="$output_dir/${relative_path%"$suffix"}" 49 | subdir=$(dirname "$relative_path") 50 | # create a subdirectory where the template file exists 51 | mkdir -p "$output_dir/$subdir" 52 | entrypoint_log "$ME: Running envsubst on $template to $output_path" 53 | envsubst "$defined_envs" < "$template" > "$output_path" 54 | done 55 | 56 | # Print the first file with the stream suffix, this will be false if there are none 57 | if test -n "$(find "$template_dir" -name "*$stream_suffix" -print -quit)"; then 58 | mkdir -p "$stream_output_dir" 59 | if [ ! 
-w "$stream_output_dir" ]; then 60 | entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable" 61 | return 0 62 | fi 63 | add_stream_block 64 | find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do 65 | relative_path="${template#"$template_dir/"}" 66 | output_path="$stream_output_dir/${relative_path%"$stream_suffix"}" 67 | subdir=$(dirname "$relative_path") 68 | # create a subdirectory where the template file exists 69 | mkdir -p "$stream_output_dir/$subdir" 70 | entrypoint_log "$ME: Running envsubst on $template to $output_path" 71 | envsubst "$defined_envs" < "$template" > "$output_path" 72 | done 73 | fi 74 | } 75 | 76 | auto_envsubst 77 | 78 | exit 0 79 | -------------------------------------------------------------------------------- /stable/debian/30-tune-worker-processes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:sw=2:ts=2:sts=2:et 3 | 4 | set -eu 5 | 6 | LC_ALL=C 7 | ME=$(basename "$0") 8 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 9 | 10 | [ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0 11 | 12 | touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; } 13 | 14 | ceildiv() { 15 | num=$1 16 | div=$2 17 | echo $(( (num + div - 1) / div )) 18 | } 19 | 20 | get_cpuset() { 21 | cpusetroot=$1 22 | cpusetfile=$2 23 | ncpu=0 24 | [ -f "$cpusetroot/$cpusetfile" ] || return 1 25 | for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do 26 | case "$token" in 27 | *-*) 28 | count=$( seq $(echo "$token" | tr '-' ' ') | wc -l ) 29 | ncpu=$(( ncpu+count )) 30 | ;; 31 | *) 32 | ncpu=$(( ncpu+1 )) 33 | ;; 34 | esac 35 | done 36 | echo "$ncpu" 37 | } 38 | 39 | get_quota() { 40 | cpuroot=$1 41 | ncpu=0 42 | [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1 43 | [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1 44 | cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" ) 45 | cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" ) 46 | [ "$cfs_quota" = "-1" ] && return 1 47 | [ "$cfs_period" = "0" ] && return 1 48 | ncpu=$( ceildiv "$cfs_quota" "$cfs_period" ) 49 | [ "$ncpu" -gt 0 ] || return 1 50 | echo "$ncpu" 51 | } 52 | 53 | get_quota_v2() { 54 | cpuroot=$1 55 | ncpu=0 56 | [ -f "$cpuroot/cpu.max" ] || return 1 57 | cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" ) 58 | cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" ) 59 | [ "$cfs_quota" = "max" ] && return 1 60 | [ "$cfs_period" = "0" ] && return 1 61 | ncpu=$( ceildiv "$cfs_quota" "$cfs_period" ) 62 | [ "$ncpu" -gt 0 ] || return 1 63 | echo "$ncpu" 64 | } 65 | 66 | get_cgroup_v1_path() { 67 | needle=$1 68 | found= 69 | foundroot= 70 | mountpoint= 71 | 72 | [ -r "/proc/self/mountinfo" ] || return 1 73 | [ -r "/proc/self/cgroup" ] || return 1 74 | 75 | while IFS= read -r line; do 76 | case "$needle" in 77 | "cpuset") 78 | case "$line" in 79 | *cpuset*) 80 | found=$( echo "$line" | cut -d ' ' -f 4,5 ) 81 | break 82 | ;; 83 | esac 84 | ;; 85 | "cpu") 86 | case "$line" in 87 | *cpuset*) 88 | ;; 89 | *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*) 90 | found=$( echo "$line" | cut -d ' ' -f 4,5 ) 91 | break 92 | ;; 93 | esac 94 | esac 95 | done << __EOF__ 96 | $( grep -F -- '- cgroup ' /proc/self/mountinfo ) 97 | __EOF__ 98 | 99 | while IFS= read -r line; do 100 | controller=$( echo "$line" | cut -d: -f 2 ) 101 | case "$needle" in 102 | "cpuset") 103 | case "$controller" in 104 | cpuset) 105 | mountpoint=$( 
echo "$line" | cut -d: -f 3 ) 106 | break 107 | ;; 108 | esac 109 | ;; 110 | "cpu") 111 | case "$controller" in 112 | cpu,cpuacct|cpuacct,cpu|cpuacct|cpu) 113 | mountpoint=$( echo "$line" | cut -d: -f 3 ) 114 | break 115 | ;; 116 | esac 117 | ;; 118 | esac 119 | done << __EOF__ 120 | $( grep -F -- 'cpu' /proc/self/cgroup ) 121 | __EOF__ 122 | 123 | case "${found%% *}" in 124 | "/") 125 | foundroot="${found##* }$mountpoint" 126 | ;; 127 | "$mountpoint") 128 | foundroot="${found##* }" 129 | ;; 130 | esac 131 | echo "$foundroot" 132 | } 133 | 134 | get_cgroup_v2_path() { 135 | found= 136 | foundroot= 137 | mountpoint= 138 | 139 | [ -r "/proc/self/mountinfo" ] || return 1 140 | [ -r "/proc/self/cgroup" ] || return 1 141 | 142 | while IFS= read -r line; do 143 | found=$( echo "$line" | cut -d ' ' -f 4,5 ) 144 | done << __EOF__ 145 | $( grep -F -- '- cgroup2 ' /proc/self/mountinfo ) 146 | __EOF__ 147 | 148 | while IFS= read -r line; do 149 | mountpoint=$( echo "$line" | cut -d: -f 3 ) 150 | done << __EOF__ 151 | $( grep -F -- '0::' /proc/self/cgroup ) 152 | __EOF__ 153 | 154 | case "${found%% *}" in 155 | "") 156 | return 1 157 | ;; 158 | "/") 159 | foundroot="${found##* }$mountpoint" 160 | ;; 161 | "$mountpoint" | /../*) 162 | foundroot="${found##* }" 163 | ;; 164 | esac 165 | echo "$foundroot" 166 | } 167 | 168 | ncpu_online=$( getconf _NPROCESSORS_ONLN ) 169 | ncpu_cpuset= 170 | ncpu_quota= 171 | ncpu_cpuset_v2= 172 | ncpu_quota_v2= 173 | 174 | cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online 175 | cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online 176 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online 177 | cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online 178 | 179 | ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \ 180 | "$ncpu_online" \ 181 | "$ncpu_cpuset" \ 182 | "$ncpu_quota" \ 183 | "$ncpu_cpuset_v2" \ 184 | "$ncpu_quota_v2" \ 185 | | sort -n \ 186 | | head -n 1 ) 187 | 188 | sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf 189 | -------------------------------------------------------------------------------- /stable/debian/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh" 3 | # 4 | # PLEASE DO NOT EDIT IT DIRECTLY. 
5 | # 6 | ARG IMAGE=debian:bookworm-slim 7 | FROM $IMAGE 8 | 9 | LABEL maintainer="NGINX Docker Maintainers " 10 | 11 | ENV NGINX_VERSION=1.28.0 12 | ENV NJS_VERSION=0.8.10 13 | ENV NJS_RELEASE=1~bookworm 14 | ENV PKG_RELEASE=1~bookworm 15 | ENV DYNPKG_RELEASE=1~bookworm 16 | 17 | ARG UID=101 18 | ARG GID=101 19 | 20 | RUN set -x \ 21 | # create nginx user/group first, to be consistent throughout docker variants 22 | && groupadd --system --gid $GID nginx || true \ 23 | && useradd --system --gid nginx --no-create-home --home /nonexistent --comment "nginx user" --shell /bin/false --uid $UID nginx || true \ 24 | && apt-get update \ 25 | && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates \ 26 | && \ 27 | NGINX_GPGKEYS="573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 8540A6F18833A80E9C1653A42FD21310B49F6B46 9E9BE90EACBCDE69FE9B204CBCDCD8A38D88A2B3"; \ 28 | NGINX_GPGKEY_PATH=/etc/apt/keyrings/nginx-archive-keyring.gpg; \ 29 | export GNUPGHOME="$(mktemp -d)"; \ 30 | found=''; \ 31 | for NGINX_GPGKEY in $NGINX_GPGKEYS; do \ 32 | for server in \ 33 | hkp://keyserver.ubuntu.com:80 \ 34 | pgp.mit.edu \ 35 | ; do \ 36 | echo "Fetching GPG key $NGINX_GPGKEY from $server"; \ 37 | gpg1 --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \ 38 | done; \ 39 | test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \ 40 | done; \ 41 | gpg1 --export "$NGINX_GPGKEYS" > "$NGINX_GPGKEY_PATH" ; \ 42 | rm -rf "$GNUPGHOME"; \ 43 | apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \ 44 | && dpkgArch="$(dpkg --print-architecture)" \ 45 | && nginxPackages=" \ 46 | nginx=${NGINX_VERSION}-${PKG_RELEASE} \ 47 | nginx-module-xslt=${NGINX_VERSION}-${DYNPKG_RELEASE} \ 48 | nginx-module-geoip=${NGINX_VERSION}-${DYNPKG_RELEASE} \ 49 | nginx-module-image-filter=${NGINX_VERSION}-${DYNPKG_RELEASE} \ 50 | nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${NJS_RELEASE} \ 51 | " \ 52 | && case "$dpkgArch" in \ 53 | amd64|arm64) \ 54 | # arches officialy built by upstream 55 | echo "deb [signed-by=$NGINX_GPGKEY_PATH] https://nginx.org/packages/debian/ bookworm nginx" >> /etc/apt/sources.list.d/nginx.list \ 56 | && apt-get update \ 57 | ;; \ 58 | *) \ 59 | # we're on an architecture upstream doesn't officially build for 60 | # let's build binaries from the published packaging sources 61 | # new directory for storing sources and .deb files 62 | tempDir="$(mktemp -d)" \ 63 | && chmod 777 "$tempDir" \ 64 | # (777 to ensure APT's "_apt" user can access it too) 65 | \ 66 | # save list of currently-installed packages so build dependencies can be cleanly removed later 67 | && savedAptMark="$(apt-mark showmanual)" \ 68 | \ 69 | # build .deb files from upstream's packaging sources 70 | && apt-get update \ 71 | && apt-get install --no-install-recommends --no-install-suggests -y \ 72 | curl \ 73 | devscripts \ 74 | equivs \ 75 | git \ 76 | libxml2-utils \ 77 | lsb-release \ 78 | xsltproc \ 79 | && ( \ 80 | cd "$tempDir" \ 81 | && REVISION="${NGINX_VERSION}-${PKG_RELEASE}" \ 82 | && REVISION=${REVISION%~*} \ 83 | && curl -f -L -O https://github.com/nginx/pkg-oss/archive/${REVISION}.tar.gz \ 84 | && PKGOSSCHECKSUM="517bc18954ccf4efddd51986584ca1f37966833ad342a297e1fe58fd0faf14c5a4dabcb23519dca433878a2927a95d6bea05a6749ee2fa67a33bf24cdc41b1e4 *${REVISION}.tar.gz" \ 85 | && if [ "$(openssl sha512 -r ${REVISION}.tar.gz)" = "$PKGOSSCHECKSUM" ]; then \ 86 | echo "pkg-oss tarball checksum verification 
succeeded!"; \ 87 | else \ 88 | echo "pkg-oss tarball checksum verification failed!"; \ 89 | exit 1; \ 90 | fi \ 91 | && tar xzvf ${REVISION}.tar.gz \ 92 | && cd pkg-oss-${REVISION} \ 93 | && cd debian \ 94 | && for target in base module-geoip module-image-filter module-njs module-xslt; do \ 95 | make rules-$target; \ 96 | mk-build-deps --install --tool="apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes" \ 97 | debuild-$target/nginx-$NGINX_VERSION/debian/control; \ 98 | done \ 99 | && make base module-geoip module-image-filter module-njs module-xslt \ 100 | ) \ 101 | # we don't remove APT lists here because they get re-downloaded and removed later 102 | \ 103 | # reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies 104 | # (which is done after we install the built packages so we don't have to redownload any overlapping dependencies) 105 | && apt-mark showmanual | xargs apt-mark auto > /dev/null \ 106 | && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \ 107 | \ 108 | # create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be) 109 | && ls -lAFh "$tempDir" \ 110 | && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \ 111 | && grep '^Package: ' "$tempDir/Packages" \ 112 | && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \ 113 | # work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes") 114 | # Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied) 115 | # ... 116 | # E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied) 117 | && apt-get -o Acquire::GzipIndexes=false update \ 118 | ;; \ 119 | esac \ 120 | \ 121 | && apt-get install --no-install-recommends --no-install-suggests -y \ 122 | $nginxPackages \ 123 | gettext-base \ 124 | curl \ 125 | && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \ 126 | \ 127 | # if we have leftovers from building, let's purge them (including extra, unnecessary build deps) 128 | && if [ -n "$tempDir" ]; then \ 129 | apt-get purge -y --auto-remove \ 130 | && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \ 131 | fi \ 132 | # forward request and error logs to docker log collector 133 | && ln -sf /dev/stdout /var/log/nginx/access.log \ 134 | && ln -sf /dev/stderr /var/log/nginx/error.log \ 135 | # create a docker-entrypoint.d directory 136 | && mkdir /docker-entrypoint.d 137 | 138 | # implement changes required to run NGINX as an unprivileged user 139 | RUN sed -i 's,listen 80;,listen 8080;,' /etc/nginx/conf.d/default.conf \ 140 | && sed -i '/user nginx;/d' /etc/nginx/nginx.conf \ 141 | && sed -i 's,\(/var\)\{0\,1\}/run/nginx.pid,/tmp/nginx.pid,' /etc/nginx/nginx.conf \ 142 | && sed -i "/^http {/a \ proxy_temp_path /tmp/proxy_temp;\n client_body_temp_path /tmp/client_temp;\n fastcgi_temp_path /tmp/fastcgi_temp;\n uwsgi_temp_path /tmp/uwsgi_temp;\n scgi_temp_path /tmp/scgi_temp;\n" /etc/nginx/nginx.conf \ 143 | # nginx user must own the cache and etc directory to write cache and tweak the nginx config 144 | && chown -R $UID:0 /var/cache/nginx \ 145 | && chmod -R g+w /var/cache/nginx \ 146 | && chown -R $UID:0 /etc/nginx \ 147 | && chmod -R g+w /etc/nginx 148 | 149 | 
COPY docker-entrypoint.sh / 150 | COPY 10-listen-on-ipv6-by-default.sh /docker-entrypoint.d 151 | COPY 15-local-resolvers.envsh /docker-entrypoint.d 152 | COPY 20-envsubst-on-templates.sh /docker-entrypoint.d 153 | COPY 30-tune-worker-processes.sh /docker-entrypoint.d 154 | ENTRYPOINT ["/docker-entrypoint.sh"] 155 | 156 | EXPOSE 8080 157 | 158 | STOPSIGNAL SIGQUIT 159 | 160 | USER $UID 161 | 162 | CMD ["nginx", "-g", "daemon off;"] 163 | -------------------------------------------------------------------------------- /stable/debian/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:sw=4:ts=4:et 3 | 4 | set -e 5 | 6 | entrypoint_log() { 7 | if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then 8 | echo "$@" 9 | fi 10 | } 11 | 12 | if [ "$1" = "nginx" ] || [ "$1" = "nginx-debug" ]; then 13 | if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then 14 | entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration" 15 | 16 | entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/" 17 | find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do 18 | case "$f" in 19 | *.envsh) 20 | if [ -x "$f" ]; then 21 | entrypoint_log "$0: Sourcing $f"; 22 | . "$f" 23 | else 24 | # warn on shell scripts without exec bit 25 | entrypoint_log "$0: Ignoring $f, not executable"; 26 | fi 27 | ;; 28 | *.sh) 29 | if [ -x "$f" ]; then 30 | entrypoint_log "$0: Launching $f"; 31 | "$f" 32 | else 33 | # warn on shell scripts without exec bit 34 | entrypoint_log "$0: Ignoring $f, not executable"; 35 | fi 36 | ;; 37 | *) entrypoint_log "$0: Ignoring $f";; 38 | esac 39 | done 40 | 41 | entrypoint_log "$0: Configuration complete; ready for start up" 42 | else 43 | entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration" 44 | fi 45 | fi 46 | 47 | exec "$@" 48 | -------------------------------------------------------------------------------- /update.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -Eeuo pipefail 3 | shopt -s nullglob 4 | 5 | cd "$(dirname "$(greadlink -f "$BASH_SOURCE")")" 6 | 7 | declare branches=( 8 | "stable" 9 | "mainline" 10 | ) 11 | 12 | # Current nginx versions 13 | # Remember to update pkgosschecksum when changing this. 14 | declare -A nginx=( 15 | [mainline]='1.27.5' 16 | [stable]='1.28.0' 17 | ) 18 | 19 | # Current njs versions 20 | declare -A njs=( 21 | [mainline]='0.8.10' 22 | [stable]='0.8.10' 23 | ) 24 | 25 | # Current njs patchlevel version 26 | # Remember to update pkgosschecksum when changing this. 27 | declare -A njspkg=( 28 | [mainline]='1' 29 | [stable]='1' 30 | ) 31 | 32 | # Current nginx package patchlevel version 33 | # Remember to update pkgosschecksum when changing this. 34 | declare -A pkg=( 35 | [mainline]=1 36 | [stable]=1 37 | ) 38 | 39 | # Current built-in dynamic modules package patchlevel version 40 | # Remember to update pkgosschecksum when changing this 41 | declare -A dynpkg=( 42 | [mainline]=1 43 | [stable]=1 44 | ) 45 | 46 | declare -A debian=( 47 | [mainline]='bookworm' 48 | [stable]='bookworm' 49 | ) 50 | 51 | declare -A alpine=( 52 | [mainline]='3.21' 53 | [stable]='3.21' 54 | ) 55 | 56 | # When we bump njs version in a stable release we don't move the tag in the 57 | # pkg-oss repo. 
This setting allows us to specify a revision to check out 58 | # when building packages on architectures not supported by nginx.org 59 | # Remember to update pkgosschecksum when changing this. 60 | declare -A rev=( 61 | [mainline]='${NGINX_VERSION}-${PKG_RELEASE}' 62 | [stable]='${NGINX_VERSION}-${PKG_RELEASE}' 63 | ) 64 | 65 | # Holds SHA512 checksum for the pkg-oss tarball produced by source code 66 | # revision/tag in the previous block 67 | # Used in builds for architectures not packaged by nginx.org 68 | declare -A pkgosschecksum=( 69 | [mainline]='c773d98b567bd585c17f55702bf3e4c7d82b676bfbde395270e90a704dca3c758dfe0380b3f01770542b4fd9bed1f1149af4ce28bfc54a27a96df6b700ac1745' 70 | [stable]='517bc18954ccf4efddd51986584ca1f37966833ad342a297e1fe58fd0faf14c5a4dabcb23519dca433878a2927a95d6bea05a6749ee2fa67a33bf24cdc41b1e4' 71 | ) 72 | 73 | get_packages() { 74 | local distro="$1" 75 | shift 76 | local branch="$1" 77 | shift 78 | local perl= 79 | local r= 80 | local sep= 81 | 82 | case "$distro:$branch" in 83 | alpine*:*) 84 | r="r" 85 | sep="." 86 | ;; 87 | debian*:*) 88 | sep="+" 89 | ;; 90 | esac 91 | 92 | case "$distro" in 93 | *-perl) 94 | perl="nginx-module-perl" 95 | ;; 96 | esac 97 | 98 | echo -n ' \\\n' 99 | case "$distro" in 100 | *-slim) 101 | for p in nginx; do 102 | echo -n ' '"$p"'=${NGINX_VERSION}-'"$r"'${PKG_RELEASE} \\' 103 | done 104 | ;; 105 | *) 106 | for p in nginx; do 107 | echo -n ' '"$p"'=${NGINX_VERSION}-'"$r"'${PKG_RELEASE} \\\n' 108 | done 109 | for p in nginx-module-xslt nginx-module-geoip nginx-module-image-filter $perl; do 110 | echo -n ' '"$p"'=${NGINX_VERSION}-'"$r"'${DYNPKG_RELEASE} \\\n' 111 | done 112 | for p in nginx-module-njs; do 113 | echo -n ' '"$p"'=${NGINX_VERSION}'"$sep"'${NJS_VERSION}-'"$r"'${NJS_RELEASE} \\' 114 | done 115 | ;; 116 | esac 117 | } 118 | 119 | get_packagerepo() { 120 | local distro="${1%-perl}" 121 | distro="${distro%-slim}" 122 | shift 123 | local branch="$1" 124 | shift 125 | 126 | [ "$branch" = "mainline" ] && branch="$branch/" || branch="" 127 | 128 | echo "https://nginx.org/packages/${branch}${distro}/" 129 | } 130 | 131 | get_packagever() { 132 | local distro="${1%-perl}" 133 | shift 134 | local branch="$1" 135 | shift 136 | local package="$1" 137 | shift 138 | local suffix= 139 | 140 | [ "${distro}" = "debian" ] && suffix="~${debianver}" 141 | 142 | case "${package}" in 143 | "njs") 144 | echo ${njspkg[$branch]}${suffix} 145 | ;; 146 | "dyn") 147 | echo ${dynpkg[$branch]}${suffix} 148 | ;; 149 | *) 150 | echo ${pkg[$branch]}${suffix} 151 | ;; 152 | esac 153 | } 154 | 155 | get_buildtarget() { 156 | local distro="$1" 157 | case "$distro" in 158 | alpine-slim) 159 | echo base 160 | ;; 161 | alpine) 162 | echo module-geoip module-image-filter module-njs module-xslt 163 | ;; 164 | debian) 165 | echo base module-geoip module-image-filter module-njs module-xslt 166 | ;; 167 | *-perl) 168 | echo module-perl 169 | ;; 170 | esac 171 | } 172 | 173 | generated_warning() { 174 | cat <<__EOF__ 175 | # 176 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "update.sh" 177 | # 178 | # PLEASE DO NOT EDIT IT DIRECTLY. 
179 | # 180 | __EOF__ 181 | } 182 | 183 | for branch in "${branches[@]}"; do 184 | for variant in \ 185 | alpine{,-perl,-slim} \ 186 | debian{,-perl}; do 187 | echo "$branch: $variant dockerfiles" 188 | dir="$branch/$variant" 189 | variant="$(basename "$variant")" 190 | 191 | [ -d "$dir" ] || continue 192 | 193 | template="Dockerfile-${variant%}.template" 194 | { 195 | generated_warning 196 | cat "$template" 197 | } >"$dir/Dockerfile" 198 | 199 | debianver="${debian[$branch]}" 200 | alpinever="${alpine[$branch]}" 201 | nginxver="${nginx[$branch]}" 202 | njsver="${njs[${branch}]}" 203 | revver="${rev[${branch}]}" 204 | pkgosschecksumver="${pkgosschecksum[${branch}]}" 205 | 206 | packagerepo=$(get_packagerepo "$variant" "$branch") 207 | packages=$(get_packages "$variant" "$branch") 208 | packagever=$(get_packagever "$variant" "$branch" "any") 209 | njspkgver=$(get_packagever "$variant" "$branch" "njs") 210 | dynpkgver=$(get_packagever "$variant" "$branch" "dyn") 211 | buildtarget=$(get_buildtarget "$variant") 212 | 213 | gsed -i \ 214 | -e 's,%%ALPINE_VERSION%%,'"$alpinever"',' \ 215 | -e 's,%%DEBIAN_VERSION%%,'"$debianver"',' \ 216 | -e 's,%%DYNPKG_RELEASE%%,'"$dynpkgver"',' \ 217 | -e 's,%%NGINX_VERSION%%,'"$nginxver"',' \ 218 | -e 's,%%NJS_VERSION%%,'"$njsver"',' \ 219 | -e 's,%%NJS_RELEASE%%,'"$njspkgver"',' \ 220 | -e 's,%%PKG_RELEASE%%,'"$packagever"',' \ 221 | -e 's,%%PACKAGES%%,'"$packages"',' \ 222 | -e 's,%%PACKAGEREPO%%,'"$packagerepo"',' \ 223 | -e 's,%%REVISION%%,'"$revver"',' \ 224 | -e 's,%%PKGOSSCHECKSUM%%,'"$pkgosschecksumver"',' \ 225 | -e 's,%%BUILDTARGET%%,'"$buildtarget"',' \ 226 | "$dir/Dockerfile" 227 | 228 | done 229 | 230 | for variant in \ 231 | alpine-slim \ 232 | debian; do \ 233 | echo "$branch: $variant entrypoint scripts" 234 | dir="$branch/$variant" 235 | cp -a entrypoint/*.sh "$dir/" 236 | cp -a entrypoint/*.envsh "$dir/" 237 | done 238 | done 239 | --------------------------------------------------------------------------------