├── .dockerignore ├── .empty ├── .github ├── dependabot.yml ├── no-response.yml └── workflows │ ├── bump-graylog.yml │ └── datanode-dev-image.yml ├── .gitignore ├── LICENSE ├── Makefile ├── README.j2 ├── README.md ├── build └── fetch-and-extract.sh ├── config ├── graylog.conf └── log4j2.xml ├── docker-entrypoint.sh ├── docker ├── datanode │ ├── Dockerfile │ ├── README.md │ ├── entrypoint.sh │ └── hooks │ │ └── build ├── enterprise │ ├── Dockerfile │ └── hooks │ │ └── build ├── forwarder │ ├── Dockerfile │ ├── forwarder-entrypoint.sh │ └── hooks │ │ └── build └── oss │ ├── Dockerfile │ └── hooks │ └── build ├── health_check.sh ├── jenkins.groovy ├── patches └── graylog-server.conf.patch ├── release.py ├── requirements.txt ├── test ├── docker-compose.tpl ├── input-raw-tcp.json ├── input-syslog-tcp.json ├── integration_test.sh ├── linter.sh └── permissions-dashboard.json └── version.yml /.dockerignore: -------------------------------------------------------------------------------- 1 | .git/ 2 | hooks/ 3 | test/ 4 | /venv/ 5 | -------------------------------------------------------------------------------- /.empty: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Graylog2/graylog-docker/94968e4aad4acd6d0c01b74572c3f5b2c8ddd953/.empty -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: docker 4 | directory: "/docker/forwarder" 5 | schedule: 6 | interval: daily 7 | time: "11:00" 8 | open-pull-requests-limit: 10 9 | - package-ecosystem: docker 10 | directory: "/docker/enterprise" 11 | schedule: 12 | interval: daily 13 | time: "11:00" 14 | open-pull-requests-limit: 10 15 | - package-ecosystem: docker 16 | directory: "/docker/oss" 17 | schedule: 18 | interval: daily 19 | time: "11:00" 20 | open-pull-requests-limit: 10 21 | -------------------------------------------------------------------------------- /.github/no-response.yml: -------------------------------------------------------------------------------- 1 | daysUntilClose: 30 2 | responseRequiredLabel: needs-input 3 | -------------------------------------------------------------------------------- /.github/workflows/bump-graylog.yml: -------------------------------------------------------------------------------- 1 | name: "Bump Version" 2 | run-name: "Bump Version - ${{ inputs.product }} ${{ inputs.version }} (branch: ${{ inputs.branch }})" 3 | 4 | on: 5 | workflow_dispatch: 6 | inputs: 7 | branch: 8 | description: "The release branch to check out (use the same branch above!)" 9 | required: true 10 | version: 11 | description: "The new version and revision. 
(Example: \"6.0.0-beta.1-1\")" 12 | required: true 13 | product: 14 | description: "The product to bump" 15 | required: true 16 | type: "choice" 17 | default: "graylog" 18 | options: 19 | - "graylog" 20 | - "forwarder" 21 | 22 | defaults: 23 | run: 24 | shell: "bash" 25 | 26 | # Avoid having multiple workflows modifying the repo at the same time 27 | concurrency: "repo-write" 28 | 29 | jobs: 30 | bump: 31 | runs-on: "ubuntu-latest" 32 | 33 | steps: 34 | - name: "Checkout ${{ inputs.branch }}" 35 | uses: "actions/checkout@v4" 36 | with: 37 | ref: "${{ inputs.branch }}" 38 | token: "${{ secrets.GITHUB_TOKEN }}" 39 | 40 | - name: "Install dependencies" 41 | run: "pip3 install -r requirements.txt" 42 | 43 | - name: "Bump ${{ inputs.product }} version to ${{ inputs.version }}" 44 | run: "./release.py --bump ${{ inputs.product }} --version ${{ inputs.version }}" 45 | 46 | - name: "Generate README" 47 | run: "./release.py --generate-readme" 48 | 49 | - name: "Commit and push" 50 | run: | 51 | git config user.name "github-actions[bot]" 52 | git config user.email "github-actions[bot]@users.noreply.github.com" 53 | 54 | git add version.yml README.md 55 | git commit -m "Bump to ${{ inputs.version }}" 56 | if [ "${{ inputs.product }}" = "forwarder" ]; then 57 | git tag -m "Tag forwarder-${{ inputs.version }}" "forwarder-${{ inputs.version }}" 58 | else 59 | git tag -m "Tag ${{ inputs.version }}" "${{ inputs.version }}" 60 | fi 61 | git push origin "${{ inputs.branch }}" 62 | git push --tags 63 | -------------------------------------------------------------------------------- /.github/workflows/datanode-dev-image.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: "Data Node DEV Image" 3 | 4 | on: 5 | workflow_dispatch: 6 | 7 | jobs: 8 | build: 9 | name: "Build" 10 | 11 | runs-on: "ubuntu-latest" 12 | 13 | steps: 14 | - uses: "actions/checkout@v3" 15 | 16 | - name: "Set up QEMU" 17 | uses: "docker/setup-qemu-action@v2" 18 | 19 | - name: "Set up Docker Buildx" 20 | uses: "docker/setup-buildx-action@v2" 21 | 22 | - name: "Get build args" 23 | shell: "bash" 24 | run: | 25 | for arch in x64 aarch64; do 26 | curl -o manifest-linux-${arch}.json -fsSL \ 27 | -G -d limit=1 -d artifact=graylog-datanode-linux-${arch} \ 28 | https://downloads.graylog.org/nightly-builds 29 | done 30 | 31 | echo "SNAPSHOT_URL_X64=$(jq -r '.artifacts[].url' manifest-linux-x64.json)" | tee -a "$GITHUB_ENV" 32 | echo "SNAPSHOT_URL_AARCH64=$(jq -r '.artifacts[].url' manifest-linux-aarch64.json)" | tee -a "$GITHUB_ENV" 33 | echo "SNAPSHOT_VERSION=$(jq -r '.artifacts[].version' manifest-linux-x64.json)" | tee -a "$GITHUB_ENV" 34 | echo "BUILD_DATE=$(TZ=UTC date '+%FT%T%Z')" | tee -a "$GITHUB_ENV" 35 | echo "VCS_REF=$(git rev-parse HEAD)" | tee -a "$GITHUB_ENV" 36 | 37 | - name: "Login to Docker Hub" 38 | uses: "docker/login-action@v2" 39 | with: 40 | username: "${{ secrets.DOCKERHUB_USERNAME }}" 41 | password: "${{ secrets.DOCKERHUB_PASSWORD }}" 42 | 43 | - name: "Build and push image" 44 | uses: "docker/build-push-action@v4" 45 | with: 46 | context: "." 
47 | file: "docker/datanode/Dockerfile" 48 | platforms: "linux/amd64,linux/arm64" 49 | pull: true 50 | push: true 51 | tags: "graylog/graylog-datanode:5.2-dev" 52 | build-args: | 53 | SNAPSHOT_URL_X64=${{ env.SNAPSHOT_URL_X64 }} 54 | SNAPSHOT_URL_AARCH64=${{ env.SNAPSHOT_URL_AARCH64 }} 55 | GRAYLOG_VERSION=${{ env.SNAPSHOT_VERSION }} 56 | BUILD_DATE=${{ env.BUILD_DATE }} 57 | VCS_REF=${{ env.VCS_REF }} 58 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | test/.env 3 | 4 | test/docker-compose.yml 5 | cert.pem 6 | /venv/ 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | 203 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | default: docker_build 2 | 3 | docker_build: 4 | cd docker/oss && IMAGE_NAME=graylog hooks/build 5 | cd docker/enterprise && IMAGE_NAME=graylog-enterprise hooks/build 6 | cd docker/forwarder && IMAGE_NAME=graylog-forwarder hooks/build 7 | cd docker/datanode && IMAGE_NAME=graylog-datanode hooks/build 8 | 9 | linter: 10 | @test/linter.sh 11 | 12 | integration_test: 13 | @test/integration_test.sh 14 | 15 | test: linter integration_test 16 | -------------------------------------------------------------------------------- /README.j2: -------------------------------------------------------------------------------- 1 | # Graylog Docker Image 2 | 3 | [![Docker Stars](https://img.shields.io/docker/stars/graylog/graylog.svg)][hub] [![Docker Pulls](https://img.shields.io/docker/pulls/graylog/graylog.svg)][hub] 4 | 5 | [hub]: https://hub.docker.com/r/graylog/graylog/ 6 | 7 | The latest stable version of Graylog is **`{{ graylog.major_version }}.{{ graylog.minor_version }}.{{ graylog.patch_version }}`**. 8 | 9 | ## What is Graylog? 10 | 11 | Graylog is a centralized logging solution that enables aggregating and searching through logs. It provides a powerful query language, a processing pipeline for data transformation, alerting abilities, and much more. It is fully extensible through a REST API. Add-ons can be downloaded from the [Graylog Marketplace](https://marketplace.graylog.org/). 12 | 13 | 14 | ## Image Details 15 | 16 | There are images for the `linux/amd64` and `linux/arm64` platforms available. All images are based on the latest [Eclipse Temurin image](https://hub.docker.com/_/eclipse-temurin) (JRE + Ubuntu LTS variant) available at build time. 17 | 18 | #### `graylog/graylog` 19 | 20 | This is the open source [Graylog ](https://hub.docker.com/r/graylog/graylog/) image. It contains [Graylog](https://github.com/Graylog2/graylog2-server) as well as the [Integrations](https://docs.graylog.org/docs/integrations) plugin. 21 | 22 | | Java Version | Platform | Tags | 23 | |---|---|---| 24 | {% if 'alpha' in graylog.patch_version or 'beta' in graylog.patch_version or 'rc' in graylog.patch_version -%} 25 | | OpenJDK 17 | `linux/amd64`, `linux/arm64` | `{{ graylog.major_version }}.{{ graylog.minor_version }}.{{ graylog.patch_version }}-{{ graylog.release }}` | 26 | {% else -%} 27 | | OpenJDK 17 | `linux/amd64`, `linux/arm64` | `{{ graylog.major_version }}.{{ graylog.minor_version }}`, `{{ graylog.major_version }}.{{ graylog.minor_version }}.{{ graylog.patch_version }}`, `{{ graylog.major_version }}.{{ graylog.minor_version }}.{{ graylog.patch_version }}-{{ graylog.release }}` | 28 | {% endif %} 29 | 30 | > Note: There is no 'latest' tag. You'll need to specify which version you want. 31 | 32 | #### `graylog/graylog-enterprise` 33 | 34 | This is the [Graylog Enterprise](https://hub.docker.com/r/graylog/graylog-enterprise/) image. It contains [Graylog](https://github.com/Graylog2/graylog2-server), the [Graylog Enterprise](https://docs.graylog.org/docs/intro) plugin, the [Integrations](https://docs.graylog.org/docs/integrations) plugin, and the Enterprise Integrations plugin. 
35 | 36 | | Java Version | Platform | Tags | 37 | |---|---|---| 38 | {% if 'alpha' in graylog.patch_version or 'beta' in graylog.patch_version or 'rc' in graylog.patch_version -%} 39 | | OpenJDK 17 | `linux/amd64`, `linux/arm64` | `{{ graylog.major_version }}.{{ graylog.minor_version }}.{{ graylog.patch_version }}-{{ graylog.release }}` | 40 | {% else -%} 41 | | OpenJDK 17 | `linux/amd64`, `linux/arm64` | `{{ graylog.major_version }}.{{ graylog.minor_version }}`, `{{ graylog.major_version }}.{{ graylog.minor_version }}.{{ graylog.patch_version }}`, `{{ graylog.major_version }}.{{ graylog.minor_version }}.{{ graylog.patch_version }}-{{ graylog.release }}` | 42 | {% endif %} 43 | 44 | 45 | #### `graylog/graylog-forwarder` 46 | 47 | This image runs the [Graylog Forwarder](https://hub.docker.com/r/graylog/graylog-forwarder/). Documentation on the Forwarder can be found [here](https://docs.graylog.org/docs/forwarder). 48 | 49 | The latest stable version is **`{{ forwarder.version }}`**, with support for Java 17 on platform `linux/amd64` and `linux/arm64`. 50 | 51 | | Java Version | Platform | Tags | 52 | |---|---|---| 53 | | OpenJDK 17 | `linux/amd64`, `linux/arm64` | `{{ forwarder.version }}`, `forwarder-{{ forwarder.version }}-{{ forwarder.release }}` | 54 | 55 | 56 | ## Architecture 57 | 58 | Take a look at the minimal [Graylog architecture](https://docs.graylog.org/docs/architecture) to get the big picture of a Graylog setup. In essence, Graylog needs to talk to MongoDB to store configuration data as well as Elasticsearch to store the actual log data. 59 | 60 | 61 | ## Configuration 62 | 63 | Please refer to the [Graylog Docker documentation](https://docs.graylog.org/docs/docker) for a comprehensive overview and detailed description of the Graylog Docker image. 64 | 65 | If you want to quickly spin up an instance for testing, you can use our [Docker Compose template](https://github.com/Graylog2/docker-compose). 66 | 67 | Notably, this image **requires** that two important configuration options be set (although in practice you will likely need to set more): 68 | 1. `password_secret` (environment variable `GRAYLOG_PASSWORD_SECRET`) 69 | * A secret that is used for password encryption and salting. 70 | * Must be at least 16 characters, however using at least 64 characters is strongly recommended. 71 | * Must be the same on all Graylog nodes in the cluster. 72 | * May be generated with something like: `pwgen -N 1 -s 96` 73 | 2. `root_password_sha2` (environment variable `GRAYLOG_ROOT_PASSWORD_SHA2`) 74 | * A SHA2 hash of a password you will use for your initial login as Graylog's root user. 75 | * The default username is `admin`. This value is customizable via configuration option `root_username` (environment variable `GRAYLOG_ROOT_USERNAME`). 76 | * In general, these credentials will only be needed to initially set up the system or reconfigure the system in the event of an authentication backend failure. 77 | * This password cannot be changed using the API or via the Web interface. 78 | * May be generated with something like: `echo -n "Enter Password: " && head -1 Note: There is no 'latest' tag. You'll need to specify which version you want. 28 | 29 | #### `graylog/graylog-enterprise` 30 | 31 | This is the [Graylog Enterprise](https://hub.docker.com/r/graylog/graylog-enterprise/) image. 
It contains [Graylog](https://github.com/Graylog2/graylog2-server), the [Graylog Enterprise](https://docs.graylog.org/docs/intro) plugin, the [Integrations](https://docs.graylog.org/docs/integrations) plugin, and the Enterprise Integrations plugin. 32 | 33 | | Java Version | Platform | Tags | 34 | |---|---|---| 35 | | OpenJDK 17 | `linux/amd64`, `linux/arm64` | `6.1`, `6.1.0`, `6.1.0-1` | 36 | 37 | 38 | 39 | #### `graylog/graylog-forwarder` 40 | 41 | This image runs the [Graylog Forwarder](https://hub.docker.com/r/graylog/graylog-forwarder/). Documentation on the Forwarder can be found [here](https://docs.graylog.org/docs/forwarder). 42 | 43 | The latest stable version is **`6.1`**, with support for Java 17 on platform `linux/amd64` and `linux/arm64`. 44 | 45 | | Java Version | Platform | Tags | 46 | |---|---|---| 47 | | OpenJDK 17 | `linux/amd64`, `linux/arm64` | `6.1`, `forwarder-6.1-1` | 48 | 49 | 50 | ## Architecture 51 | 52 | Take a look at the minimal [Graylog architecture](https://docs.graylog.org/docs/architecture) to get the big picture of a Graylog setup. In essence, Graylog needs to talk to MongoDB to store configuration data as well as Elasticsearch to store the actual log data. 53 | 54 | 55 | ## Configuration 56 | 57 | Please refer to the [Graylog Docker documentation](https://docs.graylog.org/docs/docker) for a comprehensive overview and detailed description of the Graylog Docker image. 58 | 59 | If you want to quickly spin up an instance for testing, you can use our [Docker Compose template](https://github.com/Graylog2/docker-compose). 60 | 61 | Notably, this image **requires** that two important configuration options be set (although in practice you will likely need to set more): 62 | 1. `password_secret` (environment variable `GRAYLOG_PASSWORD_SECRET`) 63 | * A secret that is used for password encryption and salting. 64 | * Must be at least 16 characters, however using at least 64 characters is strongly recommended. 65 | * Must be the same on all Graylog nodes in the cluster. 66 | * May be generated with something like: `pwgen -N 1 -s 96` 67 | 2. `root_password_sha2` (environment variable `GRAYLOG_ROOT_PASSWORD_SHA2`) 68 | * A SHA2 hash of a password you will use for your initial login as Graylog's root user. 69 | * The default username is `admin`. This value is customizable via configuration option `root_username` (environment variable `GRAYLOG_ROOT_USERNAME`). 70 | * In general, these credentials will only be needed to initially set up the system or reconfigure the system in the event of an authentication backend failure. 71 | * This password cannot be changed using the API or via the Web interface. 72 | * May be generated with something like: `echo -n "Enter Password: " && head -1 Configuration > Index Set Defaults. 277 | # The following settings are used to initialize in-database defaults on the first Graylog server startup. 278 | # Specify these values if you want the Graylog server and indices to start with specific settings. 279 | 280 | # The prefix for the Default Graylog index set. 281 | # 282 | #elasticsearch_index_prefix = graylog 283 | 284 | # The name of the index template for the Default Graylog index set. 285 | # 286 | #elasticsearch_template_name = graylog-internal 287 | 288 | # The prefix for the for graylog event indices. 289 | # 290 | #default_events_index_prefix = gl-events 291 | 292 | # The prefix for graylog system event indices. 
293 | # 294 | #default_system_events_index_prefix = gl-system-events 295 | 296 | # Analyzer (tokenizer) to use for message and full_message field. The "standard" filter usually is a good idea. 297 | # All supported analyzers are: standard, simple, whitespace, stop, keyword, pattern, language, snowball, custom 298 | # Elasticsearch documentation: https://www.elastic.co/guide/en/elasticsearch/reference/2.3/analysis.html 299 | # Note that this setting only takes effect on newly created indices. 300 | # 301 | #elasticsearch_analyzer = standard 302 | 303 | # How many Elasticsearch shards and replicas should be used per index? 304 | # 305 | #elasticsearch_shards = 1 306 | #elasticsearch_replicas = 0 307 | 308 | # Maximum number of attempts to connect to datanode on boot. 309 | # Default: 0, retry indefinitely with the given delay until a connection could be established 310 | #datanode_startup_connection_attempts = 5 311 | 312 | # Waiting time in between connection attempts for datanode_startup_connection_attempts 313 | # 314 | # Default: 5s 315 | # datanode_startup_connection_delay = 5s 316 | 317 | # Disable the optimization of Elasticsearch indices after index cycling. This may take some load from Elasticsearch 318 | # on heavily used systems with large indices, but it will decrease search performance. The default is to optimize 319 | # cycled indices. 320 | # 321 | #disable_index_optimization = true 322 | 323 | # Optimize the index down to <= index_optimization_max_num_segments. A higher number may take some load from Elasticsearch 324 | # on heavily used systems with large indices, but it will decrease search performance. The default is 1. 325 | # 326 | #index_optimization_max_num_segments = 1 327 | 328 | # Time interval to trigger a full refresh of the index field types for all indexes. This will query ES for all indexes 329 | # and populate any missing field type information to the database. 330 | # 331 | #index_field_type_periodical_full_refresh_interval = 5m 332 | 333 | # You can configure the default strategy used to determine when to rotate the currently active write index. 334 | # Multiple rotation strategies are supported, the default being "time-size-optimizing": 335 | # - "time-size-optimizing" tries to rotate daily, while focussing on optimal sized shards. 336 | # The global default values can be configured with 337 | # "time_size_optimizing_retention_min_lifetime" and "time_size_optimizing_retention_max_lifetime". 338 | # - "count" of messages per index, use elasticsearch_max_docs_per_index below to configure 339 | # - "size" per index, use elasticsearch_max_size_per_index below to configure 340 | # - "time" interval between index rotations, use elasticsearch_max_time_per_index to configure 341 | # A strategy may be disabled by specifying the optional enabled_index_rotation_strategies list and excluding that strategy. 342 | # 343 | #enabled_index_rotation_strategies = count,size,time,time-size-optimizing 344 | 345 | # The default index rotation strategy to use. 346 | #rotation_strategy = time-size-optimizing 347 | 348 | # (Approximate) maximum number of documents in an Elasticsearch index before a new index 349 | # is being created, also see no_retention and elasticsearch_max_number_of_indices. 350 | # Configure this if you used 'rotation_strategy = count' above. 
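# As an illustration of the "count" strategy described above, a minimal
# sketch using only options documented in this file (values are examples
# only):
#
#   rotation_strategy = count
#   elasticsearch_max_docs_per_index = 20000000
#   elasticsearch_max_number_of_indices = 20
#   retention_strategy = delete
#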
351 | # 352 | #elasticsearch_max_docs_per_index = 20000000 353 | 354 | # (Approximate) maximum size in bytes per Elasticsearch index on disk before a new index is being created, also see 355 | # no_retention and elasticsearch_max_number_of_indices. Default is 30GB. 356 | # Configure this if you used 'rotation_strategy = size' above. 357 | # 358 | #elasticsearch_max_size_per_index = 32212254720 359 | 360 | # (Approximate) maximum time before a new Elasticsearch index is being created, also see 361 | # no_retention and elasticsearch_max_number_of_indices. Default is 1 day. 362 | # Configure this if you used 'rotation_strategy = time' above. 363 | # Please note that this rotation period does not look at the time specified in the received messages, but is 364 | # using the real clock value to decide when to rotate the index! 365 | # Specify the time using a duration and a suffix indicating which unit you want: 366 | # 1w = 1 week 367 | # 1d = 1 day 368 | # 12h = 12 hours 369 | # Permitted suffixes are: d for day, h for hour, m for minute, s for second. 370 | # 371 | #elasticsearch_max_time_per_index = 1d 372 | 373 | # Controls whether empty indices are rotated. Only applies to the "time" rotation_strategy. 374 | # 375 | #elasticsearch_rotate_empty_index_set=false 376 | 377 | # Provides a hard upper limit for the retention period of any index set at configuration time. 378 | # 379 | # This setting is used to validate the value a user chooses for the maximum number of retained indexes, when configuring 380 | # an index set. However, it is only in effect, when a time-based rotation strategy is chosen. 381 | # 382 | # If a rotation strategy other than time-based is selected and/or no value is provided for this setting, no upper limit 383 | # for index retention will be enforced. This is also the default. 384 | 385 | # Default: none 386 | #max_index_retention_period = P90d 387 | 388 | # Optional upper bound on elasticsearch_max_time_per_index 389 | # 390 | #elasticsearch_max_write_index_age = 1d 391 | 392 | # Disable message retention on this node, i. e. disable Elasticsearch index rotation. 393 | #no_retention = false 394 | 395 | # Decide what happens with the oldest indices when the maximum number of indices is reached. 396 | # The following strategies are available: 397 | # - delete # Deletes the index completely (Default) 398 | # - close # Closes the index and hides it from the system. Can be re-opened later. 399 | # 400 | #retention_strategy = delete 401 | 402 | # This configuration list limits the retention strategies available for user configuration via the UI 403 | # The following strategies can be disabled: 404 | # - delete # Deletes the index completely (Default) 405 | # - close # Closes the index and hides it from the system. Can be re-opened later. 406 | # - none # No operation is performed. The index stays open. (Not recommended) 407 | # WARNING: At least one strategy must be enabled. Be careful when extending this list on existing installations! 408 | disabled_retention_strategies = none,close 409 | 410 | # How many indices do you want to keep for the delete and close retention types? 411 | # 412 | #elasticsearch_max_number_of_indices = 20 413 | 414 | # Disable checking the version of Elasticsearch for being compatible with this Graylog release. 415 | # WARNING: Using Graylog with unsupported and untested versions of Elasticsearch may lead to data loss! 416 | # 417 | #elasticsearch_disable_version_check = true 418 | 419 | # Do you want to allow searches with leading wildcards? 
This can be extremely resource hungry and should only 420 | # be enabled with care. See also: https://docs.graylog.org/docs/query-language 421 | allow_leading_wildcard_searches = false 422 | 423 | # Do you want to allow searches to be highlighted? Depending on the size of your messages this can be memory hungry and 424 | # should only be enabled after making sure your Elasticsearch cluster has enough memory. 425 | allow_highlighting = false 426 | 427 | # Sets field value suggestion mode. The possible values are: 428 | # 1. "off" - field value suggestions are turned off 429 | # 2. "textual_only" - field values are suggested only for textual fields 430 | # 3. "on" (default) - field values are suggested for all field types, even the types where suggestions are inefficient performance-wise 431 | field_value_suggestion_mode = on 432 | 433 | # Global timeout for index optimization (force merge) requests. 434 | # Default: 1h 435 | #elasticsearch_index_optimization_timeout = 1h 436 | 437 | # Maximum number of concurrently running index optimization (force merge) jobs. 438 | # If you are using lots of different index sets, you might want to increase that number. 439 | # This value should be set lower than elasticsearch_max_total_connections_per_route, otherwise index optimization 440 | # could deplete all the client connections to the search server and block new messages ingestion for prolonged 441 | # periods of time. 442 | # Default: 10 443 | #elasticsearch_index_optimization_jobs = 10 444 | 445 | # Mute the logging-output of ES deprecation warnings during REST calls in the ES RestClient 446 | #elasticsearch_mute_deprecation_warnings = true 447 | 448 | # Time interval for index range information cleanups. This setting defines how often stale index range information 449 | # is being purged from the database. 450 | # Default: 1h 451 | #index_ranges_cleanup_interval = 1h 452 | 453 | # Batch size for the Elasticsearch output. This is the maximum (!) number of messages the Elasticsearch output 454 | # module will get at once and write to Elasticsearch in a batch call. If the configured batch size has not been 455 | # reached within output_flush_interval seconds, everything that is available will be flushed at once. Remember 456 | # that every outputbuffer processor manages its own batch and performs its own batch write calls. 457 | # ("outputbuffer_processors" variable) 458 | output_batch_size = 500 459 | 460 | # Flush interval (in seconds) for the Elasticsearch output. This is the maximum amount of time between two 461 | # batches of messages written to Elasticsearch. It is only effective at all if your minimum number of messages 462 | # for this time period is less than output_batch_size * outputbuffer_processors. 463 | output_flush_interval = 1 464 | 465 | # As stream outputs are loaded only on demand, an output which is failing to initialize will be tried over and 466 | # over again. To prevent this, the following configuration options define after how many faults an output will 467 | # not be tried again for an also configurable amount of seconds. 468 | output_fault_count_threshold = 5 469 | output_fault_penalty_seconds = 30 470 | 471 | # Number of process buffer processors running in parallel. 472 | # By default, the value will be determined automatically based on the number of CPU cores available to the JVM, using 473 | # the formula (<#cores> * 0.36 + 0.625) rounded to the nearest integer. 474 | # Set this value explicitly to override the dynamically calculated value. 
Try raising the number if your buffers are 475 | # filling up. 476 | #processbuffer_processors = 5 477 | 478 | # Number of output buffer processors running in parallel. 479 | # By default, the value will be determined automatically based on the number of CPU cores available to the JVM, using 480 | # the formula (<#cores> * 0.162 + 0.625) rounded to the nearest integer. 481 | # Set this value explicitly to override the dynamically calculated value. Try raising the number if your buffers are 482 | # filling up. 483 | #outputbuffer_processors = 3 484 | 485 | # The size of the thread pool in the output buffer processor. 486 | # Default: 3 487 | #outputbuffer_processor_threads_core_pool_size = 3 488 | 489 | # UDP receive buffer size for all message inputs (e. g. SyslogUDPInput). 490 | #udp_recvbuffer_sizes = 1048576 491 | 492 | # Wait strategy describing how buffer processors wait on a cursor sequence. (default: sleeping) 493 | # Possible types: 494 | # - yielding 495 | # Compromise between performance and CPU usage. 496 | # - sleeping 497 | # Compromise between performance and CPU usage. Latency spikes can occur after quiet periods. 498 | # - blocking 499 | # High throughput, low latency, higher CPU usage. 500 | # - busy_spinning 501 | # Avoids syscalls which could introduce latency jitter. Best when threads can be bound to specific CPU cores. 502 | processor_wait_strategy = blocking 503 | 504 | # Size of internal ring buffers. Raise this if raising outputbuffer_processors does not help anymore. 505 | # For optimum performance your LogMessage objects in the ring buffer should fit in your CPU L3 cache. 506 | # Must be a power of 2. (512, 1024, 2048, ...) 507 | ring_size = 65536 508 | 509 | inputbuffer_ring_size = 65536 510 | inputbuffer_wait_strategy = blocking 511 | 512 | # Number of input buffer processors running in parallel. 513 | #inputbuffer_processors = 2 514 | 515 | # Manually stopped inputs are no longer auto-restarted. To re-enable the previous behavior, set auto_restart_inputs to true. 516 | #auto_restart_inputs = true 517 | 518 | # Enable the message journal. 519 | message_journal_enabled = true 520 | 521 | # The directory which will be used to store the message journal. The directory must be exclusively used by Graylog and 522 | # must not contain any other files than the ones created by Graylog itself. 523 | # 524 | # ATTENTION: 525 | # If you create a separate partition for the journal files and use a file system creating directories like 'lost+found' 526 | # in the root directory, you need to create a sub directory for your journal. 527 | # Otherwise Graylog will log an error message that the journal is corrupt and Graylog will not start. 528 | message_journal_dir = data/journal 529 | 530 | # Journal hold messages before they could be written to Elasticsearch. 531 | # For a maximum of 12 hours or 5 GB whichever happens first. 532 | # During normal operation the journal will be smaller. 533 | #message_journal_max_age = 12h 534 | #message_journal_max_size = 5gb 535 | 536 | #message_journal_flush_age = 1m 537 | #message_journal_flush_interval = 1000000 538 | #message_journal_segment_age = 1h 539 | #message_journal_segment_size = 100mb 540 | 541 | # Number of threads used exclusively for dispatching internal events. Default is 2. 542 | #async_eventbus_processors = 2 543 | 544 | # How many seconds to wait between marking node as DEAD for possible load balancers and starting the actual 545 | # shutdown process. Set to 0 if you have no status checking load balancers in front. 
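# A worked example of the processor-count formulas documented above
# (assuming a hypothetical JVM with 8 available cores):
#   processbuffer_processors: 8 * 0.36  + 0.625 = 3.505 -> rounds to 4
#   outputbuffer_processors:  8 * 0.162 + 0.625 = 1.921 -> rounds to 2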
546 | lb_recognition_period_seconds = 3 547 | 548 | # Journal usage percentage that triggers requesting throttling for this server node from load balancers. The feature is 549 | # disabled if not set. 550 | #lb_throttle_threshold_percentage = 95 551 | 552 | # Every message is matched against the configured streams and it can happen that a stream contains rules which 553 | # take an unusual amount of time to run, for example if its using regular expressions that perform excessive backtracking. 554 | # This will impact the processing of the entire server. To keep such misbehaving stream rules from impacting other 555 | # streams, Graylog limits the execution time for each stream. 556 | # The default values are noted below, the timeout is in milliseconds. 557 | # If the stream matching for one stream took longer than the timeout value, and this happened more than "max_faults" times 558 | # that stream is disabled and a notification is shown in the web interface. 559 | #stream_processing_timeout = 2000 560 | #stream_processing_max_faults = 3 561 | 562 | # Since 0.21 the Graylog server supports pluggable output modules. This means a single message can be written to multiple 563 | # outputs. The next setting defines the timeout for a single output module, including the default output module where all 564 | # messages end up. 565 | # 566 | # Time in milliseconds to wait for all message outputs to finish writing a single message. 567 | #output_module_timeout = 10000 568 | 569 | # Time in milliseconds after which a detected stale leader node is being rechecked on startup. 570 | #stale_leader_timeout = 2000 571 | 572 | # Time in milliseconds which Graylog is waiting for all threads to stop on shutdown. 573 | #shutdown_timeout = 30000 574 | 575 | # MongoDB connection string 576 | # See https://docs.mongodb.com/manual/reference/connection-string/ for details 577 | mongodb_uri = mongodb://mongo/graylog 578 | 579 | # Authenticate against the MongoDB server 580 | # '+'-signs in the username or password need to be replaced by '%2B' 581 | #mongodb_uri = mongodb://grayloguser:secret@localhost:27017/graylog 582 | 583 | # Use a replica set instead of a single host 584 | #mongodb_uri = mongodb://grayloguser:secret@localhost:27017,localhost:27018,localhost:27019/graylog?replicaSet=rs01 585 | 586 | # DNS Seedlist https://docs.mongodb.com/manual/reference/connection-string/#dns-seedlist-connection-format 587 | #mongodb_uri = mongodb+srv://server.example.org/graylog 588 | 589 | # Increase this value according to the maximum connections your MongoDB server can handle from a single client 590 | # if you encounter MongoDB connection problems. 591 | mongodb_max_connections = 1000 592 | 593 | # Maximum number of attempts to connect to MongoDB on boot for the version probe. 594 | # 595 | # Default: 0, retry indefinitely until a connection can be established 596 | #mongodb_version_probe_attempts = 5 597 | 598 | # Email transport 599 | #transport_email_enabled = false 600 | #transport_email_hostname = mail.example.com 601 | #transport_email_port = 587 602 | #transport_email_use_auth = true 603 | #transport_email_auth_username = you@example.com 604 | #transport_email_auth_password = secret 605 | #transport_email_from_email = graylog@example.com 606 | #transport_email_socket_connection_timeout = 10s 607 | #transport_email_socket_timeout = 10s 608 | 609 | # Encryption settings 610 | # 611 | # ATTENTION: 612 | # Using SMTP with STARTTLS *and* SMTPS at the same time is *not* possible. 
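# As a sketch, a typical STARTTLS setup combines the options from this
# section like so (all values are placeholders):
#   transport_email_enabled = true
#   transport_email_hostname = mail.example.com
#   transport_email_port = 587
#   transport_email_use_auth = true
#   transport_email_use_tls = true
#   transport_email_use_ssl = false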
613 | 614 | # Use SMTP with STARTTLS, see https://en.wikipedia.org/wiki/Opportunistic_TLS 615 | #transport_email_use_tls = true 616 | 617 | # Use SMTP over SSL (SMTPS), see https://en.wikipedia.org/wiki/SMTPS 618 | # This is deprecated on most SMTP services! 619 | #transport_email_use_ssl = false 620 | 621 | 622 | # Specify and uncomment this if you want to include links to the stream in your stream alert mails. 623 | # This should define the fully qualified base url to your web interface exactly the same way as it is accessed by your users. 624 | #transport_email_web_interface_url = https://graylog.example.com 625 | 626 | # The default connect timeout for outgoing HTTP connections. 627 | # Values must be a positive duration (and between 1 and 2147483647 when converted to milliseconds). 628 | # Default: 5s 629 | #http_connect_timeout = 5s 630 | 631 | # The default read timeout for outgoing HTTP connections. 632 | # Values must be a positive duration (and between 1 and 2147483647 when converted to milliseconds). 633 | # Default: 10s 634 | #http_read_timeout = 10s 635 | 636 | # The default write timeout for outgoing HTTP connections. 637 | # Values must be a positive duration (and between 1 and 2147483647 when converted to milliseconds). 638 | # Default: 10s 639 | #http_write_timeout = 10s 640 | 641 | # HTTP proxy for outgoing HTTP connections 642 | # ATTENTION: If you configure a proxy, make sure to also configure the "http_non_proxy_hosts" option so internal 643 | # HTTP connections with other nodes does not go through the proxy. 644 | # Examples: 645 | # - http://proxy.example.com:8123 646 | # - http://username:password@proxy.example.com:8123 647 | #http_proxy_uri = 648 | 649 | # A list of hosts that should be reached directly, bypassing the configured proxy server. 650 | # This is a list of patterns separated by ",". The patterns may start or end with a "*" for wildcards. 651 | # Any host matching one of these patterns will be reached through a direct connection instead of through a proxy. 652 | # Examples: 653 | # - localhost,127.0.0.1 654 | # - 10.0.*,*.example.com 655 | #http_non_proxy_hosts = 656 | 657 | # Connection timeout for a configured LDAP server (e. g. ActiveDirectory) in milliseconds. 658 | #ldap_connection_timeout = 2000 659 | 660 | # Disable the use of a native system stats collector (currently OSHI) 661 | #disable_native_system_stats_collector = false 662 | 663 | # The default cache time for dashboard widgets. (Default: 10 seconds, minimum: 1 second) 664 | #dashboard_widget_default_cache_time = 10s 665 | 666 | # For some cluster-related REST requests, the node must query all other nodes in the cluster. This is the maximum number 667 | # of threads available for this. Increase it, if '/cluster/*' requests take long to complete. 668 | # Should be http_thread_pool_size * average_cluster_size if you have a high number of concurrent users. 669 | #proxied_requests_thread_pool_size = 64 670 | 671 | # The default HTTP call timeout for cluster-related REST requests. This timeout might be overriden for some 672 | # resources in code or other configuration values. (some cluster metrics resources use a lower timeout) 673 | #proxied_requests_default_call_timeout = 5s 674 | 675 | # The server is writing processing status information to the database on a regular basis. This setting controls how 676 | # often the data is written to the database. 
677 | # Default: 1s (cannot be less than 1s) 678 | #processing_status_persist_interval = 1s 679 | 680 | # Configures the threshold for detecting outdated processing status records. Any records that haven't been updated 681 | # in the configured threshold will be ignored. 682 | # Default: 1m (one minute) 683 | #processing_status_update_threshold = 1m 684 | 685 | # Configures the journal write rate threshold for selecting processing status records. Any records that have a lower 686 | # one minute rate than the configured value might be ignored. (dependent on number of messages in the journal) 687 | # Default: 1 688 | #processing_status_journal_write_rate_threshold = 1 689 | 690 | # Automatically load content packs in "content_packs_dir" on the first start of Graylog. 691 | #content_packs_loader_enabled = false 692 | 693 | # The directory which contains content packs which should be loaded on the first start of Graylog. 694 | # Default: /contentpacks 695 | #content_packs_dir = data/contentpacks 696 | 697 | # A comma-separated list of content packs (files in "content_packs_dir") which should be applied on 698 | # the first start of Graylog. 699 | # Default: empty 700 | #content_packs_auto_install = grok-patterns.json 701 | 702 | # The allowed TLS protocols for system wide TLS enabled servers. (e.g. message inputs, http interface) 703 | # Setting this to an empty value, leaves it up to system libraries and the used JDK to chose a default. 704 | # Default: TLSv1.2,TLSv1.3 (might be automatically adjusted to protocols supported by the JDK) 705 | #enabled_tls_protocols = TLSv1.2,TLSv1.3 706 | 707 | # Enable Prometheus exporter HTTP server. 708 | # Default: false 709 | #prometheus_exporter_enabled = false 710 | 711 | # IP address and port for the Prometheus exporter HTTP server. 712 | # Default: 127.0.0.1:9833 713 | #prometheus_exporter_bind_address = 127.0.0.1:9833 714 | 715 | # Path to the Prometheus exporter core mapping file. If this option is enabled, the full built-in core mapping is 716 | # replaced with the mappings in this file. 717 | # This file is monitored for changes and updates will be applied at runtime. 718 | # Default: none 719 | #prometheus_exporter_mapping_file_path_core = prometheus-exporter-mapping-core.yml 720 | 721 | # Path to the Prometheus exporter custom mapping file. If this option is enabled, the mappings in this file are 722 | # configured in addition to the built-in core mappings. The mappings in this file cannot overwrite any core mappings. 723 | # This file is monitored for changes and updates will be applied at runtime. 724 | # Default: none 725 | #prometheus_exporter_mapping_file_path_custom = prometheus-exporter-mapping-custom.yml 726 | 727 | # Configures the refresh interval for the monitored Prometheus exporter mapping files. 728 | # Default: 60s 729 | #prometheus_exporter_mapping_file_refresh_interval = 60s 730 | 731 | # Optional allowed paths for Graylog data files. If provided, certain operations in Graylog will only be permitted 732 | # if the data file(s) are located in the specified paths (for example, with the CSV File lookup adapter). 733 | # All subdirectories of indicated paths are allowed by default. This Provides an additional layer of security, 734 | # and allows administrators to control where in the file system Graylog users can select files from. 
735 | #allowed_auxiliary_paths = /etc/graylog/data-files,/etc/custom-allowed-path
736 | 
737 | # Do not perform any preflight checks when starting Graylog
738 | # Default: false
739 | #skip_preflight_checks = false
740 | 
741 | # Ignore any exceptions encountered when running migrations
742 | # Use with caution - skipping failing migrations may result in an inconsistent DB state.
743 | # Default: false
744 | #ignore_migration_failures = false
745 | 
746 | # Comma-separated list of notification types which should not emit a system event.
747 | # Default: SIDECAR_STATUS_UNKNOWN which would create a new event whenever the status of a sidecar becomes "Unknown"
748 | #system_event_excluded_types = SIDECAR_STATUS_UNKNOWN
749 | 
750 | # RSS settings for content stream
751 | #content_stream_rss_url = https://www.graylog.org/post
752 | #content_stream_refresh_interval = 7d
753 | 
754 | # Maximum value that can be set for an event limit.
755 | # Default: 1000
756 | #event_definition_max_event_limit = 1000
757 | 
--------------------------------------------------------------------------------
/config/log4j2.xml:
--------------------------------------------------------------------------------
(XML content lost in extraction; the original file holds the Log4j 2 configuration used for container logging.)
--------------------------------------------------------------------------------
/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | set -e
4 | 
5 | # Save the settings passed in via the docker(-compose) environment
6 | __GRAYLOG_SERVER_JAVA_OPTS=${GRAYLOG_SERVER_JAVA_OPTS}
7 | 
8 | # shellcheck disable=SC1091
9 | source /etc/profile
10 | 
11 | # and add the previously saved settings to our defaults
12 | if [[ -n "${__GRAYLOG_SERVER_JAVA_OPTS}" ]]
13 | then
14 | echo "adding environment opts"
15 | GRAYLOG_SERVER_JAVA_OPTS="${GRAYLOG_SERVER_JAVA_OPTS} ${__GRAYLOG_SERVER_JAVA_OPTS}"
16 | export GRAYLOG_SERVER_JAVA_OPTS
17 | fi
18 | 
19 | # Convert all environment variables with names ending in __FILE into the content of
20 | # the file that they point at and use the name without the trailing __FILE.
21 | # This can be used to carry in Docker secrets.
22 | for VAR_NAME in $(env | grep '^GRAYLOG_[^=]\+__FILE=.\+' | sed -r 's/^(GRAYLOG_[^=]*)__FILE=.*/\1/g'); do
23 | VAR_NAME_FILE="${VAR_NAME}__FILE"
24 | if [ "${!VAR_NAME}" ]; then
25 | echo >&2 "ERROR: Both ${VAR_NAME} and ${VAR_NAME_FILE} are set but are exclusive"
26 | exit 1
27 | fi
28 | VAR_FILENAME="${!VAR_NAME_FILE}"
29 | echo "Getting secret ${VAR_NAME} from ${VAR_FILENAME}"
30 | if [ ! -r "${VAR_FILENAME}" ]; then
31 | echo >&2 "ERROR: ${VAR_FILENAME} does not exist or is not readable"
32 | exit 1
33 | fi
34 | export "${VAR_NAME}"="$(< "${VAR_FILENAME}")"
35 | unset "${VAR_NAME_FILE}"
36 | done
37 | 
38 | 
39 | # Delete outdated PID file
40 | [[ -e /tmp/graylog.pid ]] && rm --force /tmp/graylog.pid
41 | 
42 | # Check if we are inside Kubernetes. Graylog should run as a StatefulSet, and the $POD_NAME env var should be defined like this:
43 | # env:
44 | # - name: POD_NAME
45 | # valueFrom:
46 | # fieldRef:
47 | # fieldPath: metadata.name
48 | # The first StatefulSet member's pod name ends with -0, so
49 | if [[ -n "${POD_NAME}" ]]
50 | then
51 | if echo "${POD_NAME}" | grep "\\-0$" >/dev/null
52 | then
53 | export GRAYLOG_IS_LEADER="true"
54 | else
55 | export GRAYLOG_IS_LEADER="false"
56 | fi
57 | fi
58 | 
59 | # Check if we are inside a Nomad cluster.
60 | # The first member gets alloc index 0, so
61 | if [[ -n "${NOMAD_ALLOC_INDEX}" ]]; then
62 | if [ "${NOMAD_ALLOC_INDEX}" = "0" ]; then
63 | export GRAYLOG_IS_LEADER="true"
64 | else
65 | export GRAYLOG_IS_LEADER="false"
66 | fi
67 | fi
68 | 
69 | # Merge plugin dirs to allow mounting of /plugin as a volume
70 | export GRAYLOG_PLUGIN_DIR=${GRAYLOG_HOME}/plugins-merged
71 | rm -f ${GRAYLOG_PLUGIN_DIR}/*
72 | find ${GRAYLOG_HOME}/plugins-default/ -type f -exec cp {} ${GRAYLOG_PLUGIN_DIR}/ \;
73 | find ${GRAYLOG_HOME}/plugin ! -readable -prune -o -type f -a -readable -exec cp {} ${GRAYLOG_PLUGIN_DIR}/ \;
74 | 
75 | 
76 | setup() {
77 | # Create data directories
78 | for d in journal log plugin config contentpacks
79 | do
80 | dir=${GRAYLOG_HOME}/data/${d}
81 | [[ -d "${dir}" ]] || mkdir -p "${dir}"
82 | 
83 | if [[ "$(stat --format='%U:%G' "$dir")" != 'graylog:graylog' ]] && [[ -w "$dir" ]]; then
84 | chown -R graylog:graylog "$dir" || echo "Warning: cannot change owner to graylog:graylog"
85 | fi
86 | done
87 | }
88 | 
89 | graylog() {
90 | 
91 | exec "${JAVA_HOME}/bin/java" \
92 | ${GRAYLOG_SERVER_JAVA_OPTS} \
93 | -jar \
94 | -Dlog4j.configurationFile="${GRAYLOG_HOME}/data/config/log4j2.xml" \
95 | -Djava.library.path="${GRAYLOG_HOME}/lib/sigar/" \
96 | -Dgraylog2.installation_source=docker \
97 | "${GRAYLOG_HOME}/graylog.jar" \
98 | "$@" \
99 | -f "${GRAYLOG_HOME}/data/config/graylog.conf"
100 | }
101 | 
102 | run() {
103 | setup
104 | 
105 | # If called without arguments, assume "server" for backwards compatibility
106 | if [ $# = 0 ]; then
107 | graylog server "$@"
108 | fi
109 | 
110 | graylog "$@"
111 | }
112 | 
113 | run "$@"
114 | 
--------------------------------------------------------------------------------
/docker/datanode/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:22.04
2 | 
3 | # Build time variables - not persistent in the container image
4 | ARG GRAYLOG_VERSION
5 | ARG VCS_REF
6 | ARG BUILD_DATE
7 | ARG DOWNLOAD_URL_X64=https://downloads.graylog.org/releases/graylog-datanode/graylog-datanode-${GRAYLOG_VERSION}-linux-x64.tgz
8 | ARG DOWNLOAD_URL_AARCH64=https://downloads.graylog.org/releases/graylog-datanode/graylog-datanode-${GRAYLOG_VERSION}-linux-aarch64.tgz
9 | ARG DEBIAN_FRONTEND=noninteractive
10 | ARG TARGETPLATFORM
11 | 
12 | # We default to an empty file instead of leaving LOCAL_BUILD_TGZ blank
13 | # because Docker would execute the following COPY command with a blank
14 | # value:
15 | # COPY "" "/tmp/datanode-local.tar.gz"
16 | # That creates a /tmp/datanode-local.tar.gz *directory* in the container with
17 | # all files from the build context.
18 | ARG LOCAL_BUILD_TGZ=.empty 19 | 20 | # Will be persisted in the container image 21 | ENV GDN_APP_ROOT=/usr/share/graylog-datanode 22 | ENV GDN_DATA_ROOT=/var/lib/graylog-datanode 23 | ENV GDN_CONFIG_DIR=/etc/graylog/datanode 24 | ENV GDN_CONFIG_FILE=$GDN_CONFIG_DIR/datanode.conf 25 | ENV GDN_FEATURE_FLAG_FILE=$GDN_CONFIG_DIR/feature-flag.conf 26 | ENV GDN_JVM_OPTIONS_FILE=$GDN_CONFIG_DIR/jvm.options 27 | ENV GDN_LOG4J_CONFIG_FILE=$GDN_CONFIG_DIR/log4j2.xml 28 | ENV GDN_GROUP=graylog 29 | ENV GDN_USER=graylog 30 | 31 | SHELL ["/bin/bash", "-o", "pipefail", "-c"] 32 | 33 | RUN groupadd -r "$GDN_GROUP" \ 34 | && useradd --no-log-init -r \ 35 | -s "/usr/sbin/nologin" \ 36 | -d "$GDN_DATA_ROOT" \ 37 | -g "$GDN_GROUP" \ 38 | "$GDN_USER" \ 39 | && install -d -o "$GDN_USER" -g "$GDN_GROUP" -m 0700 "$GDN_DATA_ROOT" \ 40 | && install -d -o "$GDN_USER" -g "$GDN_GROUP" -m 0700 "$GDN_CONFIG_DIR" \ 41 | && touch "$GDN_CONFIG_FILE" \ 42 | && chown "$GDN_USER":"$GDN_GROUP" "$GDN_CONFIG_FILE" 43 | 44 | # hadolint ignore=DL3008 45 | RUN apt-get update \ 46 | && apt-get install -y --no-install-recommends \ 47 | ca-certificates \ 48 | curl \ 49 | tini \ 50 | && apt-get clean \ 51 | && rm -rf \ 52 | /tmp/* \ 53 | /usr/share/doc/* \ 54 | /usr/share/X11 \ 55 | /var/cache/debconf/* \ 56 | /var/lib/apt/lists/* \ 57 | /var/log/* 58 | 59 | RUN install -d -o root -g root -m 0755 "$GDN_APP_ROOT" 60 | 61 | COPY "${LOCAL_BUILD_TGZ}" "/tmp/datanode-local.tar.gz" 62 | 63 | # An empty /tmp/datanode-local.tar.gz file indicates that we don't use a 64 | # custom LOCAL_BUILD_TGZ file. 65 | RUN if [ -f /tmp/datanode-local.tar.gz ] && [ -s /tmp/datanode-local.tar.gz ]; then \ 66 | mv /tmp/datanode-local.tar.gz /tmp/datanode.tar.gz; \ 67 | fi; \ 68 | if [ "${LOCAL_BUILD_TGZ}" = ".empty" ]; then \ 69 | if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \ 70 | export DOWNLOAD_URL="$DOWNLOAD_URL_AARCH64"; \ 71 | else \ 72 | export DOWNLOAD_URL="$DOWNLOAD_URL_X64"; \ 73 | fi; \ 74 | curl -fsSL --retry 3 "$DOWNLOAD_URL" -o /tmp/datanode.tar.gz; \ 75 | fi; \ 76 | tar -C "$GDN_APP_ROOT" --strip-components=1 -xzf /tmp/datanode.tar.gz \ 77 | && rm -rf /tmp/datanode-local.tar.gz /tmp/datanode.tar.gz \ 78 | && mv "$GDN_APP_ROOT/config/"* "$GDN_CONFIG_DIR"/ \ 79 | && rmdir "$GDN_APP_ROOT/config" \ 80 | && chown -R "$GDN_USER":"$GDN_GROUP" "$GDN_CONFIG_DIR" \ 81 | && chown -R root:root "$GDN_APP_ROOT" 82 | 83 | RUN apt-get purge -y curl \ 84 | && apt-get autoremove -y --purge \ 85 | && rm -rf /var/log/* 86 | 87 | COPY docker/datanode/entrypoint.sh /entrypoint.sh 88 | 89 | ENTRYPOINT ["tini", "--", "/entrypoint.sh"] 90 | CMD ["graylog-datanode"] 91 | 92 | # Put at the very end to avoid rebuilding all layers for ARG changes like 93 | # BUILD_DATE, VCS_REF, and GRAYLOG_VERSION. 94 | LABEL org.opencontainers.image.authors="Graylog, Inc. " 95 | LABEL org.opencontainers.image.created="$BUILD_DATE" 96 | LABEL org.opencontainers.image.description="Container image to run the Graylog Data Node" 97 | LABEL org.opencontainers.image.documentation="https://docs.graylog.org/" 98 | LABEL org.opencontainers.image.licenses="SSPL-1.0" 99 | LABEL org.opencontainers.image.revision="$VCS_REF" 100 | LABEL org.opencontainers.image.source="https://github.com/Graylog2/graylog-docker" 101 | LABEL org.opencontainers.image.title="Graylog Data Node" 102 | LABEL org.opencontainers.image.url="https://www.graylog.org/" 103 | LABEL org.opencontainers.image.vendor="Graylog, Inc."
104 | LABEL org.opencontainers.image.version="$GRAYLOG_VERSION" 105 | -------------------------------------------------------------------------------- /docker/datanode/README.md: -------------------------------------------------------------------------------- 1 | # Graylog Data Node Docker Image 2 | 3 | [![Docker Stars](https://img.shields.io/docker/stars/graylog/graylog-datanode.svg)][hub] [![Docker Pulls](https://img.shields.io/docker/pulls/graylog/graylog-datanode.svg)][hub] 4 | 5 | [hub]: https://hub.docker.com/r/graylog/graylog/ 6 | 7 | The latest stable version of Graylog Data Node is **`5.2.0`**. 8 | 9 | ## What is Graylog Data Node? 10 | 11 | Graylog is a centralized logging solution that enables aggregating and searching through logs. 12 | The Data Node is a management component for OpenSearch that configures and adapts it for use with Graylog. 13 | It ensures that the data layer for Graylog is properly secured with certificates, and provides cluster membership handling and adoption of new nodes. 14 | The Data Node also makes sure the appropriate OpenSearch version and the necessary extensions are in place so that Graylog can function as intended. 15 | 16 | 17 | ## Image Details 18 | 19 | Images are available for the `linux/amd64` and `linux/arm64` platforms. All images are based on the latest [Eclipse Temurin image](https://hub.docker.com/_/eclipse-temurin) (JRE + Ubuntu LTS variant) available at build time. 20 | 21 | #### `graylog/graylog-datanode` 22 | 23 | 24 | | Java Version | Platform | Tags | 25 | |---|---|---------------------------| 26 | | OpenJDK 17 | `linux/amd64`, `linux/arm64` | `5.2`, `5.2.0`, `5.2.0-1` | 27 | 28 | 29 | > Note: There is no 'latest' tag. You'll need to specify which version you want. 30 | 31 | 32 | ## Configuration 33 | 34 | Please refer to the [Graylog Docker documentation](https://docs.graylog.org/docs/docker) for a comprehensive overview and detailed description of the Graylog Docker image. 35 | 36 | If you want to quickly spin up an instance for testing, you can use our [Docker Compose template](https://github.com/Graylog2/docker-compose). 37 | 38 | Notably, this image **requires** one important configuration option to be set (although in practice you will likely need to set more): 39 | * `password_secret` (environment variable `GRAYLOG_DATANODE_PASSWORD_SECRET`) 40 | * A secret shared with the Graylog server. Please refer to the Graylog docs on how to create it (and then copy it over). 41 | 42 | Every [Graylog DataNode configuration option](https://docs.graylog.org/docs/server-conf) can be set via environment variable. To get the environment variable name for a given configuration option, simply prefix the option name with `GRAYLOG_DATANODE_` and put it all in upper case (see the example below). Another option is to store the configuration file outside of the container and edit it directly.
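For example (a minimal sketch using option names that already appear in this README), the `node_name` and `mongodb_uri` options map to the following environment variables:

```yaml
environment:
  GRAYLOG_DATANODE_NODE_NAME: "datanode-1"
  GRAYLOG_DATANODE_MONGODB_URI: "mongodb://mongodb:27017/graylog"
```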
43 | 44 | ### Docker Compose Example 45 | 46 | ```yaml 47 | --- 48 | services: 49 | graylog-datanode: 50 | # hostname: "datanode" 51 | image: "graylog/graylog-datanode:5.2" 52 | depends_on: 53 | - "mongodb" 54 | environment: 55 | GRAYLOG_DATANODE_PASSWORD_SECRET: "" 56 | GRAYLOG_DATANODE_ROOT_USERNAME: "" 57 | GRAYLOG_DATANODE_MONGODB_URI: "mongodb://mongodb:27017/graylog" 58 | ulimits: 59 | memlock: 60 | hard: -1 61 | soft: -1 62 | nofile: 63 | soft: 65536 64 | hard: 65536 65 | ports: 66 | - "127.0.0.1:8999:8999" # Graylog Data Node REST API 67 | - "127.0.0.1:9200:9200" # OpenSearch REST API 68 | - "127.0.0.1:9300:9300" # OpenSearch Transport API 69 | volumes: 70 | - "graylog-datanode:/var/lib/graylog-datanode" 71 | 72 | mongodb: 73 | image: "mongo:5.0" 74 | ports: 75 | - "127.0.0.1:27017:27017" 76 | volumes: 77 | - "mongodb:/data/db" 78 | 79 | volumes: 80 | graylog-datanode: 81 | mongodb: 82 | 83 | ``` 84 | 85 | If you only want to run the Data Node as above and connect to it from a Graylog server running in IntelliJ during development, enable `hostname: "datanode"` in `docker-compose.yml` and add `datanode` as an alias for your localhost IPv4/IPv6 addresses. 86 | 87 | ### Environment Variables 88 | 89 | | Variable | Default | Required | Description | 90 | | :--- | :--- | :--- |:----------------------------------------------------------| 91 | | `GRAYLOG_DATANODE_PASSWORD_SECRET` | none | yes | Password secret to seed secret storage. Must be the same value as the `password_secret` in the Graylog server configuration. | 92 | | `GRAYLOG_DATANODE_ROOT_USERNAME` | `admin` | yes | Name of the root user. | 93 | | `GRAYLOG_DATANODE_MONGODB_URI` | none | yes | URI to the MongoDB instance and database. | 94 | | `GRAYLOG_DATANODE_DATA_DIR` | `/var/lib/graylog-datanode` | no | The data root directory. (e.g., OpenSearch data) | 95 | | `GRAYLOG_DATANODE_NODE_NAME` | container hostname | no | The OpenSearch node name. | 96 | | `GRAYLOG_DATANODE_OPENSEARCH_DISCOVERY_SEED_HOSTS` | none | no | tbd | 97 | 98 | 99 | 100 | 101 | 102 | ## Documentation 103 | 104 | Documentation for Graylog is hosted [here](https://docs.graylog.org/). Please read through the docs and familiarize yourself with the functionality before opening an [issue on GitHub](https://github.com/Graylog2/graylog2-server/issues). 105 | 106 | ## License 107 | 108 | Graylog itself is licensed under the Server Side Public License (SSPL), see [license information](https://www.mongodb.com/licensing/server-side-public-license). 109 | 110 | This Docker image is licensed under the Apache 2.0 license, see [LICENSE](LICENSE). 111 | -------------------------------------------------------------------------------- /docker/datanode/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eo pipefail 4 | 5 | # Execute the given command instead of running the datanode. (e.g., bash) 6 | if [ "$1" != "graylog-datanode" ]; then 7 | exec "$@" 8 | fi 9 | 10 | # Convert all environment variables with names ending in __FILE into the content of 11 | # the file that they point at and use the name without the trailing __FILE. 12 | # This can be used to carry in Docker secrets.
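# For example, with a (hypothetical) Docker Compose secret named
# gl_password_secret:
#
#   environment:
#     GRAYLOG_DATANODE_PASSWORD_SECRET__FILE: /run/secrets/gl_password_secret
#   secrets:
#     - gl_password_secret
#
# the loop below exports GRAYLOG_DATANODE_PASSWORD_SECRET with the content
# of /run/secrets/gl_password_secret.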
13 | for VAR_NAME in $(env | grep '^GRAYLOG_[^=]\+__FILE=.\+' | sed -r 's/^(GRAYLOG_[^=]*)__FILE=.*/\1/g'); do 14 | VAR_NAME_FILE="${VAR_NAME}__FILE" 15 | if [ "${!VAR_NAME}" ]; then 16 | echo >&2 "ERROR: Both ${VAR_NAME} and ${VAR_NAME_FILE} are set but are mutually exclusive" 17 | exit 1 18 | fi 19 | VAR_FILENAME="${!VAR_NAME_FILE}" 20 | echo "Getting secret ${VAR_NAME} from ${VAR_FILENAME}" 21 | if [ ! -r "${VAR_FILENAME}" ]; then 22 | echo >&2 "ERROR: ${VAR_FILENAME} does not exist or is not readable" 23 | exit 1 24 | fi 25 | export "${VAR_NAME}"="$(< "${VAR_FILENAME}")" 26 | unset VAR_NAME_FILE VAR_FILENAME 27 | done 28 | 29 | check_env() { 30 | local name="$1" 31 | 32 | if [ -z "${!name}" ]; then 33 | echo "ERROR: Missing $name environment variable" 34 | exit 1 35 | fi 36 | } 37 | 38 | check_env "GDN_APP_ROOT" 39 | check_env "GDN_DATA_ROOT" 40 | check_env "GDN_CONFIG_FILE" 41 | check_env "GDN_FEATURE_FLAG_FILE" 42 | check_env "GDN_JVM_OPTIONS_FILE" 43 | check_env "GDN_USER" 44 | check_env "GDN_GROUP" 45 | check_env "GRAYLOG_DATANODE_PASSWORD_SECRET" 46 | check_env "GRAYLOG_DATANODE_MONGODB_URI" 47 | 48 | # Default Graylog settings 49 | export GRAYLOG_DATANODE_BIN_DIR="${GDN_APP_ROOT}/bin" 50 | export GRAYLOG_DATANODE_DATA_DIR="${GRAYLOG_DATANODE_DATA_DIR:-$GDN_DATA_ROOT}" 51 | export GRAYLOG_DATANODE_INSTALLATION_SOURCE="${GRAYLOG_DATANODE_INSTALLATION_SOURCE:-container}" 52 | export GRAYLOG_DATANODE_NODE_ID_FILE="${GRAYLOG_DATANODE_NODE_ID_FILE:-$GDN_DATA_ROOT/node-id}" 53 | export GRAYLOG_DATANODE_OPENSEARCH_CONFIG_LOCATION="${GRAYLOG_DATANODE_OPENSEARCH_CONFIG_LOCATION:-"$GRAYLOG_DATANODE_DATA_DIR/opensearch/config"}" 54 | export GRAYLOG_DATANODE_OPENSEARCH_DATA_LOCATION="${GRAYLOG_DATANODE_OPENSEARCH_DATA_LOCATION:-"$GRAYLOG_DATANODE_DATA_DIR/opensearch/data"}" 55 | export GRAYLOG_DATANODE_OPENSEARCH_LOGS_LOCATION="${GRAYLOG_DATANODE_OPENSEARCH_LOGS_LOCATION:-"$GRAYLOG_DATANODE_DATA_DIR/opensearch/logs"}" 56 | export GRAYLOG_DATANODE_OPENSEARCH_HTTP_PORT="${GRAYLOG_DATANODE_OPENSEARCH_HTTP_PORT:-9200}" 57 | export GRAYLOG_DATANODE_OPENSEARCH_TRANSPORT_PORT="${GRAYLOG_DATANODE_OPENSEARCH_TRANSPORT_PORT:-9300}" 58 | export GRAYLOG_DATANODE_NODE_NAME="${GRAYLOG_DATANODE_NODE_NAME:-"$HOSTNAME"}" 59 | 60 | # TODO: Bundle the OpenSearch version property in the tarball so we can read it 61 | opensearch_dist="$(find "$GDN_APP_ROOT/dist" -maxdepth 1 -name 'opensearch-*-linux-*' -type d | sort -V | tail -1)" 62 | 63 | if [ -z "$opensearch_dist" ]; then 64 | echo "ERROR: No OpenSearch distribution found in $GDN_APP_ROOT/dist" 65 | exit 1 66 | fi 67 | 68 | export GRAYLOG_DATANODE_OPENSEARCH_LOCATION="$opensearch_dist" 69 | 70 | # Settings for the graylog-datanode script 71 | export DATANODE_JVM_OPTIONS_FILE="${DATANODE_JVM_OPTIONS_FILE:-"$GDN_JVM_OPTIONS_FILE"}" 72 | export DATANODE_LOG4J_CONFIG_FILE="${DATANODE_LOG4J_CONFIG_FILE:-"$GDN_LOG4J_CONFIG_FILE"}" 73 | export JAVA_OPTS="$JAVA_OPTS" 74 | 75 | # Create required OpenSearch directories 76 | install -d -o "$GDN_USER" -g "$GDN_GROUP" -m 0700 \ 77 | "$GRAYLOG_DATANODE_OPENSEARCH_CONFIG_LOCATION" \ 78 | "$GRAYLOG_DATANODE_OPENSEARCH_DATA_LOCATION" \ 79 | "$GRAYLOG_DATANODE_OPENSEARCH_LOGS_LOCATION" 80 | 81 | # Make sure the data node can write to the data dir 82 | chown -R "$GDN_USER":"$GDN_GROUP" "$GRAYLOG_DATANODE_DATA_DIR" 83 | 84 | # Starting the data node with dropped privileges 85 | exec setpriv --reuid="$GDN_USER" --regid="$GDN_GROUP" --init-groups \ 86 | "${GRAYLOG_DATANODE_BIN_DIR}/graylog-datanode" \ 87 | datanode \ 88 | -f 
"$GDN_CONFIG_FILE" \ 89 | -ff "$GDN_FEATURE_FLAG_FILE" 90 | -------------------------------------------------------------------------------- /docker/datanode/hooks/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Custom build for Docker Hub 4 | # see: https://medium.com/microscaling-systems/labelling-automated-builds-on-docker-hub-f3d073fb8e1 5 | 6 | cd ../.. 7 | 8 | apt-get install -y python3-pip 9 | pip3 install pyyaml 10 | 11 | graylog_version="$(./release.py --get-graylog-version)" 12 | build_args=() 13 | 14 | if [[ "$graylog_version" =~ SNAPSHOT ]]; then 15 | download_url_x64="$(curl -fsSL -G -d artifact=graylog-datanode-linux-x64 -d limit=1 https://downloads.graylog.org/nightly-builds | jq -r '.artifacts[0].url')" 16 | download_url_aarch64="$(sed -e 's,linux-x64,linux-aarch64,' <<< "$download_url_x64")" 17 | download_url="$(sed -e 's,-linux-x64,,' <<< "$download_url_x64")" 18 | 19 | build_args+=(--build-arg DOWNLOAD_URL="$download_url") 20 | build_args+=(--build-arg DOWNLOAD_URL_X64="$download_url_x64") 21 | build_args+=(--build-arg DOWNLOAD_URL_AARCH64="$download_url_aarch64") 22 | fi 23 | 24 | # Build Graylog 25 | docker build --build-arg VCS_REF="$(git rev-parse --short HEAD)" \ 26 | --build-arg GRAYLOG_VERSION="$graylog_version" \ 27 | --build-arg BUILD_DATE="$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \ 28 | ${build_args[*]} \ 29 | --file docker/datanode/Dockerfile \ 30 | --tag $IMAGE_NAME . 31 | -------------------------------------------------------------------------------- /docker/enterprise/Dockerfile: -------------------------------------------------------------------------------- 1 | # layer for download and verifying 2 | FROM ubuntu:jammy as graylog-downloader 3 | 4 | ARG VCS_REF 5 | ARG BUILD_DATE 6 | ARG GRAYLOG_VERSION 7 | ARG GRAYLOG_PRODUCT=graylog-enterprise 8 | ARG GRAYLOG_HOME=/usr/share/graylog 9 | ARG GRAYLOG_UID=1100 10 | ARG GRAYLOG_GID=1100 11 | ARG TARGETPLATFORM 12 | 13 | # We default to an empty file instead of leaving LOCAL_BUILD_TGZ blank 14 | # because Docker would execute the following COPY command with a blank 15 | # value: 16 | # COPY "" "/tmp/graylog.tgz" 17 | # That creates a /tmp/graylog.tgz *directory* in the container with 18 | # all files from the build context. 19 | ARG LOCAL_BUILD_TGZ=.empty 20 | 21 | # Allows building a custom artifact. (e.g., snapshot builds) 22 | ARG DOWNLOAD_URL=none 23 | 24 | WORKDIR /tmp 25 | 26 | # hadolint ignore=DL3008,DL3015 27 | RUN \ 28 | apt-get update > /dev/null && \ 29 | apt-get upgrade -y > /dev/null && \ 30 | apt-get install --assume-yes \ 31 | ca-certificates \ 32 | curl > /dev/null 33 | 34 | COPY build/fetch-and-extract.sh /bin/fetch-and-extract 35 | 36 | RUN if [ "${LOCAL_BUILD_TGZ}" = ".empty" ] && [ "${DOWNLOAD_URL}" = "none" ]; then \ 37 | chmod +x /bin/fetch-and-extract \ 38 | && fetch-and-extract \ 39 | "https://downloads.graylog.org/releases/${GRAYLOG_PRODUCT}/${GRAYLOG_PRODUCT}-${GRAYLOG_VERSION}.tgz" \ 40 | "https://downloads.graylog.org/releases/${GRAYLOG_PRODUCT}/${GRAYLOG_PRODUCT}-${GRAYLOG_VERSION}.tgz.sha256.txt"; \ 41 | fi 42 | 43 | RUN if [ "${DOWNLOAD_URL}" != "none" ]; then \ 44 | chmod +x /bin/fetch-and-extract && fetch-and-extract "${DOWNLOAD_URL}"; \ 45 | fi 46 | 47 | COPY "${LOCAL_BUILD_TGZ}" "/tmp/graylog.tgz" 48 | 49 | # An empty /tmp/graylog.tgz file indicates that we don't use a 50 | # custom LOCAL_BUILD_TGZ file. 
51 | RUN if [ -f "/tmp/graylog.tgz" ] && [ -s "/tmp/graylog.tgz" ]; then \ 52 | mkdir /opt/graylog && \ 53 | tar --extract --gzip --file "/tmp/graylog.tgz" --strip-components=1 --directory /opt/graylog; \ 54 | rm -rf /tmp/graylog.tgz; \ 55 | fi 56 | 57 | # Reduce image size by removing large cross platform chromedriver binaries 58 | # hadolint ignore=DL3059 59 | RUN if [ "${TARGETPLATFORM}" != "linux/arm64" ]; then \ 60 | rm -f /opt/graylog/bin/*_arm64; \ 61 | fi 62 | # hadolint ignore=DL3059 63 | RUN if [ "${TARGETPLATFORM}" = "linux/arm64" ]; then \ 64 | rm -f /opt/graylog/bin/*_amd64; \ 65 | fi 66 | 67 | # hadolint ignore=DL3059 68 | RUN \ 69 | install \ 70 | --directory \ 71 | --mode=0750 \ 72 | /opt/graylog/data \ 73 | /opt/graylog/data/journal \ 74 | /opt/graylog/data/log \ 75 | /opt/graylog/data/config \ 76 | /opt/graylog/data/plugin \ 77 | /opt/graylog/data/data \ 78 | /opt/graylog/data/scripts 79 | 80 | 81 | RUN mv /opt/graylog ${GRAYLOG_HOME} && chown -R ${GRAYLOG_UID}:${GRAYLOG_GID} ${GRAYLOG_HOME} 82 | RUN mv ${GRAYLOG_HOME}/plugin ${GRAYLOG_HOME}/plugins-default 83 | RUN install -d -o "${GRAYLOG_UID}" -g "${GRAYLOG_GID}" -m 0755 ${GRAYLOG_HOME}/plugins-merged && \ 84 | install -d -o "${GRAYLOG_UID}" -g "${GRAYLOG_GID}" -m 0755 ${GRAYLOG_HOME}/plugin 85 | 86 | COPY config ${GRAYLOG_HOME}/data/config 87 | 88 | # ------------------------------------------------------------------------------------------------- 89 | # 90 | # final layer 91 | FROM eclipse-temurin:17-jre-jammy 92 | 93 | ARG VCS_REF 94 | ARG GRAYLOG_VERSION 95 | ARG BUILD_DATE 96 | ARG GRAYLOG_HOME=/usr/share/graylog 97 | ARG GRAYLOG_USER=graylog 98 | ARG GRAYLOG_UID=1100 99 | ARG GRAYLOG_GROUP=graylog 100 | ARG GRAYLOG_GID=1100 101 | 102 | COPY --chown=${GRAYLOG_UID}:${GRAYLOG_GID} --from=graylog-downloader ${GRAYLOG_HOME} ${GRAYLOG_HOME} 103 | 104 | WORKDIR ${GRAYLOG_HOME} 105 | 106 | SHELL ["/bin/bash", "-o", "pipefail", "-c"] 107 | # hadolint ignore=DL3027,DL3008 108 | RUN \ 109 | echo "export BUILD_DATE=${BUILD_DATE}" >> /etc/profile.d/graylog.sh && \ 110 | echo "export GRAYLOG_VERSION=${GRAYLOG_VERSION}" >> /etc/profile.d/graylog.sh && \ 111 | echo "export GRAYLOG_SERVER_JAVA_OPTS='-Dlog4j2.formatMsgNoLookups=true -Djdk.tls.acknowledgeCloseNotify=true -XX:+UnlockExperimentalVMOptions -XX:-OmitStackTraceInFastThrow -XX:+UseG1GC -server'" >> /etc/profile.d/graylog.sh && \ 112 | echo "export GRAYLOG_HOME=${GRAYLOG_HOME}" >> /etc/profile.d/graylog.sh && \ 113 | echo "export GRAYLOG_USER=${GRAYLOG_USER}" >> /etc/profile.d/graylog.sh && \ 114 | echo "export GRAYLOG_GROUP=${GRAYLOG_GROUP}" >> /etc/profile.d/graylog.sh && \ 115 | echo "export GRAYLOG_UID=${GRAYLOG_UID}" >> /etc/profile.d/graylog.sh && \ 116 | echo "export GRAYLOG_GID=${GRAYLOG_GID}" >> /etc/profile.d/graylog.sh && \ 117 | echo "export PATH=${GRAYLOG_HOME}/bin:${PATH}" >> /etc/profile.d/graylog.sh && \ 118 | apt-get update > /dev/null && \ 119 | apt-get upgrade -y > /dev/null && \ 120 | apt-get install --no-install-recommends --assume-yes \ 121 | curl \ 122 | tini \ 123 | libcap2-bin \ 124 | libglib2.0-0 \ 125 | libx11-6 \ 126 | libnss3 \ 127 | wait-for-it \ 128 | fonts-dejavu \ 129 | fontconfig > /dev/null && \ 130 | addgroup \ 131 | --gid "${GRAYLOG_GID}" \ 132 | --quiet \ 133 | "${GRAYLOG_GROUP}" && \ 134 | adduser \ 135 | --disabled-password \ 136 | --disabled-login \ 137 | --gecos '' \ 138 | --home ${GRAYLOG_HOME} \ 139 | --uid "${GRAYLOG_UID}" \ 140 | --gid "${GRAYLOG_GID}" \ 141 | --quiet \ 142 | "${GRAYLOG_USER}" && \ 143 | setcap 
'cap_net_bind_service=+ep' "${JAVA_HOME}/bin/java" && \ 144 | # https://github.com/docker-library/openjdk/blob/da594d91b0364d5f1a32e0ce6b4d3fd8a9116844/8/jdk/slim-bullseye/Dockerfile#L105 145 | # https://github.com/docker-library/openjdk/issues/331#issuecomment-498834472 146 | find "$JAVA_HOME/lib" -name '*.so' -exec dirname '{}' ';' | sort -u > /etc/ld.so.conf.d/docker-openjdk.conf && \ 147 | ldconfig && \ 148 | apt-get remove --assume-yes --purge \ 149 | apt-utils > /dev/null && \ 150 | rm -f /etc/apt/sources.list.d/* && \ 151 | apt-get clean > /dev/null && \ 152 | apt autoremove --assume-yes > /dev/null && \ 153 | rm -rf \ 154 | /tmp/* \ 155 | /var/cache/debconf/* \ 156 | /var/lib/apt/lists/* \ 157 | /var/log/* \ 158 | /usr/share/X11 \ 159 | /usr/share/doc/* 2> /dev/null 160 | 161 | COPY docker-entrypoint.sh / 162 | COPY health_check.sh / 163 | 164 | EXPOSE 9000 165 | USER ${GRAYLOG_USER} 166 | VOLUME ${GRAYLOG_HOME}/data 167 | ENTRYPOINT ["tini", "--", "/docker-entrypoint.sh"] 168 | CMD ["server"] 169 | 170 | # add healthcheck 171 | HEALTHCHECK \ 172 | --interval=10s \ 173 | --timeout=2s \ 174 | --retries=12 \ 175 | CMD /health_check.sh 176 | 177 | # ------------------------------------------------------------------------------------------------- 178 | 179 | LABEL maintainer="Graylog, Inc. " \ 180 | org.label-schema.name="Graylog Enterprise Docker Image" \ 181 | org.label-schema.description="Official Graylog Enterprise Docker Image" \ 182 | org.label-schema.url="https://www.graylog.org/" \ 183 | org.label-schema.vcs-ref=${VCS_REF} \ 184 | org.label-schema.vcs-url="https://github.com/Graylog2/graylog-docker" \ 185 | org.label-schema.vendor="Graylog, Inc." \ 186 | org.label-schema.version=${GRAYLOG_VERSION} \ 187 | org.label-schema.schema-version="1.0" \ 188 | org.label-schema.build-date=${BUILD_DATE} 189 | -------------------------------------------------------------------------------- /docker/enterprise/hooks/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Custom build for Docker Hub 4 | # see: https://medium.com/microscaling-systems/labelling-automated-builds-on-docker-hub-f3d073fb8e1 5 | 6 | cd ../.. 7 | 8 | patch config/graylog.conf patches/graylog-server.conf.patch 9 | 10 | apt-get install -y python3-pip 11 | pip3 install pyyaml 12 | 13 | graylog_version="$(./release.py --get-graylog-version)" 14 | build_args=() 15 | 16 | if [[ "$graylog_version" =~ SNAPSHOT ]]; then 17 | download_url_x64="$(curl -fsSL -G -d artifact=graylog-enterprise-linux-x64 -d limit=1 https://downloads.graylog.org/nightly-builds | jq -r '.artifacts[0].url')" 18 | download_url_aarch64="$(sed -e 's,linux-x64,linux-aarch64,' <<< "$download_url_x64")" 19 | download_url="$(sed -e 's,-linux-x64,,' <<< "$download_url_x64")" 20 | 21 | build_args+=(--build-arg DOWNLOAD_URL="$download_url") 22 | build_args+=(--build-arg DOWNLOAD_URL_X64="$download_url_x64") 23 | build_args+=(--build-arg DOWNLOAD_URL_AARCH64="$download_url_aarch64") 24 | fi 25 | 26 | # Build Graylog Enterprise & Integration Included 27 | docker build --build-arg VCS_REF="$(git rev-parse --short HEAD)" \ 28 | --build-arg GRAYLOG_VERSION="$graylog_version" \ 29 | --build-arg BUILD_DATE="$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \ 30 | ${build_args[*]} \ 31 | --file docker/enterprise/Dockerfile \ 32 | --tag $IMAGE_NAME . 
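# A manual invocation might look like this (hypothetical tag; Docker Hub's
# autobuild normally supplies IMAGE_NAME and runs the hook from its own
# directory, which the "cd ../.." above relies on):
#
#   cd docker/enterprise/hooks && IMAGE_NAME=graylog/graylog-enterprise:dev ./build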
33 | -------------------------------------------------------------------------------- /docker/forwarder/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM eclipse-temurin:17-jre-jammy 2 | 3 | ARG VCS_REF 4 | ARG BUILD_DATE 5 | ARG GRAYLOG_FORWARDER_VERSION 6 | ARG GRAYLOG_FORWARDER_IMAGE_VERSION 7 | ARG GRAYLOG_FORWARDER_ROOT=/usr/share/graylog-forwarder 8 | ARG GRAYLOG_FORWARDER_FILE=/tmp/graylog-forwarder-bin.tar.gz 9 | ARG DEBIAN_FRONTEND=noninteractive 10 | 11 | ENV FORWARDER_CONFIG_FILE=/etc/graylog/forwarder/forwarder.conf 12 | ENV FORWARDER_JVM_OPTIONS_FILE=/etc/graylog/forwarder/jvm.options 13 | ENV FORWARDER_DATA_DIR=/var/lib/graylog-forwarder 14 | 15 | # We are using an empty forwarder.conf file so we are setting defaults 16 | # via environment variables: 17 | ENV GRAYLOG_BIN_DIR=/usr/share/graylog-forwarder/bin 18 | ENV GRAYLOG_PLUGIN_DIR=/usr/share/graylog-forwarder/plugin 19 | ENV GRAYLOG_DATA_DIR=/var/lib/graylog-forwarder/data 20 | ENV GRAYLOG_MESSAGE_JOURNAL_DIR=/var/lib/graylog-forwarder/journal 21 | ENV GRAYLOG_NODE_ID_FILE=/var/lib/graylog-forwarder/node-id 22 | 23 | # hadolint ignore=DL3008 24 | RUN apt-get update && \ 25 | apt-get -y install --no-install-recommends apt-utils && \ 26 | apt-get -y install --no-install-recommends ca-certificates curl tini && \ 27 | apt-get clean && \ 28 | rm -rf /var/lib/apt/lists/* 29 | 30 | SHELL ["/bin/bash", "-o", "pipefail", "-c"] 31 | 32 | RUN curl \ 33 | --silent \ 34 | --location \ 35 | --retry 3 \ 36 | --output "$GRAYLOG_FORWARDER_FILE" \ 37 | "https://packages.graylog2.org/releases/cloud/forwarder/${GRAYLOG_FORWARDER_VERSION}/graylog-forwarder-${GRAYLOG_FORWARDER_VERSION}-bin.tar.gz" && \ 38 | install -d -o root -g root -m 0755 "$GRAYLOG_FORWARDER_ROOT" && \ 39 | tar -C "$GRAYLOG_FORWARDER_ROOT" -xzf "$GRAYLOG_FORWARDER_FILE" && \ 40 | chown -R root.root "$GRAYLOG_FORWARDER_ROOT" && \ 41 | install -d -o root -g root -m 0755 "$FORWARDER_DATA_DIR" && \ 42 | install -d -o root -g root -m 0755 "$(dirname $FORWARDER_CONFIG_FILE)" && \ 43 | touch "$FORWARDER_CONFIG_FILE" && \ 44 | echo "forwarder_server_hostname =" >> "$FORWARDER_CONFIG_FILE" && \ 45 | echo "forwarder_grpc_api_token =" >> "$FORWARDER_CONFIG_FILE" && \ 46 | mv "${GRAYLOG_FORWARDER_ROOT}/config/jvm.options" "$FORWARDER_JVM_OPTIONS_FILE" && \ 47 | rmdir "${GRAYLOG_FORWARDER_ROOT}/config" && \ 48 | rm -f "$GRAYLOG_FORWARDER_FILE" 49 | 50 | COPY docker/forwarder/forwarder-entrypoint.sh / 51 | 52 | LABEL maintainer="Graylog, Inc. " \ 53 | org.label-schema.name="Graylog Forwarder Docker Image" \ 54 | org.label-schema.description="Official Graylog Forwarder Docker image" \ 55 | org.label-schema.url="https://www.graylog.org/" \ 56 | org.label-schema.vcs-ref=${VCS_REF} \ 57 | org.label-schema.vcs-url="https://github.com/Graylog2/graylog-docker" \ 58 | org.label-schema.vendor="Graylog, Inc." \ 59 | org.label-schema.version=${GRAYLOG_FORWARDER_IMAGE_VERSION} \ 60 | org.label-schema.schema-version="1.0" \ 61 | org.label-schema.build-date=${BUILD_DATE} 62 | 63 | ENTRYPOINT ["tini", "--", "/forwarder-entrypoint.sh"] 64 | -------------------------------------------------------------------------------- /docker/forwarder/forwarder-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Convert all environment variables with names ending in __FILE into the content of 6 | # the file that they point at and use the name without the trailing __FILE. 
7 | # This can be used to carry in Docker secrets. 8 | for VAR_NAME in $(env | grep '^GRAYLOG_[^=]\+__FILE=.\+' | sed -r 's/^(GRAYLOG_[^=]*)__FILE=.*/\1/g'); do 9 | VAR_NAME_FILE="${VAR_NAME}__FILE" 10 | if [ "${!VAR_NAME}" ]; then 11 | echo >&2 "ERROR: Both ${VAR_NAME} and ${VAR_NAME_FILE} are set but are exclusive" 12 | exit 1 13 | fi 14 | VAR_FILENAME="${!VAR_NAME_FILE}" 15 | echo "Getting secret ${VAR_NAME} from ${VAR_FILENAME}" 16 | if [ ! -r "${VAR_FILENAME}" ]; then 17 | echo >&2 "ERROR: ${VAR_FILENAME} does not exist or is not readable" 18 | exit 1 19 | fi 20 | export "${VAR_NAME}"="$(< "${VAR_FILENAME}")" 21 | unset "${VAR_NAME_FILE}" 22 | done 23 | 24 | # Create data and journal dir explicitly because FORWARDER_DATA_DIR could 25 | # be mounted to an empty volume. 26 | /usr/bin/install -d -o root -g root -m 0755 "$GRAYLOG_DATA_DIR" 27 | /usr/bin/install -d -o root -g root -m 0755 "$GRAYLOG_MESSAGE_JOURNAL_DIR" 28 | 29 | exec "${GRAYLOG_BIN_DIR}/graylog-forwarder" run -f "$FORWARDER_CONFIG_FILE" 30 | -------------------------------------------------------------------------------- /docker/forwarder/hooks/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Custom build for Docker Hub 4 | # see: https://medium.com/microscaling-systems/labelling-automated-builds-on-docker-hub-f3d073fb8e1 5 | 6 | cd ../.. 7 | 8 | apt-get install -y python3-pip 9 | pip3 install pyyaml 10 | 11 | # Build Graylog Forwarder 12 | docker build --build-arg VCS_REF="$(git rev-parse --short HEAD)" \ 13 | --build-arg GRAYLOG_FORWARDER_VERSION="$(./release.py --get-forwarder-version)" \ 14 | --build-arg GRAYLOG_FORWARDER_IMAGE_VERSION="$(./release.py --get-forwarder-image-version)" \ 15 | --build-arg BUILD_DATE="$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \ 16 | --file docker/forwarder/Dockerfile \ 17 | --tag $IMAGE_NAME . 18 | -------------------------------------------------------------------------------- /docker/oss/Dockerfile: -------------------------------------------------------------------------------- 1 | # layer for download and verifying 2 | FROM ubuntu:jammy as graylog-downloader 3 | 4 | ARG VCS_REF 5 | ARG BUILD_DATE 6 | ARG GRAYLOG_VERSION 7 | ARG GRAYLOG_PRODUCT=graylog 8 | ARG GRAYLOG_HOME=/usr/share/graylog 9 | ARG GRAYLOG_UID=1100 10 | ARG GRAYLOG_GID=1100 11 | ARG TARGETPLATFORM 12 | 13 | # We default to an empty file instead of leaving LOCAL_BUILD_TGZ blank 14 | # because Docker would execute the following COPY command with a blank 15 | # value: 16 | # COPY "" "/tmp/graylog.tgz" 17 | # That creates a /tmp/graylog.tgz *directory* in the container with 18 | # all files from the build context. 19 | ARG LOCAL_BUILD_TGZ=.empty 20 | 21 | # Allows building a custom artifact. 
(e.g., snapshot builds) 22 | ARG DOWNLOAD_URL=none 23 | 24 | WORKDIR /tmp 25 | 26 | # hadolint ignore=DL3008,DL3015 27 | RUN \ 28 | apt-get update > /dev/null && \ 29 | apt-get upgrade -y > /dev/null && \ 30 | apt-get install --assume-yes \ 31 | ca-certificates \ 32 | curl > /dev/null 33 | 34 | COPY build/fetch-and-extract.sh /bin/fetch-and-extract 35 | 36 | RUN if [ "${LOCAL_BUILD_TGZ}" = ".empty" ] && [ "${DOWNLOAD_URL}" = "none" ]; then \ 37 | chmod +x /bin/fetch-and-extract \ 38 | && fetch-and-extract \ 39 | "https://downloads.graylog.org/releases/${GRAYLOG_PRODUCT}/${GRAYLOG_PRODUCT}-${GRAYLOG_VERSION}.tgz" \ 40 | "https://downloads.graylog.org/releases/${GRAYLOG_PRODUCT}/${GRAYLOG_PRODUCT}-${GRAYLOG_VERSION}.tgz.sha256.txt"; \ 41 | fi 42 | 43 | RUN if [ "${DOWNLOAD_URL}" != "none" ]; then \ 44 | chmod +x /bin/fetch-and-extract && fetch-and-extract "${DOWNLOAD_URL}"; \ 45 | fi 46 | 47 | COPY "${LOCAL_BUILD_TGZ}" "/tmp/graylog.tgz" 48 | 49 | # An empty /tmp/graylog.tgz file indicates that we don't use a 50 | # custom LOCAL_BUILD_TGZ file. 51 | RUN if [ -f "/tmp/graylog.tgz" ] && [ -s "/tmp/graylog.tgz" ]; then \ 52 | mkdir /opt/graylog && \ 53 | tar --extract --gzip --file "/tmp/graylog.tgz" --strip-components=1 --directory /opt/graylog; \ 54 | rm -rf /tmp/graylog.tgz; \ 55 | fi 56 | 57 | RUN \ 58 | install \ 59 | --directory \ 60 | --mode=0750 \ 61 | /opt/graylog/data \ 62 | /opt/graylog/data/journal \ 63 | /opt/graylog/data/log \ 64 | /opt/graylog/data/config \ 65 | /opt/graylog/data/plugin \ 66 | /opt/graylog/data/data 67 | 68 | RUN mv /opt/graylog ${GRAYLOG_HOME} && chown -R ${GRAYLOG_UID}:${GRAYLOG_GID} ${GRAYLOG_HOME} 69 | RUN mv ${GRAYLOG_HOME}/plugin ${GRAYLOG_HOME}/plugins-default 70 | RUN install -d -o "${GRAYLOG_UID}" -g "${GRAYLOG_GID}" -m 0755 ${GRAYLOG_HOME}/plugins-merged && \ 71 | install -d -o "${GRAYLOG_UID}" -g "${GRAYLOG_GID}" -m 0755 ${GRAYLOG_HOME}/plugin 72 | 73 | COPY config ${GRAYLOG_HOME}/data/config 74 | 75 | # ------------------------------------------------------------------------------------------------- 76 | # 77 | # final layer 78 | FROM eclipse-temurin:17-jre-jammy 79 | 80 | ARG VCS_REF 81 | ARG GRAYLOG_VERSION 82 | ARG BUILD_DATE 83 | ARG GRAYLOG_HOME=/usr/share/graylog 84 | ARG GRAYLOG_USER=graylog 85 | ARG GRAYLOG_UID=1100 86 | ARG GRAYLOG_GROUP=graylog 87 | ARG GRAYLOG_GID=1100 88 | 89 | COPY --chown=${GRAYLOG_UID}:${GRAYLOG_GID} --from=graylog-downloader ${GRAYLOG_HOME} ${GRAYLOG_HOME} 90 | 91 | WORKDIR ${GRAYLOG_HOME} 92 | 93 | SHELL ["/bin/bash", "-o", "pipefail", "-c"] 94 | # hadolint ignore=DL3027,DL3008 95 | RUN \ 96 | echo "export BUILD_DATE=${BUILD_DATE}" >> /etc/profile.d/graylog.sh && \ 97 | echo "export GRAYLOG_VERSION=${GRAYLOG_VERSION}" >> /etc/profile.d/graylog.sh && \ 98 | echo "export GRAYLOG_SERVER_JAVA_OPTS='-Dlog4j2.formatMsgNoLookups=true -Djdk.tls.acknowledgeCloseNotify=true -XX:+UnlockExperimentalVMOptions -XX:-OmitStackTraceInFastThrow -XX:+UseG1GC -server'" >> /etc/profile.d/graylog.sh && \ 99 | echo "export GRAYLOG_HOME=${GRAYLOG_HOME}" >> /etc/profile.d/graylog.sh && \ 100 | echo "export GRAYLOG_USER=${GRAYLOG_USER}" >> /etc/profile.d/graylog.sh && \ 101 | echo "export GRAYLOG_GROUP=${GRAYLOG_GROUP}" >> /etc/profile.d/graylog.sh && \ 102 | echo "export GRAYLOG_UID=${GRAYLOG_UID}" >> /etc/profile.d/graylog.sh && \ 103 | echo "export GRAYLOG_GID=${GRAYLOG_GID}" >> /etc/profile.d/graylog.sh && \ 104 | echo "export PATH=${GRAYLOG_HOME}/bin:${PATH}" >> /etc/profile.d/graylog.sh && \ 105 | apt-get update > /dev/null && \ 106 | 
apt-get upgrade -y > /dev/null && \ 107 | apt-get install --no-install-recommends --assume-yes \ 108 | curl \ 109 | tini \ 110 | libcap2-bin \ 111 | libglib2.0-0 \ 112 | libx11-6 \ 113 | libnss3 \ 114 | wait-for-it \ 115 | fonts-dejavu \ 116 | fontconfig > /dev/null && \ 117 | addgroup \ 118 | --gid "${GRAYLOG_GID}" \ 119 | --quiet \ 120 | "${GRAYLOG_GROUP}" && \ 121 | adduser \ 122 | --disabled-password \ 123 | --disabled-login \ 124 | --gecos '' \ 125 | --home ${GRAYLOG_HOME} \ 126 | --uid "${GRAYLOG_UID}" \ 127 | --gid "${GRAYLOG_GID}" \ 128 | --quiet \ 129 | "${GRAYLOG_USER}" && \ 130 | setcap 'cap_net_bind_service=+ep' "${JAVA_HOME}/bin/java" && \ 131 | # https://github.com/docker-library/openjdk/blob/da594d91b0364d5f1a32e0ce6b4d3fd8a9116844/8/jdk/slim-bullseye/Dockerfile#L105 132 | # https://github.com/docker-library/openjdk/issues/331#issuecomment-498834472 133 | find "$JAVA_HOME/lib" -name '*.so' -exec dirname '{}' ';' | sort -u > /etc/ld.so.conf.d/docker-openjdk.conf && \ 134 | ldconfig && \ 135 | apt-get remove --assume-yes --purge \ 136 | apt-utils > /dev/null && \ 137 | rm -f /etc/apt/sources.list.d/* && \ 138 | apt-get clean > /dev/null && \ 139 | apt autoremove --assume-yes > /dev/null && \ 140 | rm -rf \ 141 | /tmp/* \ 142 | /var/cache/debconf/* \ 143 | /var/lib/apt/lists/* \ 144 | /var/log/* \ 145 | /usr/share/X11 \ 146 | /usr/share/doc/* 2> /dev/null 147 | 148 | COPY docker-entrypoint.sh / 149 | COPY health_check.sh / 150 | 151 | EXPOSE 9000 152 | USER ${GRAYLOG_USER} 153 | VOLUME ${GRAYLOG_HOME}/data 154 | ENTRYPOINT ["tini", "--", "/docker-entrypoint.sh"] 155 | CMD ["server"] 156 | 157 | # add healthcheck 158 | HEALTHCHECK \ 159 | --interval=10s \ 160 | --timeout=2s \ 161 | --retries=12 \ 162 | CMD /health_check.sh 163 | 164 | # ------------------------------------------------------------------------------------------------- 165 | 166 | LABEL maintainer="Graylog, Inc. " \ 167 | org.label-schema.name="Graylog Docker Image" \ 168 | org.label-schema.description="Official Graylog Docker Image" \ 169 | org.label-schema.url="https://www.graylog.org/" \ 170 | org.label-schema.vcs-ref=${VCS_REF} \ 171 | org.label-schema.vcs-url="https://github.com/Graylog2/graylog-docker" \ 172 | org.label-schema.vendor="Graylog, Inc." \ 173 | org.label-schema.version=${GRAYLOG_VERSION} \ 174 | org.label-schema.schema-version="1.0" \ 175 | org.label-schema.build-date=${BUILD_DATE} 176 | -------------------------------------------------------------------------------- /docker/oss/hooks/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Custom build for Docker Hub 4 | # see: https://medium.com/microscaling-systems/labelling-automated-builds-on-docker-hub-f3d073fb8e1 5 | 6 | cd ../.. 
7 | 8 | apt-get install -y python3-pip 9 | pip3 install pyyaml 10 | 11 | graylog_version="$(./release.py --get-graylog-version)" 12 | build_args=() 13 | 14 | if [[ "$graylog_version" =~ SNAPSHOT ]]; then 15 | download_url_x64="$(curl -fsSL -G -d artifact=graylog-linux-x64 -d limit=1 https://downloads.graylog.org/nightly-builds | jq -r '.artifacts[0].url')" 16 | download_url_aarch64="$(sed -e 's,linux-x64,linux-aarch64,' <<< "$download_url_x64")" 17 | download_url="$(sed -e 's,-linux-x64,,' <<< "$download_url_x64")" 18 | 19 | build_args+=(--build-arg DOWNLOAD_URL="$download_url") 20 | build_args+=(--build-arg DOWNLOAD_URL_X64="$download_url_x64") 21 | build_args+=(--build-arg DOWNLOAD_URL_AARCH64="$download_url_aarch64") 22 | fi 23 | 24 | # Build Graylog 25 | docker build --build-arg VCS_REF="$(git rev-parse --short HEAD)" \ 26 | --build-arg GRAYLOG_VERSION="$graylog_version" \ 27 | --build-arg BUILD_DATE="$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \ 28 | ${build_args[*]} \ 29 | --file docker/oss/Dockerfile \ 30 | --tag $IMAGE_NAME . 31 | -------------------------------------------------------------------------------- /health_check.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source /etc/profile 4 | 5 | 6 | # See "Web & Rest API" @ https://docs.graylog.org/docs/server-conf 7 | # 8 | # If `http_publish_uri` is given, use that for the health check; 9 | # if not, take `http_bind_address`, which defaults to 127.0.0.1:9000 10 | # if nothing is set. 11 | # 12 | 13 | # defaults 14 | proto=http 15 | http_bind_address=127.0.0.1:9000 16 | 17 | # Check if a configuration file is given and grep for the variables 18 | if [[ -f "${GRAYLOG_HOME}"/data/config/graylog.conf ]] 19 | then 20 | # try to grep the variable from a mounted configuration 21 | http_publish_uri=$(grep "^http_publish_uri" "${GRAYLOG_HOME}"/data/config/graylog.conf | awk -F '=' '{print $2}' | awk '{$1=$1};1') 22 | http_bind_address=$(grep "^http_bind_address" "${GRAYLOG_HOME}"/data/config/graylog.conf | awk -F '=' '{print $2}' | awk '{$1=$1};1') 23 | http_enable_tls=$(grep "^http_enable_tls" "${GRAYLOG_HOME}"/data/config/graylog.conf | awk -F '=' '{print $2}' | awk '{$1=$1};1') 24 | 25 | # FIX https://github.com/Graylog2/graylog-docker/issues/102 26 | # This will remove the protocol from the URI if set via 27 | # configuration. 28 | # not the smartest solution currently, but a working one 29 | # TODO: find a better way or maybe write a function 30 | # shellcheck disable=SC2001 31 | if [[ ! -z ${http_publish_uri} ]] 32 | then 33 | # remove the protocol from the URI 34 | proton="$(echo "${http_publish_uri}" | grep :// | sed -e's,^\(.*://\).*,\1,g')" 35 | url=$(echo "${http_publish_uri}" | sed -e s,"$proton",,g) 36 | # we want to be sure to use https if enabled; 37 | # currently this looks like the best solution to cut 38 | # the protocol away and set it based on 39 | # whether TLS is enabled or not 40 | http_publish_uri="${url}" 41 | fi 42 | 43 | fi 44 | 45 | # try to get the data from environment variables 46 | # they will always override all other settings 47 | # shellcheck disable=SC2001 48 | if [[ !
-z "${GRAYLOG_HTTP_PUBLISH_URI}" ]] 49 | then 50 | # remove the protocol from the URI 51 | proton="$(echo "${GRAYLOG_HTTP_PUBLISH_URI}" | grep :// | sed -e's,^\(.*://\).*,\1,g')" 52 | url=$(echo "${GRAYLOG_HTTP_PUBLISH_URI}" | sed -e s,"$proton",,g) 53 | # we want to be sure to use https if enable 54 | # currently this looks like the best solution to cut 55 | # the protocoll away and set it based on 56 | # the fact if TLS is enabled or not 57 | http_publish_uri="${url}" 58 | fi 59 | if [[ ! -z "${GRAYLOG_HTTP_BIND_ADDRESS}" ]] 60 | then 61 | http_bind_address="${GRAYLOG_HTTP_BIND_ADDRESS}" 62 | fi 63 | if [[ ! -z "${GRAYLOG_HTTP_ENABLE_TLS}" ]] 64 | then 65 | http_enable_tls="${GRAYLOG_HTTP_ENABLE_TLS}" 66 | fi 67 | 68 | # if configured set https 69 | [[ ! -z "${http_enable_tls}" ]] && [[ ${http_enable_tls} = "true" ]] && proto=https 70 | 71 | # when HTTP_PUBLISH_URI is given that is used for the healtcheck 72 | # otherwise HTTP_BIND_ADDRESS 73 | 74 | if [[ ! -z "${http_bind_address}" ]] 75 | then 76 | check_url="${proto}"://"${http_bind_address}" 77 | else 78 | # we will never run into this - but 79 | # never say never 80 | echo "not possible to get Graylog listen URI - abort" 81 | exit 1 82 | fi 83 | 84 | if [[ ! -z "${http_publish_uri}" ]] 85 | then 86 | check_url="${proto}"://"${http_publish_uri}" 87 | fi 88 | 89 | if [[ -z "${check_url}" ]] 90 | then 91 | echo "Not possible to get Graylog listen URI - abort" 92 | exit 1 93 | fi 94 | 95 | # FIX https://github.com/Graylog2/graylog-docker/issues/156 96 | # ignore self-signed certificates for the real URL, not only for localhost 97 | if curl --silent --insecure --fail "${check_url}"/api 98 | then 99 | exit 0 100 | fi 101 | 102 | # FIX https://github.com/Graylog2/graylog-docker/issues/101 103 | # When the above check fails fall back to localhost 104 | # This is not the most elegant solution but a working one 105 | if curl --silent --insecure --fail http://127.0.0.1/api 106 | then 107 | exit 0 108 | fi 109 | 110 | 111 | 112 | exit 1 113 | -------------------------------------------------------------------------------- /jenkins.groovy: -------------------------------------------------------------------------------- 1 | pipeline 2 | { 3 | agent { label 'linux' } 4 | 5 | options 6 | { 7 | buildDiscarder logRotator(artifactDaysToKeepStr: '90', artifactNumToKeepStr: '100', daysToKeepStr: '90', numToKeepStr: '100') 8 | timestamps() 9 | timeout(time: 1, unit: 'HOURS') 10 | 11 | // The test exposes some hardcoded ports, so the tests can't be executed 12 | // at the same time on the same machine. 
13 | lock('docker-integrations-test') 14 | } 15 | 16 | stages 17 | { 18 | stage('Build Docker Image') 19 | { 20 | when 21 | { 22 | not 23 | { 24 | buildingTag() 25 | } 26 | } 27 | steps 28 | { 29 | sh 'make docker_build' 30 | } 31 | } 32 | stage('Linter and Integration Test') 33 | { 34 | when 35 | { 36 | not 37 | { 38 | buildingTag() 39 | } 40 | } 41 | steps 42 | { 43 | sh 'make test' 44 | } 45 | } 46 | stage('Deploy image') 47 | { 48 | when 49 | { 50 | buildingTag() 51 | } 52 | 53 | steps 54 | { 55 | echo "TAG_NAME: ${TAG_NAME}" 56 | 57 | script 58 | { 59 | // Building Graylog (no tag suffix) 60 | if (TAG_NAME =~ /^(?:[4-9]|\\d{2,}).[0-9]+.[0-9]+-(?:[0-9]+|alpha|beta|rc).*/) 61 | { 62 | PARSED_VERSION = parse_version(TAG_NAME) 63 | MAJOR = PARSED_VERSION[0] 64 | MINOR = PARSED_VERSION[1] 65 | PATCH = PARSED_VERSION[2] 66 | echo "MAJOR: ${MAJOR}" 67 | echo "MINOR: ${MINOR}" 68 | echo "PATCH: ${PATCH}" 69 | 70 | MAJOR_INT = MAJOR as Integer 71 | MINOR_INT = MINOR as Integer 72 | 73 | //Is the revision suffix just a number? 74 | if (TAG_NAME =~ /^([4-9]|\d{2,}).([0-9]+).([0-9]+)-([0-9]+)$/) 75 | { 76 | TAG_ARGS = """--tag graylog/graylog:${env.TAG_NAME} \ 77 | --tag graylog/graylog:${MAJOR}.${MINOR}.${PATCH} \ 78 | --tag graylog/graylog:${MAJOR}.${MINOR}""" 79 | 80 | TAG_ARGS_DATANODE = """--tag graylog/graylog-datanode:${env.TAG_NAME} \ 81 | --tag graylog/graylog-datanode:${MAJOR}.${MINOR}.${PATCH} \ 82 | --tag graylog/graylog-datanode:${MAJOR}.${MINOR}""" 83 | 84 | TAG_ARGS_ENTERPRISE = """--tag graylog/graylog-enterprise:${env.TAG_NAME} \ 85 | --tag graylog/graylog-enterprise:${MAJOR}.${MINOR}.${PATCH} \ 86 | --tag graylog/graylog-enterprise:${MAJOR}.${MINOR}""" 87 | 88 | } 89 | else 90 | { 91 | //This is an alpha/beta/rc release, so don't update the version tags 92 | TAG_ARGS = "--tag graylog/graylog:${env.TAG_NAME}" 93 | TAG_ARGS_DATANODE = "--tag graylog/graylog-datanode:${env.TAG_NAME}" 94 | TAG_ARGS_ENTERPRISE = "--tag graylog/graylog-enterprise:${env.TAG_NAME}" 95 | } 96 | 97 | docker.withRegistry('', 'docker-hub') 98 | { 99 | sh 'docker run --rm --privileged multiarch/qemu-user-static --reset -p yes' 100 | sh 'docker buildx create --name multiarch --driver docker-container --use | true' 101 | sh 'docker buildx inspect --bootstrap' 102 | 103 | sh """ 104 | docker buildx build \ 105 | --platform linux/amd64,linux/arm64/v8 \ 106 | --no-cache \ 107 | --build-arg GRAYLOG_VERSION=\$(./release.py --get-graylog-version) \ 108 | --build-arg BUILD_DATE=\$(date -u +\"%Y-%m-%dT%H:%M:%SZ\") \ 109 | --build-arg VCS_REF=\$(git rev-parse HEAD) \ 110 | ${TAG_ARGS} \ 111 | --file docker/oss/Dockerfile \ 112 | --pull \ 113 | --push \ 114 | . 115 | """ 116 | 117 | sh """ 118 | docker buildx build \ 119 | --platform linux/amd64,linux/arm64/v8 \ 120 | --no-cache \ 121 | --build-arg GRAYLOG_VERSION=\$(./release.py --get-graylog-version) \ 122 | --build-arg BUILD_DATE=\$(date -u +\"%Y-%m-%dT%H:%M:%SZ\") \ 123 | --build-arg VCS_REF=\$(git rev-parse HEAD) \ 124 | ${TAG_ARGS_DATANODE} \ 125 | --file docker/datanode/Dockerfile \ 126 | --pull \ 127 | --push \ 128 | . 129 | """ 130 | 131 | sh """ 132 | docker buildx build \ 133 | --platform linux/amd64,linux/arm64/v8 \ 134 | --no-cache \ 135 | --build-arg GRAYLOG_VERSION=\$(./release.py --get-graylog-version) \ 136 | --build-arg BUILD_DATE=\$(date -u +\"%Y-%m-%dT%H:%M:%SZ\") \ 137 | --build-arg VCS_REF=\$(git rev-parse HEAD) \ 138 | ${TAG_ARGS_ENTERPRISE} \ 139 | --file docker/enterprise/Dockerfile \ 140 | --pull \ 141 | --push \ 142 | . 
143 | """ 144 | } 145 | } 146 | 147 | // Building the Forwarder (always a "forwarder-" tag suffix) 148 | if (TAG_NAME =~ /forwarder-.*/) 149 | { 150 | PARSED_VERSION = parse_forwarder_version(TAG_NAME) 151 | MAJOR = PARSED_VERSION[0] 152 | MINOR = PARSED_VERSION[1] 153 | CLEAN_TAG = TAG_NAME.replaceFirst("^forwarder-", "") 154 | echo "MAJOR: ${MAJOR}" 155 | echo "MINOR: ${MINOR}" 156 | 157 | IMAGE_NAME = "graylog/graylog-forwarder" 158 | 159 | TAG_ARGS = "--tag ${IMAGE_NAME}:${CLEAN_TAG}" 160 | 161 | if (TAG_NAME =~ /^forwarder-\d+.\d+-\d+$/) 162 | { 163 | // If we build a GA release (no alpha/beta/rc), we also add 164 | // the simple version tag. 165 | TAG_ARGS += " --tag ${IMAGE_NAME}:${MAJOR}.${MINOR}" 166 | } 167 | 168 | docker.withRegistry('', 'docker-hub') 169 | { 170 | sh 'docker run --rm --privileged multiarch/qemu-user-static --reset -p yes' 171 | sh 'docker buildx create --name multiarch --driver docker-container --use | true' 172 | sh 'docker buildx inspect --bootstrap' 173 | sh """ 174 | docker buildx build \ 175 | --platform linux/amd64,linux/arm64/v8 \ 176 | --no-cache \ 177 | --build-arg GRAYLOG_FORWARDER_VERSION=\$(./release.py --get-forwarder-version) \ 178 | --build-arg GRAYLOG_FORWARDER_IMAGE_VERSION=\$(./release.py --get-forwarder-image-version) \ 179 | --build-arg BUILD_DATE=\$(date -u +\"%Y-%m-%dT%H:%M:%SZ\") \ 180 | --build-arg VCS_REF=\$(git rev-parse HEAD) \ 181 | ${TAG_ARGS} \ 182 | --file docker/forwarder/Dockerfile \ 183 | --pull \ 184 | --push \ 185 | . 186 | """ 187 | } 188 | } 189 | } 190 | } 191 | } 192 | } 193 | 194 | post 195 | { 196 | always 197 | { 198 | cleanWs() 199 | } 200 | } 201 | } 202 | 203 | // Parse a string containing a semantic version 204 | def parse_version(version) 205 | { 206 | if (version) 207 | { 208 | def pattern = /^([4-9]|\d\{2,\}+).([0-9]+).([0-9]+)-?.*$/ 209 | def matcher = java.util.regex.Pattern.compile(pattern).matcher(version) 210 | 211 | if (matcher.find()) { 212 | return [matcher.group(1), matcher.group(2), matcher.group(3)] 213 | } else { 214 | return null 215 | } 216 | } 217 | else 218 | { 219 | return null 220 | } 221 | } 222 | 223 | // Parse a string containing the forwarder version 224 | def parse_forwarder_version(version) 225 | { 226 | if (version) 227 | { 228 | // Matches the following version patterns: 229 | // 230 | // forwarder-4.8-1 231 | // forwarder-4.8-rc.1-1 232 | def pattern = /^forwarder-([0-9]+).([0-9]+)-?(?:[^-]+)?-([0-9]+)$/ 233 | def matcher = java.util.regex.Pattern.compile(pattern).matcher(version) 234 | 235 | if (matcher.find()) { 236 | return [matcher.group(1), matcher.group(2), matcher.group(3)] 237 | } else { 238 | return null 239 | } 240 | } 241 | else 242 | { 243 | return null 244 | } 245 | } 246 | -------------------------------------------------------------------------------- /patches/graylog-server.conf.patch: -------------------------------------------------------------------------------- 1 | --- graylog.conf.orig 2023-10-24 14:55:33.695355185 +0200 2 | +++ graylog.conf 2023-10-24 14:56:24.631974899 +0200 3 | @@ -707,6 +707,9 @@ 4 | # and allows administrators to control where in the file system Graylog users can select files from. 5 | #allowed_auxiliary_paths = /etc/graylog/data-files,/etc/custom-allowed-path 6 | 7 | +# An absolute path where scripts are permitted to be executed from. 
8 | +integrations_scripts_dir = /usr/share/graylog/data/scripts 9 | + 10 | # Do not perform any preflight checks when starting Graylog 11 | # Default: false 12 | #skip_preflight_checks = false 13 | -------------------------------------------------------------------------------- /release.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import yaml 4 | import argparse 5 | import sys 6 | 7 | parser = argparse.ArgumentParser( 8 | description='Release utility for the Graylog Docker image.') 9 | parser.add_argument('--get-graylog-version', 10 | help="Get Graylog image version.", action='store_true') 11 | parser.add_argument('--get-forwarder-version', 12 | help="Get Forwarder version.", action='store_true') 13 | parser.add_argument('--get-forwarder-image-version', 14 | help="Get Forwarder image version.", action='store_true') 15 | parser.add_argument('--generate-readme', 16 | help="Generate a new README.md with the latest tags", action='store_true') 17 | parser.add_argument('--bump', dest='bump', 18 | choices=['graylog', 'forwarder'], help="Bump the given version") 19 | parser.add_argument('--version', dest='version', 20 | help="The new version and revision") 21 | 22 | if len(sys.argv) == 1: 23 | parser.print_help(sys.stderr) 24 | sys.exit(1) 25 | 26 | args = parser.parse_args() 27 | 28 | if args.bump and not args.version: 29 | parser.error('Missing --version parameter') 30 | 31 | version_parsed = None 32 | 33 | with open('version.yml', 'r') as version_file: 34 | version_parsed = yaml.safe_load(version_file) 35 | 36 | if args.get_graylog_version: 37 | print(str(version_parsed['graylog']['major_version']) + '.' + str(version_parsed['graylog'] 38 | ['minor_version']) + '.' + str(version_parsed['graylog']['patch_version']), end='') 39 | 40 | if args.get_forwarder_version: 41 | print(str(version_parsed['forwarder']['version']), end='') 42 | 43 | if args.get_forwarder_image_version: 44 | print(str(version_parsed['forwarder']['version']) + '-' + 45 | str(version_parsed['forwarder']['release']), end='') 46 | 47 | if args.generate_readme: 48 | from jinja2 import Template 49 | with open('README.j2', 'r') as template_file: 50 | j2_template = Template(template_file.read()) 51 | 52 | with open("README.md", "w") as readme_file: 53 | readme_file.write(j2_template.render(version_parsed)) 54 | 55 | if args.bump == 'graylog': 56 | print(f'Bumping {args.bump} to {args.version}') 57 | 58 | # 6.0.0-alpha.1-1 => version="6.0.0", suffixes=["alpha.1", "1"] 59 | # 6.0.0-1 => version="6.0.0", suffixes=["1"] 60 | version, *suffixes = args.version.split('-', 2) 61 | # 6.0.0 => major=6, minor=0, patch= 0 62 | major, minor, patch = version.split('.', 2) 63 | 64 | suffix = None 65 | release = None 66 | if len(suffixes) == 0: 67 | raise RuntimeError("Missing revision suffix!") 68 | elif len(suffixes) > 1: 69 | suffix = suffixes[0] 70 | release = suffixes[1] 71 | else: 72 | suffix = None 73 | release = suffixes[0] 74 | 75 | version_parsed[args.bump]['major_version'] = major 76 | version_parsed[args.bump]['minor_version'] = minor 77 | version_parsed[args.bump]['patch_version'] = f'{patch}-{suffix}' if suffix else patch 78 | version_parsed[args.bump]['release'] = int(release) 79 | 80 | print(version_parsed[args.bump]) 81 | if args.bump == 'forwarder': 82 | print(f'Bumping {args.bump} to {args.version}') 83 | 84 | # 6.0-alpha.1-1 => version="6.0", suffixes=["alpha.1", "1"] 85 | # 6.0-1 => version="6.0", suffixes=["1"] 86 | version, *suffixes = 
args.version.split('-', 2) 87 | 88 | suffix = None 89 | release = None 90 | if len(suffixes) == 0: 91 | raise RuntimeError("Missing revision suffix!") 92 | elif len(suffixes) > 1: 93 | suffix = suffixes[0] 94 | release = suffixes[1] 95 | else: 96 | suffix = None 97 | release = suffixes[0] 98 | 99 | version_parsed[args.bump]['version'] = f'{version}-{suffix}' if suffix else version 100 | version_parsed[args.bump]['release'] = int(release) 101 | 102 | print(version_parsed[args.bump]) 103 | 104 | 105 | if version_parsed and args.bump: 106 | with open('version.yml', 'w') as f: 107 | yaml.dump(version_parsed, f, sort_keys=False) 108 | 109 | with open('version.yml', 'r+') as f: 110 | content = f.read() 111 | f.seek(0, 0) 112 | # Preserve some comments 113 | f.write('# For pre-releases: patch_version=0-beta.1, patch_version=0-rc.1\n') 114 | f.write('# For GA releases: patch_version=0\n') 115 | f.write(content) 116 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | Jinja2 2 | PyYAML 3 | -------------------------------------------------------------------------------- /test/docker-compose.tpl: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | # MongoDB: https://hub.docker.com/_/mongo/ 4 | mongodb: 5 | image: mongo:5.0 6 | mem_limit: 128m 7 | 8 | opensearch: 9 | image: "opensearchproject/opensearch:1.3.6" 10 | environment: 11 | - "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m -Dlog4j2.formatMsgNoLookups=true" 12 | - "discovery.type=single-node" 13 | - "logger.deprecation.level=warn" 14 | - "action.auto_create_index=false" 15 | - "bootstrap.memory_lock=true" 16 | - "plugins.security.ssl.http.enabled=false" 17 | - "plugins.security.disabled=true" 18 | ulimits: 19 | memlock: 20 | soft: -1 21 | hard: -1 22 | mem_limit: 1g 23 | 24 | graylog: 25 | build: 26 | context: .. 27 | dockerfile: docker/oss/Dockerfile 28 | args: 29 | - VCS_REF 30 | - GRAYLOG_VERSION 31 | - DOWNLOAD_URL 32 | entrypoint: /usr/bin/tini -- wait-for-it opensearch:9200 -- /docker-entrypoint.sh 33 | environment: 34 | # CHANGE ME! 
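      # (The values below are for testing only. A replacement hash for
      # GRAYLOG_ROOT_PASSWORD_SHA2 can be generated with, for example:
      #   echo -n 'yourpassword' | sha256sum)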
35 | - GRAYLOG_PASSWORD_SECRET=somepasswordpepper 36 | # Password: admin 37 | - GRAYLOG_ROOT_PASSWORD_SHA2=8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918 38 | - GRAYLOG_MESSAGE_JOURNAL_ENABLED=false 39 | - GRAYLOG_NODE_ID_FILE=/usr/share/graylog/data/config/node-id 40 | - GRAYLOG_ELASTICSEARCH_HOSTS=http://opensearch:9200/ 41 | - GRAYLOG_MONGODB_URI=mongodb://mongodb:27017/graylog 42 | # - GRAYLOG_HTTP_EXTERNAL_URI=http://127.0.0.1:9000/ 43 | mem_limit: 1g 44 | ports: 45 | # Graylog web interface and REST API 46 | - 9000:9000 47 | # Syslog TCP input 48 | - 514:514 49 | # Raw/Plaintext input 50 | - 5555:5555 51 | restart: always 52 | depends_on: 53 | - opensearch 54 | - mongodb 55 | -------------------------------------------------------------------------------- /test/input-raw-tcp.json: -------------------------------------------------------------------------------- 1 | { 2 | "configuration" : { 3 | "bind_address" : "0.0.0.0", 4 | "port" : 5555 5 | }, 6 | "title" : "Raw/Plaintext TCP", 7 | "type" : "org.graylog2.inputs.raw.tcp.RawTCPInput", 8 | "global" : true 9 | } 10 | -------------------------------------------------------------------------------- /test/input-syslog-tcp.json: -------------------------------------------------------------------------------- 1 | { 2 | "type" : "org.graylog2.inputs.syslog.tcp.SyslogTCPInput", 3 | "global" : true, 4 | "configuration" : { 5 | "port" : 514, 6 | "bind_address" : "0.0.0.0" 7 | }, 8 | "title" : "Syslog TCP" 9 | } 10 | -------------------------------------------------------------------------------- /test/integration_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ux 4 | 5 | GRAYLOG_PORT=9000 6 | 7 | CREDENTIALS="admin:admin" 8 | URL="http://127.0.0.1:9000" 9 | 10 | TEST_DIR="$(dirname "${0}")" 11 | 12 | # ------------------------------------------------------------------------------------------------- 13 | 14 | finish() { 15 | rv=$? 16 | if [ ${rv} -gt 0 ] 17 | then 18 | echo -e "\033[38;5;202m\033[1mexit with signal '${rv}'\033[0m" 19 | 20 | docker-compose down 21 | fi 22 | 23 | rm -f test/*_result.json 2> /dev/null 24 | 25 | exit $rv 26 | } 27 | 28 | trap finish SIGINT SIGTERM INT TERM EXIT 29 | 30 | # ------------------------------------------------------------------------------------------------- 31 | 32 | 33 | 34 | if ! command -v docker-compose; then 35 | docker-compose() { 36 | docker compose "$@" 37 | } 38 | fi 39 | 40 | JQ_VERSION='1.6' 41 | JQ_PATH='/usr/local/bin/jq' 42 | if ! [ -x "$(command -v jq)" ]; then 43 | sudo wget -O "${JQ_PATH}" "https://github.com/stedolan/jq/releases/download/jq-${JQ_VERSION}/jq-linux64" 44 | sudo chmod +x "${JQ_PATH}" 45 | fi 46 | 47 | NC=$(command -v ncat) 48 | NC_OPTS="-z" 49 | 50 | if [[ -z "${NC}" ]] 51 | then 52 | NC=$(command -v nc) 53 | NC_OPTS= 54 | fi 55 | 56 | get_snapshot_url() { 57 | curl -fsSL -G -d artifact=graylog -d limit=1 https://downloads.graylog.org/nightly-builds | \ 58 | jq -r '.artifacts[0].url' 59 | } 60 | 61 | compose_up() { 62 | local graylog_version="$(cd .. 
63 | 
64 |   echo "Using Graylog version: $graylog_version"
65 |   cat << EOF > .env
66 | VCS_REF=$(git rev-parse --short HEAD)
67 | GRAYLOG_VERSION=$graylog_version
68 | EOF
69 | 
70 |   if [[ "$graylog_version" =~ SNAPSHOT ]]; then
71 |     local snapshot_url="$(get_snapshot_url)"
72 |     echo "Using snapshot: $snapshot_url"
73 |     echo "DOWNLOAD_URL=$snapshot_url" >> .env
74 |   fi
75 | 
76 |   docker-compose --file docker-compose.tpl config > ./docker-compose.yml
77 |   docker-compose down -v
78 |   docker-compose build --pull
79 |   docker-compose up -d --remove-orphans
80 | }
81 | 
82 | compose_down() {
83 | 
84 |   # Shutdown
85 |   docker-compose down -v
86 | }
87 | 
88 | wait_for_port() {
89 | 
90 |   echo "wait for graylog port ${GRAYLOG_PORT}"
91 | 
92 |   RETRY=40
93 |   until [[ ${RETRY} -le 0 ]]
94 |   do
95 |     timeout 1 bash -c "cat < /dev/null > /dev/tcp/127.0.0.1/${GRAYLOG_PORT}" 2> /dev/null
96 |     if [ $? -eq 0 ]
97 |     then
98 |       break
99 |     else
100 |       sleep 3s
101 |       RETRY=$((RETRY - 1))
102 |     fi
103 |   done
104 | 
105 |   if [[ $RETRY -le 0 ]]
106 |   then
107 |     echo "could not connect to the graylog instance"
108 |     exit 1
109 |   fi
110 | }
111 | 
112 | 
113 | wait_for_application() {
114 | 
115 |   echo 'Waiting until Graylog has been started'
116 | 
117 |   RETRY=0
118 |   until curl --silent --head "${URL}" || [[ $RETRY -ge 10 ]]
119 |   do
120 |     RETRY=$((RETRY + 1))
121 |     if [[ $RETRY -le 10 ]]; then
122 |       echo "Waiting 10s until Graylog has been started - retry #$RETRY"
123 |     fi
124 |     sleep 10s
125 |   done
126 | 
127 |   curl \
128 |     --silent \
129 |     --user "${CREDENTIALS}" \
130 |     --header 'Accept: application/json' \
131 |     "${URL}/api/?pretty=true"
132 | 
133 |   sleep 2s
134 | }
135 | 
136 | 
137 | cluster_state() {
138 | 
139 |   echo -e "\nget cluster state with session"
140 |   code=$(curl \
141 |     --silent \
142 |     --request POST \
143 |     --header 'Accept: application/json' \
144 |     --header 'Content-Type: application/json' \
145 |     --header 'X-Requested-By: cli' \
146 |     --output session_result.json \
147 |     --write-out '%{http_code}\n' \
148 |     --data '{"username":"admin", "password":"admin", "host":""}' \
149 |     "${URL}/api/system/sessions")
150 | 
151 |   result=${?}
152 | 
153 |   if [ ${result} -eq 0 ] && { [ ${code} -eq 200 ] || [ ${code} -eq 201 ]; }
154 |   then
155 |     session_id=$(jq --raw-output '.session_id' session_result.json)
156 | 
157 |     curl \
158 |       --silent \
159 |       --user "${session_id}:session" \
160 |       --header 'Accept: application/json' \
161 |       "${URL}/api/cluster?pretty=true"
162 | 
163 |   else
164 |     echo "code: ${code}"
165 |     cat session_result.json
166 |     jq --raw-output '.message' session_result.json 2> /dev/null
167 |   fi
168 | 
169 | }
170 | 
171 | create_roles() {
172 | 
173 |   echo -e "\ncreate permissions to create dashboards"
174 |   code=$(curl \
175 |     --silent \
176 |     --user "${CREDENTIALS}" \
177 |     --header 'Accept: application/json' \
178 |     --header 'Content-Type: application/json' \
179 |     --header 'X-Requested-By: cli' \
180 |     --output permissions_result.json \
181 |     --write-out '%{http_code}\n' \
182 |     --data @permissions-dashboard.json \
183 |     "${URL}/api/roles")
184 | 
185 |   result=${?}
186 | 
187 |   if [ ${result} -eq 0 ] && { [ ${code} -eq 200 ] || [ ${code} -eq 201 ]; }
188 |   then
189 |     echo "successful"
190 |   else
191 |     echo "code: ${code}"
192 |     cat permissions_result.json
193 |     jq --raw-output '.message' permissions_result.json 2> /dev/null
194 |   fi
195 | 
196 |   rm -f permissions_result.json
197 | }
198 | 
199 | create_input_streams() {
200 | 
201 |   echo -e "\nimport input stream for plaintext"
202 |   # Create Raw/Plaintext TCP input
203 |   code=$(curl \
204 |     --silent \
205 |     --user "${CREDENTIALS}" \
206 |     --header 'Accept: application/json' \
207 |     --header 'Content-Type: application/json' \
208 |     --header 'X-Requested-By: curl' \
209 |     --output input_plaintext_result.json \
210 |     --data @input-raw-tcp.json \
211 |     --write-out '%{http_code}\n' \
212 |     "${URL}/api/system/inputs?pretty=true")
213 | 
214 |   result=${?}
215 | 
216 |   if [ ${result} -eq 0 ] && { [ ${code} -eq 200 ] || [ ${code} -eq 201 ]; }
217 |   then
218 |     echo "successful"
219 |   else
220 |     echo "code: ${code}"
221 |     cat input_plaintext_result.json
222 |     jq --raw-output '.message' input_plaintext_result.json 2> /dev/null
223 |   fi
224 | 
225 |   echo -e "\nimport input stream for syslog"
226 |   # Create Syslog TCP input
227 |   code=$(curl \
228 |     --silent \
229 |     --user "${CREDENTIALS}" \
230 |     --header 'Accept: application/json' \
231 |     --header 'Content-Type: application/json' \
232 |     --header 'X-Requested-By: curl' \
233 |     --output input_syslog_result.json \
234 |     --write-out '%{http_code}\n' \
235 |     --data @input-syslog-tcp.json \
236 |     "${URL}/api/system/inputs?pretty=true")
237 | 
238 |   result=${?}
239 | 
240 |   if [ ${result} -eq 0 ] && { [ ${code} -eq 200 ] || [ ${code} -eq 201 ]; }
241 |   then
242 |     echo "successful"
243 |   else
244 |     echo "code: ${code}"
245 |     cat input_syslog_result.json
246 |     jq --raw-output '.message' input_syslog_result.json 2> /dev/null
247 |   fi
248 | 
249 |   sleep 5s
250 | }
251 | 
252 | send_messages() {
253 | 
254 |   echo -e "\nsend message to RAW input stream"
255 | 
256 |   # Send message to Raw/Plaintext TCP input (${NC} resolves to ncat or nc, see above)
257 |   echo 'plaintext' | ${NC} -w5 127.0.0.1 5555
258 | 
259 |   echo -e "\nsend message to syslog input stream"
260 |   # Send message to Syslog TCP input
261 |   echo '<0>1 2018-07-04T12:00:00.000Z test.example.com test - - - syslog' | ${NC} -w5 127.0.0.1 514
262 | 
263 |   sleep 5s
264 | }
265 | 
266 | validate_messages() {
267 | 
268 |   echo -e "\ncheck received messages"
269 |   # Check messages received by Raw/Plaintext TCP input
270 |   TOTAL_MESSAGES=$(curl \
271 |     --silent \
272 |     --user "${CREDENTIALS}" \
273 |     --header 'Accept: application/json' \
274 |     "${URL}/api/search/universal/relative/?pretty=true&query=plaintext&range=0" | jq '.total_results')
275 | 
276 |   echo "plaintext messages found: '${TOTAL_MESSAGES}'"
277 | 
278 |   if [ "${TOTAL_MESSAGES}" -ne 1 ]
279 |   then
280 |     echo "Expected to find 1 message from Raw/Plaintext TCP input"
281 |     exit 1
282 |   fi
283 | 
284 |   # Check messages received by Syslog TCP input
285 |   TOTAL_MESSAGES=$(curl \
286 |     --silent \
287 |     --user "${CREDENTIALS}" \
288 |     --header 'Accept: application/json' \
289 |     "${URL}/api/search/universal/relative/?pretty=true&query=syslog&range=0" | jq '.total_results')
290 | 
291 |   echo "syslog messages found: '${TOTAL_MESSAGES}'"
292 | 
293 |   if [ "${TOTAL_MESSAGES}" -ne 1 ]
294 |   then
295 |     echo "Expected to find 1 message from Syslog TCP input"
296 |     exit 1
297 |   fi
298 | 
299 |   echo ""
300 | }
301 | 
302 | inspect() {
303 | 
304 |   echo ""
305 |   echo "inspect needed containers"
306 |   for d in $(docker ps --quiet)
307 |   do
308 |     # docker inspect --format "{{lower .Name}}" ${d}
309 |     c=$(docker inspect --format '{{with .State}} {{$.Name}} has pid {{.Pid}} {{end}}' ${d})
310 |     s=$(docker inspect --format '{{json .State.Health }}' ${d} | jq --raw-output .Status)
311 | 
312 |     printf "%-40s - %s\n" "${c}" "${s}"
313 |   done
314 | }
315 | 
316 | run() {
317 | 
318 |   pushd test > /dev/null
319 | 
320 |   compose_up
321 | 
322 |   wait_for_port
323 |   wait_for_application
324 |   inspect
325 |   cluster_state
326 |   create_roles
327 |   create_input_streams
328 |   send_messages
329 |   validate_messages
330 | 
331 |   compose_down
332 | 
333 |   popd > /dev/null
334 | }
335 | 
336 | run
337 | 
338 | exit 0
339 | 
--------------------------------------------------------------------------------
/test/linter.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -ex
2 | 
3 | 
4 | HADOLINT_VERSION='2.10.0'
5 | 
6 | hadolint() {
7 |   docker run --rm -i "hadolint/hadolint:${HADOLINT_VERSION}" < "$1"
8 | }
9 | 
10 | # lint all dockerfiles
11 | hadolint docker/oss/Dockerfile
12 | hadolint docker/enterprise/Dockerfile
13 | hadolint docker/forwarder/Dockerfile
14 | 
15 | shellcheck \
16 |   --external-sources \
17 |   --exclude=SC2086,SC2236 \
18 |   *.sh
19 | 
--------------------------------------------------------------------------------
/test/permissions-dashboard.json:
--------------------------------------------------------------------------------
1 | {
2 |   "name": "Dashboard Access",
3 |   "description": "Dashboard Access",
4 |   "permissions": [
5 |     "dashboards:*",
6 |     "dashboards:create:*"
7 |   ],
8 |   "read_only": false
9 | }
10 | 
--------------------------------------------------------------------------------
/version.yml:
--------------------------------------------------------------------------------
1 | # For pre-releases: patch_version=0-beta.1, patch_version=0-rc.1
2 | # For GA releases: patch_version=0
3 | graylog:
4 |   major_version: '6'
5 |   minor_version: '2'
6 |   patch_version: 0-SNAPSHOT
7 |   release: 1
8 | forwarder:
9 |   version: '6.1'
10 |   release: 1
11 | 
--------------------------------------------------------------------------------
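# Usage sketch (an assumption based on the files above, not a file in this
# repository): both test scripts are meant to be run from the repository root.
# integration_test.sh changes into test/ by itself and expects Docker, a
# docker-compose binary or the "docker compose" plugin, curl, and git on the
# host (jq is installed on demand):
#
#   ./test/linter.sh            # hadolint the Dockerfiles, shellcheck the scripts
#   ./test/integration_test.sh  # build the OSS image, boot the stack, smoke-test both inputs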