├── logstash ├── config │ ├── logstash-oss.yml │ ├── logstash-full.yml │ ├── pipelines.yml │ ├── log4j2.properties │ └── log4j2.file.properties ├── env2yaml │ ├── env2yaml-amd64 │ └── env2yaml-arm64 ├── pipeline │ └── default.conf ├── bin │ └── docker-entrypoint └── Dockerfile ├── elasticsearch ├── config │ ├── elasticsearch.yml │ └── log4j2.properties ├── bin │ ├── docker-openjdk │ └── docker-entrypoint.sh └── Dockerfile ├── kibana ├── config │ └── kibana.yml ├── Dockerfile └── bin │ └── kibana-docker ├── .github └── workflows │ ├── kibana.yml │ ├── logstash.yml │ ├── elasticsearch.yml │ └── pr_auto_approval.yml └── README.md /logstash/config/logstash-oss.yml: -------------------------------------------------------------------------------- 1 | http.host: "0.0.0.0" 2 | -------------------------------------------------------------------------------- /elasticsearch/config/elasticsearch.yml: -------------------------------------------------------------------------------- 1 | cluster.name: "docker-cluster" 2 | network.host: 0.0.0.0 3 | -------------------------------------------------------------------------------- /logstash/env2yaml/env2yaml-amd64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/dockerfiles/HEAD/logstash/env2yaml/env2yaml-amd64 -------------------------------------------------------------------------------- /logstash/env2yaml/env2yaml-arm64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/dockerfiles/HEAD/logstash/env2yaml/env2yaml-arm64 -------------------------------------------------------------------------------- /logstash/config/logstash-full.yml: -------------------------------------------------------------------------------- 1 | http.host: "0.0.0.0" 2 | xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] 3 | 
-------------------------------------------------------------------------------- /logstash/pipeline/default.conf: -------------------------------------------------------------------------------- 1 | input { 2 | beats { 3 | port => 5044 4 | } 5 | } 6 | 7 | output { 8 | stdout { 9 | codec => rubydebug 10 | } 11 | } 12 | 13 | -------------------------------------------------------------------------------- /kibana/config/kibana.yml: -------------------------------------------------------------------------------- 1 | # 2 | # ** THIS IS AN AUTO-GENERATED FILE ** 3 | # 4 | 5 | # Default Kibana configuration for docker target 6 | server.host: "0.0.0.0" 7 | server.shutdownTimeout: "5s" 8 | elasticsearch.hosts: [ "http://elasticsearch:9200" ] 9 | monitoring.ui.container.elasticsearch.enabled: true -------------------------------------------------------------------------------- /logstash/config/pipelines.yml: -------------------------------------------------------------------------------- 1 | # This file is where you define your pipelines. You can define multiple. 
2 | # For more information on multiple pipelines, see the documentation: 3 | # https://www.elastic.co/guide/en/logstash/current/multiple-pipelines.html 4 | 5 | - pipeline.id: main 6 | path.config: "/usr/share/logstash/pipeline" 7 | -------------------------------------------------------------------------------- /elasticsearch/bin/docker-openjdk: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -Eeuo pipefail 4 | 5 | # Update "cacerts" bundle to use Ubuntu's CA certificates (and make sure it 6 | # stays up-to-date with changes to Ubuntu's store) 7 | 8 | trust extract \ 9 | --overwrite \ 10 | --format=java-cacerts \ 11 | --filter=ca-anchors \ 12 | --purpose=server-auth \ 13 | /usr/share/elasticsearch/jdk/lib/security/cacerts 14 | -------------------------------------------------------------------------------- /.github/workflows/kibana.yml: -------------------------------------------------------------------------------- 1 | name: Kibana Docker Image CI 2 | 3 | on: 4 | push: 5 | branches: [ "7.16", "7.17", "8.0", "8.1", "8.2", "8.3", "8.4", "8.5"] 6 | pull_request: 7 | branches: [ "7.16", "7.17", "8.0", "8.1", "8.2", "8.3", "8.4", "8.5"] 8 | 9 | jobs: 10 | 11 | build: 12 | 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - uses: actions/checkout@v2 17 | 18 | - name: Build the Kibana Docker image 19 | run: docker build kibana 20 | -------------------------------------------------------------------------------- /.github/workflows/logstash.yml: -------------------------------------------------------------------------------- 1 | name: Logstash Docker Image CI 2 | 3 | on: 4 | push: 5 | branches: [ "7.16", "7.17", "8.0", "8.1", "8.2", "8.3", "8.4", "8.5"] 6 | pull_request: 7 | branches: [ "7.16", "7.17", "8.0", "8.1", "8.2", "8.3", "8.4", "8.5"] 8 | 9 | jobs: 10 | 11 | build: 12 | 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - uses: actions/checkout@v2 17 | 18 | - name: Build the Logstash Docker image 
21 |       run: docker build logstash
22 | 
--------------------------------------------------------------------------------
/.github/workflows/elasticsearch.yml:
--------------------------------------------------------------------------------
 1 | name: Elasticsearch Docker Image CI
 2 | 
 3 | on:
 4 |   push:
 5 |     branches: [ "7.16", "7.17", "8.0", "8.1", "8.2", "8.3", "8.4", "8.5"]
 6 |   pull_request:
 7 |     branches: [ "7.16", "7.17", "8.0", "8.1", "8.2", "8.3", "8.4", "8.5"]
 8 | 
 9 | jobs:
10 | 
11 |   build:
12 | 
13 |     runs-on: ubuntu-latest
14 | 
15 |     steps:
16 |     # checkout@v2 targets the removed Node 12 runtime and is deprecated; v4 is
17 |     # the currently supported release and is a drop-in replacement here.
18 |     - uses: actions/checkout@v4
19 | 
20 |     - name: Build the Elasticsearch Docker image
21 |       run: docker build elasticsearch
22 | 
--------------------------------------------------------------------------------
/logstash/config/log4j2.properties:
--------------------------------------------------------------------------------
 1 | status = error
 2 | name = LogstashPropertiesConfig
 3 | 
 4 | appender.console.type = Console
 5 | appender.console.name = plain_console
 6 | appender.console.layout.type = PatternLayout
 7 | appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c]%notEmpty{[%X{pipeline.id}]}%notEmpty{[%X{plugin.id}]} %m%n
 8 | 
 9 | appender.json_console.type = Console
10 | appender.json_console.name = json_console
11 | appender.json_console.layout.type = JSONLayout
12 | appender.json_console.layout.compact = true
13 | appender.json_console.layout.eventEol = true
14 | 
15 | rootLogger.level = ${sys:ls.log.level}
16 | rootLogger.appenderRef.console.ref = ${sys:ls.log.format}_console
17 | 
--------------------------------------------------------------------------------
/.github/workflows/pr_auto_approval.yml:
--------------------------------------------------------------------------------
 1 | on:
 2 |   pull_request:
 3 |     types:
 4 |       - opened
 5 |     branches:
 6 |       - '*'
 7 | 
 8 | jobs:
 9 |   approve:
10 |     name: Auto-approve docker push pr
11 |     runs-on: ubuntu-latest
12 |     if: |
13 |       startsWith(github.event.pull_request.head.ref,
'docker_files_push_') && 14 | github.event.pull_request.user.login == 'elastic-vault-github-plugin-prod[bot]' 15 | permissions: 16 | pull-requests: write 17 | contents: write 18 | 19 | steps: 20 | - name: Debug PR info 21 | run: | 22 | echo "PR Head Ref: ${{ github.event.pull_request.head.ref }}" 23 | echo "PR User Login: ${{ github.event.pull_request.user.login }}" 24 | - name: Auto Approve 25 | uses: hmarr/auto-approve-action@v3 -------------------------------------------------------------------------------- /logstash/bin/docker-entrypoint: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | # Map environment variables to entries in logstash.yml. 4 | # Note that this will mutate logstash.yml in place if any such settings are found. 5 | # This may be undesirable, especially if logstash.yml is bind-mounted from the 6 | # host system. 7 | env2yaml /usr/share/logstash/config/logstash.yml 8 | 9 | if [[ -n "$LOG_STYLE" ]]; then 10 | case "$LOG_STYLE" in 11 | console) 12 | # This is the default. Nothing to do. 13 | ;; 14 | file) 15 | # Overwrite the default config with the stack config. Do this as a 16 | # copy, not a move, in case the container is restarted. 17 | cp -f /usr/share/logstash/config/log4j2.file.properties /usr/share/logstash/config/log4j2.properties 18 | ;; 19 | *) 20 | echo "ERROR: LOG_STYLE set to [$LOG_STYLE]. 
Expected [console] or [file]" >&2 21 | exit 1 ;; 22 | esac 23 | fi 24 | 25 | export LS_JAVA_OPTS="-Dls.cgroup.cpuacct.path.override=/ -Dls.cgroup.cpu.path.override=/ $LS_JAVA_OPTS" 26 | 27 | if [[ -z $1 ]] || [[ ${1:0:1} == '-' ]] ; then 28 | exec logstash "$@" 29 | else 30 | exec "$@" 31 | fi 32 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Dockerfiles 2 | 3 | ## About this Repository 4 | 5 | This repo is used to store the Dockerfiles which can be used to build Docker images 6 | for each product released in the stack. Those Dockerfiles were generated from the 7 | products' own repositories for which you can get the links in the sections below. 8 | Please note that **issues are disabled on this repo** and that all issues and PRs 9 | must be filed in the products' repositories. 10 | 11 | ## Elasticsearch 12 | 13 | **Elasticsearch** is a distributed, RESTful search and analytics engine capable of 14 | solving a growing number of use cases. As the heart of the Elastic Stack, it 15 | centrally stores your data so you can discover the expected and uncover the 16 | unexpected. 17 | 18 | For more information about Elasticsearch, please visit 19 | https://www.elastic.co/products/elasticsearch. 20 | 21 | ### Where to file issues and PRs 22 | 23 | - [Issues](https://github.com/elastic/elasticsearch/issues) 24 | - [PRs](https://github.com/elastic/elasticsearch/pulls) 25 | 26 | ### Where to get help 27 | 28 | - [Elasticsearch Discuss Forums](https://discuss.elastic.co/c/elasticsearch) 29 | - [Elasticsearch Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html) 30 | 31 | ## Kibana 32 | 33 | **Kibana** lets you visualize your Elasticsearch data and navigate the Elastic Stack, 34 | so you can do anything from learning why you're getting paged at 2:00 a.m. 
to
35 | understanding the impact rain might have on your quarterly numbers.
36 | 
37 | For more information about Kibana, please visit
38 | https://www.elastic.co/products/kibana.
39 | 
40 | ### Where to file issues and PRs
41 | 
42 | - [Issues](https://github.com/elastic/kibana/issues)
43 | - [PRs](https://github.com/elastic/kibana/pulls)
44 | 
45 | 
46 | ### Where to get help
47 | 
48 | - [Kibana Discuss Forums](https://discuss.elastic.co/c/kibana)
49 | - [Kibana Documentation](https://www.elastic.co/guide/en/kibana/current/index.html)
50 | 
51 | ## Logstash
52 | 
53 | **Logstash** is a server-side data processing pipeline that ingests data from a
54 | multitude of sources simultaneously, transforms it, and then sends it to your
55 | favorite "stash."
56 | 
57 | For more information about Logstash, please visit
58 | https://www.elastic.co/products/logstash.
59 | 
60 | ### Where to file issues and PRs
61 | 
62 | - [Issues](https://github.com/elastic/logstash/issues)
63 | - [PRs](https://github.com/elastic/logstash/pulls)
64 | 
65 | ### Where to get help
66 | 
67 | - [Logstash Discuss Forums](https://discuss.elastic.co/c/logstash)
68 | - [Logstash Documentation](https://www.elastic.co/guide/en/logstash/current/index.html)
69 | 
70 | ## Still need help?
71 | 
72 | You can learn more about the Elastic Community and also understand how to get more help
73 | by visiting [Elastic Community](https://www.elastic.co/community).
74 | 
75 | 
76 | This software is governed by its applicable licenses,
77 | and includes the full set of [free
78 | features](https://www.elastic.co/subscriptions).
79 | 
80 | View the detailed release notes
81 | [here](https://www.elastic.co/guide/en/elasticsearch/reference/8.17/es-release-notes.html).
82 | -------------------------------------------------------------------------------- /elasticsearch/bin/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Files created by Elasticsearch should always be group writable too 5 | umask 0002 6 | 7 | # Allow user specify custom CMD, maybe bin/elasticsearch itself 8 | # for example to directly specify `-E` style parameters for elasticsearch on k8s 9 | # or simply to run /bin/bash to check the image 10 | if [[ "$1" == "eswrapper" || $(basename "$1") == "elasticsearch" ]]; then 11 | # Rewrite CMD args to remove the explicit command, 12 | # so that we are backwards compatible with the docs 13 | # from the previous Elasticsearch versions < 6 14 | # and configuration option: 15 | # https://www.elastic.co/guide/en/elasticsearch/reference/5.6/docker.html#_d_override_the_image_8217_s_default_ulink_url_https_docs_docker_com_engine_reference_run_cmd_default_command_or_options_cmd_ulink 16 | # Without this, user could specify `elasticsearch -E x.y=z` but 17 | # `bin/elasticsearch -E x.y=z` would not work. In any case, 18 | # we want to continue through this script, and not exec early. 19 | set -- "${@:2}" 20 | else 21 | # Run whatever command the user wanted 22 | exec "$@" 23 | fi 24 | 25 | # Allow environment variables to be set by creating a file with the 26 | # contents, and setting an environment variable with the suffix _FILE to 27 | # point to it. This can be used to provide secrets to a container, without 28 | # the values being specified explicitly when running the container. 29 | # 30 | # This is also sourced in elasticsearch-env, and is only needed here 31 | # as well because we use ELASTIC_PASSWORD below. Sourcing this script 32 | # is idempotent. 
33 | source /usr/share/elasticsearch/bin/elasticsearch-env-from-file 34 | 35 | if [[ -f bin/elasticsearch-users ]]; then 36 | # Check for the ELASTIC_PASSWORD environment variable to set the 37 | # bootstrap password for Security. 38 | # 39 | # This is only required for the first node in a cluster with Security 40 | # enabled, but we have no way of knowing which node we are yet. We'll just 41 | # honor the variable if it's present. 42 | if [[ -n "$ELASTIC_PASSWORD" ]]; then 43 | [[ -f /usr/share/elasticsearch/config/elasticsearch.keystore ]] || (elasticsearch-keystore create) 44 | if ! (elasticsearch-keystore has-passwd --silent) ; then 45 | # keystore is unencrypted 46 | if ! (elasticsearch-keystore list | grep -q '^bootstrap.password$'); then 47 | (echo "$ELASTIC_PASSWORD" | elasticsearch-keystore add -x 'bootstrap.password') 48 | fi 49 | else 50 | # keystore requires password 51 | if ! (echo "$KEYSTORE_PASSWORD" \ 52 | | elasticsearch-keystore list | grep -q '^bootstrap.password$') ; then 53 | COMMANDS="$(printf "%s\n%s" "$KEYSTORE_PASSWORD" "$ELASTIC_PASSWORD")" 54 | (echo "$COMMANDS" | elasticsearch-keystore add -x 'bootstrap.password') 55 | fi 56 | fi 57 | fi 58 | fi 59 | 60 | if [[ -n "$ES_LOG_STYLE" ]]; then 61 | case "$ES_LOG_STYLE" in 62 | console) 63 | # This is the default. Nothing to do. 64 | ;; 65 | file) 66 | # Overwrite the default config with the stack config. Do this as a 67 | # copy, not a move, in case the container is restarted. 68 | cp -f /usr/share/elasticsearch/config/log4j2.file.properties /usr/share/elasticsearch/config/log4j2.properties 69 | ;; 70 | *) 71 | echo "ERROR: ES_LOG_STYLE set to [$ES_LOG_STYLE]. 
Expected [console] or [file]" >&2 72 | exit 1 ;; 73 | esac 74 | fi 75 | 76 | if [[ -n "$ENROLLMENT_TOKEN" ]]; then 77 | POSITIONAL_PARAMETERS="--enrollment-token $ENROLLMENT_TOKEN" 78 | else 79 | POSITIONAL_PARAMETERS="" 80 | fi 81 | 82 | # Signal forwarding and child reaping is handled by `tini`, which is the 83 | # actual entrypoint of the container 84 | exec /usr/share/elasticsearch/bin/elasticsearch "$@" $POSITIONAL_PARAMETERS <<<"$KEYSTORE_PASSWORD" 85 | -------------------------------------------------------------------------------- /logstash/Dockerfile: -------------------------------------------------------------------------------- 1 | # This Dockerfile was generated from templates/Dockerfile.erb 2 | 3 | 4 | 5 | FROM ubuntu:20.04 6 | 7 | RUN for iter in {1..10}; do \ 8 | export DEBIAN_FRONTEND=noninteractive && \ 9 | apt-get update -y && \ 10 | apt-get upgrade -y && \ 11 | apt-get install -y procps findutils tar gzip && \ 12 | apt-get install -y locales && \ 13 | apt-get install -y curl && \ 14 | apt-get clean all && \ 15 | locale-gen 'en_US.UTF-8' && \ 16 | apt-get clean metadata && \ 17 | exit_code=0 && break || exit_code=$? && \ 18 | echo "packaging error: retry $iter in 10s" && \ 19 | apt-get clean all && \ 20 | apt-get clean metadata && \ 21 | sleep 10; done; \ 22 | (exit $exit_code) 23 | 24 | # Provide a non-root user to run the process. 25 | RUN groupadd --gid 1000 logstash && \ 26 | adduser --uid 1000 --gid 1000 --home /usr/share/logstash --no-create-home logstash 27 | 28 | # Add Logstash itself. 
29 | RUN curl -Lo - https://artifacts.elastic.co/downloads/logstash/logstash-8.17.0-linux-$(arch).tar.gz | \ 30 | tar zxf - -C /usr/share && \ 31 | mv /usr/share/logstash-8.17.0 /usr/share/logstash && \ 32 | chown --recursive logstash:logstash /usr/share/logstash/ && \ 33 | chown -R logstash:root /usr/share/logstash && \ 34 | chmod -R g=u /usr/share/logstash && \ 35 | mkdir /licenses/ && \ 36 | mv /usr/share/logstash/NOTICE.TXT /licenses/NOTICE.TXT && \ 37 | mv /usr/share/logstash/LICENSE.txt /licenses/LICENSE.txt && \ 38 | find /usr/share/logstash -type d -exec chmod g+s {} \; && \ 39 | ln -s /usr/share/logstash /opt/logstash 40 | 41 | WORKDIR /usr/share/logstash 42 | ENV ELASTIC_CONTAINER true 43 | ENV PATH=/usr/share/logstash/bin:$PATH 44 | 45 | # Provide a minimal configuration, so that simple invocations will provide 46 | # a good experience. 47 | COPY config/logstash-full.yml config/logstash.yml 48 | COPY config/pipelines.yml config/log4j2.properties config/log4j2.file.properties config/ 49 | COPY pipeline/default.conf pipeline/logstash.conf 50 | 51 | RUN chown --recursive logstash:root config/ pipeline/ 52 | # Ensure Logstash gets the correct locale by default. 53 | ENV LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 54 | 55 | COPY env2yaml/env2yaml-amd64 env2yaml/env2yaml-arm64 env2yaml/ 56 | # Copy over the appropriate env2yaml artifact 57 | RUN env2yamlarch="$(dpkg --print-architecture)"; \ 58 | case "${env2yamlarch}" in \ 59 | 'x86_64'|'amd64') \ 60 | env2yamlarch=amd64; \ 61 | ;; \ 62 | 'aarch64'|'arm64') \ 63 | env2yamlarch=arm64; \ 64 | ;; \ 65 | *) echo >&2 "error: unsupported architecture '$env2yamlarch'"; exit 1 ;; \ 66 | esac; \ 67 | mkdir -p /usr/local/bin; \ 68 | cp env2yaml/env2yaml-${env2yamlarch} /usr/local/bin/env2yaml; \ 69 | rm -rf env2yaml 70 | # Place the startup wrapper script. 
71 | COPY bin/docker-entrypoint /usr/local/bin/ 72 | 73 | RUN chmod 0755 /usr/local/bin/docker-entrypoint 74 | 75 | 76 | USER 1000 77 | 78 | EXPOSE 9600 5044 79 | 80 | LABEL org.label-schema.schema-version="1.0" \ 81 | org.label-schema.vendor="Elastic" \ 82 | org.opencontainers.image.vendor="Elastic" \ 83 | org.label-schema.name="logstash" \ 84 | org.opencontainers.image.title="logstash" \ 85 | org.label-schema.version="8.17.0" \ 86 | org.opencontainers.image.version="8.17.0" \ 87 | org.label-schema.url="https://www.elastic.co/products/logstash" \ 88 | org.label-schema.vcs-url="https://github.com/elastic/logstash" \ 89 | org.label-schema.license="Elastic License" \ 90 | org.opencontainers.image.licenses="Elastic License" \ 91 | org.opencontainers.image.description="Logstash is a free and open server-side data processing pipeline that ingests data from a multitude of sources, transforms it, and then sends it to your favorite 'stash.'" \ 92 | org.label-schema.build-date=2024-12-05T00:55:38+00:00 \ 93 | org.opencontainers.image.created=2024-12-05T00:55:38+00:00 94 | 95 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint"] 96 | -------------------------------------------------------------------------------- /kibana/Dockerfile: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # This Dockerfile was generated from the template at: 3 | # src/dev/build/tasks/os_packages/docker_generator/templates/Dockerfile 4 | # 5 | # Beginning of multi stage Dockerfile 6 | ################################################################################ 7 | 8 | ################################################################################ 9 | # Build stage 0 `builder`: 10 | # Extract Kibana artifact 11 | ################################################################################ 12 | FROM ubuntu:20.04 AS builder 13 | 14 | RUN apt-get update && 
DEBIAN_FRONTEND=noninteractive apt-get install -y curl 15 | 16 | RUN cd /tmp && \ 17 | curl --retry 8 -s -L \ 18 | --output kibana.tar.gz \ 19 | https://artifacts.elastic.co/downloads/kibana/kibana-8.17.0-linux-$(arch).tar.gz && \ 20 | cd - 21 | 22 | RUN mkdir /usr/share/kibana 23 | WORKDIR /usr/share/kibana 24 | RUN tar \ 25 | --strip-components=1 \ 26 | -zxf /tmp/kibana.tar.gz 27 | 28 | # Ensure that group permissions are the same as user permissions. 29 | # This will help when relying on GID-0 to run Kibana, rather than UID-1000. 30 | # OpenShift does this, for example. 31 | # REF: https://docs.openshift.org/latest/creating_images/guidelines.html 32 | RUN chmod -R g=u /usr/share/kibana 33 | 34 | # Add an init process, check the checksum to make sure it's a match 35 | RUN set -e ; \ 36 | TINI_BIN="" ; \ 37 | case "$(arch)" in \ 38 | aarch64) \ 39 | TINI_BIN='tini-arm64' ; \ 40 | ;; \ 41 | x86_64) \ 42 | TINI_BIN='tini-amd64' ; \ 43 | ;; \ 44 | *) echo >&2 "Unsupported architecture $(arch)" ; exit 1 ;; \ 45 | esac ; \ 46 | TINI_VERSION='v0.19.0' ; \ 47 | curl --retry 8 -S -L -O "https://github.com/krallin/tini/releases/download/${TINI_VERSION}/${TINI_BIN}" ; \ 48 | curl --retry 8 -S -L -O "https://github.com/krallin/tini/releases/download/${TINI_VERSION}/${TINI_BIN}.sha256sum" ; \ 49 | sha256sum -c "${TINI_BIN}.sha256sum" ; \ 50 | rm "${TINI_BIN}.sha256sum" ; \ 51 | mv "${TINI_BIN}" /bin/tini ; \ 52 | chmod +x /bin/tini 53 | RUN mkdir -p /usr/share/fonts/local && \ 54 | curl --retry 8 -S -L -o /usr/share/fonts/local/NotoSansCJK-Regular.ttc https://github.com/googlefonts/noto-cjk/raw/NotoSansV2.001/NotoSansCJK-Regular.ttc && \ 55 | echo "5dcd1c336cc9344cb77c03a0cd8982ca8a7dc97d620fd6c9c434e02dcb1ceeb3 /usr/share/fonts/local/NotoSansCJK-Regular.ttc" | sha256sum -c - 56 | 57 | 58 | ################################################################################ 59 | # Build stage 1 (the actual Kibana image): 60 | # 61 | # Copy kibana from stage 0 62 | # Add 
entrypoint 63 | ################################################################################ 64 | FROM ubuntu:20.04 65 | EXPOSE 5601 66 | 67 | RUN for iter in {1..10}; do \ 68 | export DEBIAN_FRONTEND=noninteractive && \ 69 | apt-get update && \ 70 | apt-get upgrade -y && \ 71 | apt-get install -y --no-install-recommends \ 72 | fontconfig libnss3 curl ca-certificates && \ 73 | apt-get clean && \ 74 | rm -rf /var/lib/apt/lists/* && exit_code=0 && break || exit_code=$? && echo "apt-get error: retry $iter in 10s" && \ 75 | sleep 10; \ 76 | done; \ 77 | (exit $exit_code) 78 | 79 | # Bring in Kibana from the initial stage. 80 | COPY --from=builder --chown=1000:0 /usr/share/kibana /usr/share/kibana 81 | COPY --from=builder --chown=0:0 /bin/tini /bin/tini 82 | # Load reporting fonts 83 | COPY --from=builder --chown=0:0 /usr/share/fonts/local/NotoSansCJK-Regular.ttc /usr/share/fonts/local/NotoSansCJK-Regular.ttc 84 | RUN fc-cache -v 85 | WORKDIR /usr/share/kibana 86 | RUN ln -s /usr/share/kibana /opt/kibana 87 | 88 | ENV ELASTIC_CONTAINER true 89 | ENV PATH=/usr/share/kibana/bin:$PATH 90 | 91 | # Set some Kibana configuration defaults. 92 | COPY --chown=1000:0 config/kibana.yml /usr/share/kibana/config/kibana.yml 93 | 94 | # Add the launcher/wrapper script. It knows how to interpret environment 95 | # variables and translate them to Kibana CLI options. 96 | COPY bin/kibana-docker /usr/local/bin/ 97 | 98 | # Ensure gid 0 write permissions for OpenShift. 99 | RUN chmod g+ws /usr/share/kibana && \ 100 | find /usr/share/kibana -gid 0 -and -not -perm /g+w -exec chmod g+w {} \; 101 | 102 | # Remove the suid bit everywhere to mitigate "Stack Clash" 103 | RUN find / -xdev -perm -4000 -exec chmod u-s {} + 104 | 105 | # Provide a non-root user to run the process. 
106 | RUN groupadd --gid 1000 kibana && \ 107 | useradd --uid 1000 --gid 1000 -G 0 \ 108 | --home-dir /usr/share/kibana --no-create-home \ 109 | kibana 110 | 111 | LABEL org.label-schema.build-date="2024-12-11T11:12:31.173Z" \ 112 | org.label-schema.license="Elastic License" \ 113 | org.label-schema.name="Kibana" \ 114 | org.label-schema.schema-version="1.0" \ 115 | org.label-schema.url="https://www.elastic.co/products/kibana" \ 116 | org.label-schema.usage="https://www.elastic.co/guide/en/kibana/reference/index.html" \ 117 | org.label-schema.vcs-ref="86cbc85e621f4f3f701ed230f4e859ac5a80145b" \ 118 | org.label-schema.vcs-url="https://github.com/elastic/kibana" \ 119 | org.label-schema.vendor="Elastic" \ 120 | org.label-schema.version="8.17.0" \ 121 | org.opencontainers.image.created="2024-12-11T11:12:31.173Z" \ 122 | org.opencontainers.image.documentation="https://www.elastic.co/guide/en/kibana/reference/index.html" \ 123 | org.opencontainers.image.licenses="Elastic License" \ 124 | org.opencontainers.image.revision="86cbc85e621f4f3f701ed230f4e859ac5a80145b" \ 125 | org.opencontainers.image.source="https://github.com/elastic/kibana" \ 126 | org.opencontainers.image.title="Kibana" \ 127 | org.opencontainers.image.url="https://www.elastic.co/products/kibana" \ 128 | org.opencontainers.image.vendor="Elastic" \ 129 | org.opencontainers.image.version="8.17.0" 130 | 131 | 132 | ENTRYPOINT ["/bin/tini", "--"] 133 | 134 | 135 | CMD ["/usr/local/bin/kibana-docker"] 136 | 137 | 138 | USER 1000 139 | -------------------------------------------------------------------------------- /logstash/config/log4j2.file.properties: -------------------------------------------------------------------------------- 1 | status = error 2 | name = LogstashPropertiesConfig 3 | 4 | appender.console.type = Console 5 | appender.console.name = plain_console 6 | appender.console.layout.type = PatternLayout 7 | appender.console.layout.pattern = 
[%d{ISO8601}][%-5p][%-25c]%notEmpty{[%X{pipeline.id}]}%notEmpty{[%X{plugin.id}]} %m%n 8 | 9 | appender.json_console.type = Console 10 | appender.json_console.name = json_console 11 | appender.json_console.layout.type = JSONLayout 12 | appender.json_console.layout.compact = true 13 | appender.json_console.layout.eventEol = true 14 | 15 | appender.rolling.type = RollingFile 16 | appender.rolling.name = plain_rolling 17 | appender.rolling.fileName = ${sys:ls.logs}/logstash-plain.log 18 | appender.rolling.filePattern = ${sys:ls.logs}/logstash-plain-%d{yyyy-MM-dd}-%i.log.gz 19 | appender.rolling.policies.type = Policies 20 | appender.rolling.policies.time.type = TimeBasedTriggeringPolicy 21 | appender.rolling.policies.time.interval = 1 22 | appender.rolling.policies.time.modulate = true 23 | appender.rolling.layout.type = PatternLayout 24 | appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c]%notEmpty{[%X{pipeline.id}]}%notEmpty{[%X{plugin.id}]} %m%n 25 | appender.rolling.policies.size.type = SizeBasedTriggeringPolicy 26 | appender.rolling.policies.size.size = 100MB 27 | appender.rolling.strategy.type = DefaultRolloverStrategy 28 | appender.rolling.strategy.max = 30 29 | appender.rolling.avoid_pipelined_filter.type = PipelineRoutingFilter 30 | 31 | appender.json_rolling.type = RollingFile 32 | appender.json_rolling.name = json_rolling 33 | appender.json_rolling.fileName = ${sys:ls.logs}/logstash-json.log 34 | appender.json_rolling.filePattern = ${sys:ls.logs}/logstash-json-%d{yyyy-MM-dd}-%i.log.gz 35 | appender.json_rolling.policies.type = Policies 36 | appender.json_rolling.policies.time.type = TimeBasedTriggeringPolicy 37 | appender.json_rolling.policies.time.interval = 1 38 | appender.json_rolling.policies.time.modulate = true 39 | appender.json_rolling.layout.type = JSONLayout 40 | appender.json_rolling.layout.compact = true 41 | appender.json_rolling.layout.eventEol = true 42 | appender.json_rolling.policies.size.type = SizeBasedTriggeringPolicy 43 | 
appender.json_rolling.policies.size.size = 100MB 44 | appender.json_rolling.strategy.type = DefaultRolloverStrategy 45 | appender.json_rolling.strategy.max = 30 46 | appender.json_rolling.avoid_pipelined_filter.type = PipelineRoutingFilter 47 | 48 | appender.routing.type = PipelineRouting 49 | appender.routing.name = pipeline_routing_appender 50 | appender.routing.pipeline.type = RollingFile 51 | appender.routing.pipeline.name = appender-${ctx:pipeline.id} 52 | appender.routing.pipeline.fileName = ${sys:ls.logs}/pipeline_${ctx:pipeline.id}.log 53 | appender.routing.pipeline.filePattern = ${sys:ls.logs}/pipeline_${ctx:pipeline.id}.%i.log.gz 54 | appender.routing.pipeline.layout.type = PatternLayout 55 | appender.routing.pipeline.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n 56 | appender.routing.pipeline.policy.type = SizeBasedTriggeringPolicy 57 | appender.routing.pipeline.policy.size = 100MB 58 | appender.routing.pipeline.strategy.type = DefaultRolloverStrategy 59 | appender.routing.pipeline.strategy.max = 30 60 | 61 | rootLogger.level = ${sys:ls.log.level} 62 | rootLogger.appenderRef.console.ref = ${sys:ls.log.format}_console 63 | rootLogger.appenderRef.rolling.ref = ${sys:ls.log.format}_rolling 64 | rootLogger.appenderRef.routing.ref = pipeline_routing_appender 65 | 66 | # Slowlog 67 | 68 | appender.console_slowlog.type = Console 69 | appender.console_slowlog.name = plain_console_slowlog 70 | appender.console_slowlog.layout.type = PatternLayout 71 | appender.console_slowlog.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n 72 | 73 | appender.json_console_slowlog.type = Console 74 | appender.json_console_slowlog.name = json_console_slowlog 75 | appender.json_console_slowlog.layout.type = JSONLayout 76 | appender.json_console_slowlog.layout.compact = true 77 | appender.json_console_slowlog.layout.eventEol = true 78 | 79 | appender.rolling_slowlog.type = RollingFile 80 | appender.rolling_slowlog.name = plain_rolling_slowlog 81 | appender.rolling_slowlog.fileName 
= ${sys:ls.logs}/logstash-slowlog-plain.log 82 | appender.rolling_slowlog.filePattern = ${sys:ls.logs}/logstash-slowlog-plain-%d{yyyy-MM-dd}-%i.log.gz 83 | appender.rolling_slowlog.policies.type = Policies 84 | appender.rolling_slowlog.policies.time.type = TimeBasedTriggeringPolicy 85 | appender.rolling_slowlog.policies.time.interval = 1 86 | appender.rolling_slowlog.policies.time.modulate = true 87 | appender.rolling_slowlog.layout.type = PatternLayout 88 | appender.rolling_slowlog.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n 89 | appender.rolling_slowlog.policies.size.type = SizeBasedTriggeringPolicy 90 | appender.rolling_slowlog.policies.size.size = 100MB 91 | appender.rolling_slowlog.strategy.type = DefaultRolloverStrategy 92 | appender.rolling_slowlog.strategy.max = 30 93 | 94 | appender.json_rolling_slowlog.type = RollingFile 95 | appender.json_rolling_slowlog.name = json_rolling_slowlog 96 | appender.json_rolling_slowlog.fileName = ${sys:ls.logs}/logstash-slowlog-json.log 97 | appender.json_rolling_slowlog.filePattern = ${sys:ls.logs}/logstash-slowlog-json-%d{yyyy-MM-dd}-%i.log.gz 98 | appender.json_rolling_slowlog.policies.type = Policies 99 | appender.json_rolling_slowlog.policies.time.type = TimeBasedTriggeringPolicy 100 | appender.json_rolling_slowlog.policies.time.interval = 1 101 | appender.json_rolling_slowlog.policies.time.modulate = true 102 | appender.json_rolling_slowlog.layout.type = JSONLayout 103 | appender.json_rolling_slowlog.layout.compact = true 104 | appender.json_rolling_slowlog.layout.eventEol = true 105 | appender.json_rolling_slowlog.policies.size.type = SizeBasedTriggeringPolicy 106 | appender.json_rolling_slowlog.policies.size.size = 100MB 107 | appender.json_rolling_slowlog.strategy.type = DefaultRolloverStrategy 108 | appender.json_rolling_slowlog.strategy.max = 30 109 | 110 | logger.slowlog.name = slowlog 111 | logger.slowlog.level = trace 112 | logger.slowlog.appenderRef.console_slowlog.ref = 
${sys:ls.log.format}_console_slowlog 113 | logger.slowlog.appenderRef.rolling_slowlog.ref = ${sys:ls.log.format}_rolling_slowlog 114 | logger.slowlog.additivity = false 115 | 116 | logger.licensereader.name = logstash.licensechecker.licensereader 117 | logger.licensereader.level = error 118 | 119 | # Silence http-client by default 120 | logger.apache_http_client.name = org.apache.http 121 | logger.apache_http_client.level = fatal 122 | 123 | # Deprecation log 124 | appender.deprecation_rolling.type = RollingFile 125 | appender.deprecation_rolling.name = deprecation_plain_rolling 126 | appender.deprecation_rolling.fileName = ${sys:ls.logs}/logstash-deprecation.log 127 | appender.deprecation_rolling.filePattern = ${sys:ls.logs}/logstash-deprecation-%d{yyyy-MM-dd}-%i.log.gz 128 | appender.deprecation_rolling.policies.type = Policies 129 | appender.deprecation_rolling.policies.time.type = TimeBasedTriggeringPolicy 130 | appender.deprecation_rolling.policies.time.interval = 1 131 | appender.deprecation_rolling.policies.time.modulate = true 132 | appender.deprecation_rolling.layout.type = PatternLayout 133 | appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c]%notEmpty{[%X{pipeline.id}]}%notEmpty{[%X{plugin.id}]} %m%n 134 | appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy 135 | appender.deprecation_rolling.policies.size.size = 100MB 136 | appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy 137 | appender.deprecation_rolling.strategy.max = 30 138 | 139 | logger.deprecation.name = org.logstash.deprecation, deprecation 140 | logger.deprecation.level = WARN 141 | logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_plain_rolling 142 | logger.deprecation.additivity = false 143 | 144 | logger.deprecation_root.name = deprecation 145 | logger.deprecation_root.level = WARN 146 | logger.deprecation_root.appenderRef.deprecation_rolling.ref = deprecation_plain_rolling 147 | 
logger.deprecation_root.additivity = false 148 | -------------------------------------------------------------------------------- /elasticsearch/Dockerfile: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # This Dockerfile was generated from the template at distribution/src/docker/Dockerfile 3 | # 4 | # Beginning of multi stage Dockerfile 5 | ################################################################################ 6 | 7 | ################################################################################ 8 | # Build stage 1 `builder`: 9 | # Extract Elasticsearch artifact 10 | ################################################################################ 11 | 12 | FROM ubuntu:20.04 AS builder 13 | 14 | # Install required packages to extract the Elasticsearch distribution 15 | 16 | RUN for iter in 1 2 3 4 5 6 7 8 9 10; do \ 17 | apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y curl && \ 18 | exit_code=0 && break || \ 19 | exit_code=$? && echo "apt-get error: retry $iter in 10s" && sleep 10; \ 20 | done; \ 21 | exit $exit_code 22 | 23 | # `tini` is a tiny but valid init for containers. This is used to cleanly 24 | # control how ES and any child processes are shut down. 25 | # For wolfi we pick it from the blessed wolfi package registry. 26 | # 27 | # The tini GitHub page gives instructions for verifying the binary using 28 | # gpg, but the keyservers are slow to return the key and this can fail the 29 | # build. Instead, we check the binary against the published checksum. 
30 | RUN set -eux ; \ 31 | tini_bin="" ; \ 32 | case "$(arch)" in \ 33 | aarch64) tini_bin='tini-arm64' ;; \ 34 | x86_64) tini_bin='tini-amd64' ;; \ 35 | *) echo >&2 ; echo >&2 "Unsupported architecture $(arch)" ; echo >&2 ; exit 1 ;; \ 36 | esac ; \ 37 | curl --retry 10 -S -L -O https://github.com/krallin/tini/releases/download/v0.19.0/${tini_bin} ; \ 38 | curl --retry 10 -S -L -O https://github.com/krallin/tini/releases/download/v0.19.0/${tini_bin}.sha256sum ; \ 39 | sha256sum -c ${tini_bin}.sha256sum ; \ 40 | rm ${tini_bin}.sha256sum ; \ 41 | mv ${tini_bin} /bin/tini ; \ 42 | chmod 0555 /bin/tini 43 | 44 | RUN mkdir /usr/share/elasticsearch 45 | WORKDIR /usr/share/elasticsearch 46 | 47 | RUN curl --retry 10 -S -L --output /tmp/elasticsearch.tar.gz https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/elasticsearch-8.17.0-linux-$(arch).tar.gz 48 | 49 | RUN tar -zxf /tmp/elasticsearch.tar.gz --strip-components=1 50 | 51 | # The distribution includes a `config` directory, no need to create it 52 | COPY config/elasticsearch.yml config/ 53 | COPY config/log4j2.properties config/log4j2.docker.properties 54 | 55 | # 1. Configure the distribution for Docker 56 | # 2. Create required directory 57 | # 3. Move the distribution's default logging config aside 58 | # 4. Move the generated docker logging config so that it is the default 59 | # 5. Reset permissions on all directories 60 | # 6. Reset permissions on all files 61 | # 7. Make CLI tools executable 62 | # 8. Make some directories writable. `bin` must be writable because 63 | # plugins can install their own CLI utilities. 64 | # 9. Make some files writable 65 | RUN sed -i -e 's/ES_DISTRIBUTION_TYPE=tar/ES_DISTRIBUTION_TYPE=docker/' bin/elasticsearch-env && \ 66 | mkdir data && \ 67 | mv config/log4j2.properties config/log4j2.file.properties && \ 68 | mv config/log4j2.docker.properties config/log4j2.properties && \ 69 | find . -type d -exec chmod 0555 {} + && \ 70 | find . 
-type f -exec chmod 0444 {} + && \ 71 | chmod 0555 bin/* jdk/bin/* jdk/lib/jspawnhelper modules/x-pack-ml/platform/linux-*/bin/* && \ 72 | chmod 0775 bin config config/jvm.options.d data logs plugins && \ 73 | find config -type f -exec chmod 0664 {} + 74 | 75 | ################################################################################ 76 | # Build stage 2 (the actual Elasticsearch image): 77 | # 78 | # Copy elasticsearch from stage 1 79 | # Add entrypoint 80 | ################################################################################ 81 | 82 | FROM ubuntu:20.04 83 | 84 | # Change default shell to bash, then install required packages with retries. 85 | RUN yes no | dpkg-reconfigure dash && \ 86 | for iter in 1 2 3 4 5 6 7 8 9 10; do \ 87 | export DEBIAN_FRONTEND=noninteractive && \ 88 | apt-get update && \ 89 | apt-get upgrade -y && \ 90 | apt-get install -y --no-install-recommends \ 91 | ca-certificates curl netcat p11-kit unzip zip && \ 92 | apt-get clean && \ 93 | rm -rf /var/lib/apt/lists/* && \ 94 | exit_code=0 && break || \ 95 | exit_code=$? && echo "apt-get error: retry $iter in 10s" && sleep 10; \ 96 | done; \ 97 | exit $exit_code 98 | 99 | RUN groupadd -g 1000 elasticsearch && \ 100 | adduser --uid 1000 --gid 1000 --home /usr/share/elasticsearch elasticsearch && \ 101 | adduser elasticsearch root && \ 102 | chown -R 0:0 /usr/share/elasticsearch 103 | 104 | ENV ELASTIC_CONTAINER true 105 | 106 | WORKDIR /usr/share/elasticsearch 107 | 108 | COPY --from=builder --chown=0:0 /usr/share/elasticsearch /usr/share/elasticsearch 109 | 110 | COPY --from=builder --chown=0:0 /bin/tini /bin/tini 111 | 112 | ENV PATH /usr/share/elasticsearch/bin:$PATH 113 | ENV SHELL /bin/bash 114 | COPY bin/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh 115 | 116 | # 1. Sync the user and group permissions of /etc/passwd 117 | # 2. Set correct permissions of the entrypoint 118 | # 3. 
Ensure that there are no files with setuid or setgid, in order to mitigate "stackclash" attacks. 119 | # We've already run this in previous layers so it ought to be a no-op. 120 | # 4. Replace OpenJDK's built-in CA certificate keystore with the one from the OS 121 | # vendor. The latter is superior in several ways. 122 | # REF: https://github.com/elastic/elasticsearch-docker/issues/171 123 | # 5. Tighten up permissions on the ES home dir (the permissions of the contents are handled earlier) 124 | # 6. You can't install plugins that include configuration when running as `elasticsearch` and the `config` 125 | # dir is owned by `root`, because the installer tries to manipulate the permissions on the plugin's 126 | # config directory. 127 | RUN chmod g=u /etc/passwd && \ 128 | chmod 0555 /usr/local/bin/docker-entrypoint.sh && \ 129 | find / -xdev -perm -4000 -exec chmod ug-s {} + && \ 130 | chmod 0775 /usr/share/elasticsearch && \ 131 | chown elasticsearch bin config config/jvm.options.d data logs plugins 132 | 133 | # Update "cacerts" bundle to use Ubuntu's CA certificates (and make sure it 134 | # stays up-to-date with changes to Ubuntu's store) 135 | COPY bin/docker-openjdk /etc/ca-certificates/update.d/docker-openjdk 136 | RUN /etc/ca-certificates/update.d/docker-openjdk 137 | 138 | EXPOSE 9200 9300 139 | 140 | LABEL org.label-schema.build-date="2024-12-11T12:08:05.663969764Z" \ 141 | org.label-schema.license="Elastic-License-2.0" \ 142 | org.label-schema.name="Elasticsearch" \ 143 | org.label-schema.schema-version="1.0" \ 144 | org.label-schema.url="https://www.elastic.co/products/elasticsearch" \ 145 | org.label-schema.usage="https://www.elastic.co/guide/en/elasticsearch/reference/index.html" \ 146 | org.label-schema.vcs-ref="2b6a7fed44faa321997703718f07ee0420804b41" \ 147 | org.label-schema.vcs-url="https://github.com/elastic/elasticsearch" \ 148 | org.label-schema.vendor="Elastic" \ 149 | org.label-schema.version="8.17.0" \ 150 | 
org.opencontainers.image.created="2024-12-11T12:08:05.663969764Z" \ 151 | org.opencontainers.image.documentation="https://www.elastic.co/guide/en/elasticsearch/reference/index.html" \ 152 | org.opencontainers.image.licenses="Elastic-License-2.0" \ 153 | org.opencontainers.image.revision="2b6a7fed44faa321997703718f07ee0420804b41" \ 154 | org.opencontainers.image.source="https://github.com/elastic/elasticsearch" \ 155 | org.opencontainers.image.title="Elasticsearch" \ 156 | org.opencontainers.image.url="https://www.elastic.co/products/elasticsearch" \ 157 | org.opencontainers.image.vendor="Elastic" \ 158 | org.opencontainers.image.version="8.17.0" 159 | 160 | # Our actual entrypoint is `tini`, a minimal but functional init program. It 161 | # calls the entrypoint we provide, while correctly forwarding signals. 162 | ENTRYPOINT ["/bin/tini", "--", "/usr/local/bin/docker-entrypoint.sh"] 163 | # Dummy overridable parameter parsed by entrypoint 164 | CMD ["eswrapper"] 165 | 166 | USER 1000:0 167 | 168 | ################################################################################ 169 | # End of multi-stage Dockerfile 170 | ################################################################################ 171 | -------------------------------------------------------------------------------- /elasticsearch/config/log4j2.properties: -------------------------------------------------------------------------------- 1 | status = error 2 | 3 | ######## Server JSON ############################ 4 | appender.rolling.type = Console 5 | appender.rolling.name = rolling 6 | appender.rolling.layout.type = ECSJsonLayout 7 | appender.rolling.layout.dataset = elasticsearch.server 8 | 9 | ################################################ 10 | 11 | ################################################ 12 | 13 | rootLogger.level = info 14 | rootLogger.appenderRef.rolling.ref = rolling 15 | 16 | ######## Deprecation JSON ####################### 17 | appender.deprecation_rolling.type = Console 18 | 
appender.deprecation_rolling.name = deprecation_rolling 19 | appender.deprecation_rolling.layout.type = ECSJsonLayout 20 | # Intentionally follows a different pattern to above 21 | appender.deprecation_rolling.layout.dataset = deprecation.elasticsearch 22 | appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter 23 | 24 | appender.header_warning.type = HeaderWarningAppender 25 | appender.header_warning.name = header_warning 26 | ################################################# 27 | 28 | logger.deprecation.name = org.elasticsearch.deprecation 29 | logger.deprecation.level = WARN 30 | logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling 31 | logger.deprecation.appenderRef.header_warning.ref = header_warning 32 | logger.deprecation.additivity = false 33 | 34 | ######## Search slowlog JSON #################### 35 | appender.index_search_slowlog_rolling.type = Console 36 | appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling 37 | appender.index_search_slowlog_rolling.layout.type = ECSJsonLayout 38 | appender.index_search_slowlog_rolling.layout.dataset = elasticsearch.index_search_slowlog 39 | 40 | ################################################# 41 | 42 | ################################################# 43 | logger.index_search_slowlog_rolling.name = index.search.slowlog 44 | logger.index_search_slowlog_rolling.level = trace 45 | logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling 46 | logger.index_search_slowlog_rolling.additivity = false 47 | 48 | ######## Indexing slowlog JSON ################## 49 | appender.index_indexing_slowlog_rolling.type = Console 50 | appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling 51 | appender.index_indexing_slowlog_rolling.layout.type = ECSJsonLayout 52 | appender.index_indexing_slowlog_rolling.layout.dataset = elasticsearch.index_indexing_slowlog 53 | 54 | 
################################################# 55 | 56 | logger.index_indexing_slowlog.name = index.indexing.slowlog.index 57 | logger.index_indexing_slowlog.level = trace 58 | logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling 59 | logger.index_indexing_slowlog.additivity = false 60 | 61 | logger.org_apache_pdfbox.name = org.apache.pdfbox 62 | logger.org_apache_pdfbox.level = off 63 | 64 | logger.org_apache_poi.name = org.apache.poi 65 | logger.org_apache_poi.level = off 66 | 67 | logger.org_apache_fontbox.name = org.apache.fontbox 68 | logger.org_apache_fontbox.level = off 69 | 70 | logger.org_apache_xmlbeans.name = org.apache.xmlbeans 71 | logger.org_apache_xmlbeans.level = off 72 | 73 | logger.com_amazonaws.name = com.amazonaws 74 | logger.com_amazonaws.level = warn 75 | 76 | logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.name = com.amazonaws.jmx.SdkMBeanRegistrySupport 77 | logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.level = error 78 | 79 | logger.com_amazonaws_metrics_AwsSdkMetrics.name = com.amazonaws.metrics.AwsSdkMetrics 80 | logger.com_amazonaws_metrics_AwsSdkMetrics.level = error 81 | 82 | logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.name = com.amazonaws.auth.profile.internal.BasicProfileConfigFileLoader 83 | logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.level = error 84 | 85 | logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.name = com.amazonaws.services.s3.internal.UseArnRegionResolver 86 | logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.level = error 87 | 88 | appender.audit_rolling.type = Console 89 | appender.audit_rolling.name = audit_rolling 90 | appender.audit_rolling.layout.type = PatternLayout 91 | appender.audit_rolling.layout.pattern = {\ 92 | "type":"audit", \ 93 | "timestamp":"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}"\ 94 | %varsNotEmpty{, "cluster.name":"%enc{%map{cluster.name}}{JSON}"}\ 95 | 
%varsNotEmpty{, "cluster.uuid":"%enc{%map{cluster.uuid}}{JSON}"}\ 96 | %varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\ 97 | %varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\ 98 | %varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\ 99 | %varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\ 100 | %varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\ 101 | %varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\ 102 | %varsNotEmpty{, "authentication.type":"%enc{%map{authentication.type}}{JSON}"}\ 103 | %varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\ 104 | %varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\ 105 | %varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\ 106 | %varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\ 107 | %varsNotEmpty{, "user.realm_domain":"%enc{%map{user.realm_domain}}{JSON}"}\ 108 | %varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\ 109 | %varsNotEmpty{, "user.run_by.realm_domain":"%enc{%map{user.run_by.realm_domain}}{JSON}"}\ 110 | %varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\ 111 | %varsNotEmpty{, "user.run_as.realm_domain":"%enc{%map{user.run_as.realm_domain}}{JSON}"}\ 112 | %varsNotEmpty{, "user.roles":%map{user.roles}}\ 113 | %varsNotEmpty{, "apikey.id":"%enc{%map{apikey.id}}{JSON}"}\ 114 | %varsNotEmpty{, "apikey.name":"%enc{%map{apikey.name}}{JSON}"}\ 115 | %varsNotEmpty{, "authentication.token.name":"%enc{%map{authentication.token.name}}{JSON}"}\ 116 | %varsNotEmpty{, "authentication.token.type":"%enc{%map{authentication.token.type}}{JSON}"}\ 117 | %varsNotEmpty{, "cross_cluster_access":%map{cross_cluster_access}}\ 118 | %varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\ 119 | %varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\ 120 | %varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\ 121 | %varsNotEmpty{, 
"realm_domain":"%enc{%map{realm_domain}}{JSON}"}\ 122 | %varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\ 123 | %varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\ 124 | %varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\ 125 | %varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\ 126 | %varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\ 127 | %varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\ 128 | %varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\ 129 | %varsNotEmpty{, "indices":%map{indices}}\ 130 | %varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\ 131 | %varsNotEmpty{, "trace.id":"%enc{%map{trace.id}}{JSON}"}\ 132 | %varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\ 133 | %varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\ 134 | %varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\ 135 | %varsNotEmpty{, "put":%map{put}}\ 136 | %varsNotEmpty{, "delete":%map{delete}}\ 137 | %varsNotEmpty{, "change":%map{change}}\ 138 | %varsNotEmpty{, "create":%map{create}}\ 139 | %varsNotEmpty{, "invalidate":%map{invalidate}}\ 140 | }%n 141 | # "node.name" node name from the `elasticsearch.yml` settings 142 | # "node.id" node id which should not change between cluster restarts 143 | # "host.name" unresolved hostname of the local node 144 | # "host.ip" the local bound ip (i.e. the ip listening for connections) 145 | # "origin.type" a received REST request is translated into one or more transport requests. This indicates which processing layer generated the event "rest" or "transport" (internal) 146 | # "event.action" the name of the audited event, eg. "authentication_failed", "access_granted", "run_as_granted", etc. 
147 | # "authentication.type" one of "realm", "api_key", "token", "anonymous" or "internal" 148 | # "user.name" the subject name as authenticated by a realm 149 | # "user.run_by.name" the original authenticated subject name that is impersonating another one. 150 | # "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as. 151 | # "user.realm" the name of the realm that authenticated "user.name" 152 | # "user.realm_domain" if "user.realm" is under a domain, this is the name of the domain 153 | # "user.run_by.realm" the realm name of the impersonating subject ("user.run_by.name") 154 | # "user.run_by.realm_domain" if "user.run_by.realm" is under a domain, this is the name of the domain 155 | # "user.run_as.realm" if this "event.action" is of a run_as type, this is the realm name the impersonated user is looked up from 156 | # "user.run_as.realm_domain" if "user.run_as.realm" is under a domain, this is the name of the domain 157 | # "user.roles" the roles array of the user; these are the roles that are granting privileges 158 | # "apikey.id" this field is present if and only if the "authentication.type" is "api_key" 159 | # "apikey.name" this field is present if and only if the "authentication.type" is "api_key" 160 | # "authentication.token.name" this field is present if and only if the authenticating credential is a service account token 161 | # "authentication.token.type" this field is present if and only if the authenticating credential is a service account token 162 | # "cross_cluster_access" this field is present if and only if the associated authentication occurred cross cluster 163 | # "event.type" informs about what internal system generated the event; possible values are "rest", "transport", "ip_filter" and "security_config_change" 164 | # "origin.address" the remote address and port of the first network hop, i.e. 
a REST proxy or another cluster node 165 | # "realm" name of a realm that has generated an "authentication_failed" or an "authentication_successful"; the subject is not yet authenticated 166 | # "realm_domain" if "realm" is under a domain, this is the name of the domain 167 | # "url.path" the URI component between the port and the query string; it is percent (URL) encoded 168 | # "url.query" the URI component after the path and before the fragment; it is percent (URL) encoded 169 | # "request.method" the method of the HTTP request, i.e. one of GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT 170 | # "request.body" the content of the request body entity, JSON escaped 171 | # "request.id" a synthetic identifier for the incoming request, this is unique per incoming request, and consistent across all audit events generated by that request 172 | # "action" an action is the most granular operation that is authorized and this identifies it in a namespaced way (internal) 173 | # "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal) 174 | # "indices" the array of indices that the "action" is acting upon 175 | # "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header 176 | # "trace.id" an identifier conveyed by part of the "traceparent" request header 177 | # "x_forwarded_for" the addresses from the "X-Forwarded-For" request header, as a verbatim string value (not an array) 178 | # "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event 179 | # "rule" name of the applied rule if the "origin.type" is "ip_filter" 180 | # the "put", "delete", "change", "create", "invalidate" fields are only present 181 | # when the "event.type" is "security_config_change" and contain the security config change (as an object) taking effect 182 | 183 | 
logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail 184 | logger.xpack_security_audit_logfile.level = info 185 | logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling 186 | logger.xpack_security_audit_logfile.additivity = false 187 | 188 | logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature 189 | logger.xmlsig.level = error 190 | logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter 191 | logger.samlxml_decrypt.level = fatal 192 | logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter 193 | logger.saml2_decrypt.level = fatal -------------------------------------------------------------------------------- /kibana/bin/kibana-docker: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # ** THIS IS AN AUTO-GENERATED FILE ** 4 | # 5 | 6 | # Run Kibana, using environment variables to set longopts defining Kibana's 7 | # configuration. 8 | # 9 | # eg. 
Setting the environment variable: 10 | # 11 | # ELASTICSEARCH_LOGQUERIES=true 12 | # 13 | # will cause Kibana to be invoked with: 14 | # 15 | # --elasticsearch.logQueries=true 16 | 17 | kibana_vars=( 18 | apm_oss.apmAgentConfigurationIndex 19 | apm_oss.errorIndices 20 | apm_oss.indexPattern 21 | apm_oss.metricsIndices 22 | apm_oss.onboardingIndices 23 | apm_oss.sourcemapIndices 24 | apm_oss.spanIndices 25 | apm_oss.transactionIndices 26 | console.proxyConfig 27 | console.proxyFilter 28 | csp.strict 29 | csp.warnLegacyBrowsers 30 | csp.disableUnsafeEval 31 | csp.script_src 32 | csp.worker_src 33 | csp.style_src 34 | csp.connect_src 35 | csp.default_src 36 | csp.font_src 37 | csp.frame_src 38 | csp.img_src 39 | csp.frame_ancestors 40 | csp.report_uri 41 | csp.report_to 42 | csp.report_only.form_action 43 | permissionsPolicy.report_to 44 | data.autocomplete.valueSuggestions.terminateAfter 45 | data.autocomplete.valueSuggestions.timeout 46 | data.search.asyncSearch.waitForCompletion 47 | data.search.asyncSearch.keepAlive 48 | data.search.asyncSearch.batchedReduceSize 49 | data.search.asyncSearch.pollInterval 50 | data.search.sessions.defaultExpiration 51 | data.search.sessions.enabled 52 | data.search.sessions.maxUpdateRetries 53 | data.search.sessions.notTouchedInProgressTimeout 54 | data.search.sessions.notTouchedTimeout 55 | data.search.sessions.pageSize 56 | data.search.sessions.trackingInterval 57 | unifiedSearch.autocomplete.valueSuggestions.terminateAfter 58 | unifiedSearch.autocomplete.valueSuggestions.timeout 59 | unifiedSearch.autocomplete.querySuggestions.enabled 60 | unifiedSearch.autocomplete.valueSuggestions.enabled 61 | unifiedSearch.autocomplete.valueSuggestions.tiers 62 | elasticsearch.customHeaders 63 | elasticsearch.hosts 64 | elasticsearch.logQueries 65 | elasticsearch.password 66 | elasticsearch.pingTimeout 67 | elasticsearch.publicBaseUrl 68 | elasticsearch.requestHeadersWhitelist 69 | elasticsearch.requestTimeout 70 | 
elasticsearch.serviceAccountToken 71 | elasticsearch.shardTimeout 72 | elasticsearch.sniffInterval 73 | elasticsearch.sniffOnConnectionFault 74 | elasticsearch.sniffOnStart 75 | elasticsearch.ssl.alwaysPresentCertificate 76 | elasticsearch.ssl.certificate 77 | elasticsearch.ssl.certificateAuthorities 78 | elasticsearch.ssl.key 79 | elasticsearch.ssl.keyPassphrase 80 | elasticsearch.ssl.keystore.password 81 | elasticsearch.ssl.keystore.path 82 | elasticsearch.ssl.truststore.password 83 | elasticsearch.ssl.truststore.path 84 | elasticsearch.ssl.verificationMode 85 | elasticsearch.username 86 | enterpriseSearch.accessCheckTimeout 87 | enterpriseSearch.accessCheckTimeoutWarning 88 | enterpriseSearch.host 89 | externalUrl.policy 90 | i18n.locale 91 | interactiveSetup.enabled 92 | interactiveSetup.connectionCheck.interval 93 | kibana.autocompleteTerminateAfter 94 | kibana.autocompleteTimeout 95 | kibana.index 96 | logging.appenders 97 | logging.appenders.console 98 | logging.appenders.file 99 | logging.loggers 100 | logging.loggers.appenders 101 | logging.loggers.level 102 | logging.loggers.name 103 | logging.root 104 | logging.root.appenders 105 | logging.root.level 106 | map.emsUrl 107 | map.includeElasticMapsService 108 | map.tilemap.options.attribution 109 | map.tilemap.options.maxZoom 110 | map.tilemap.options.minZoom 111 | map.tilemap.options.subdomains 112 | map.tilemap.url 113 | migrations.batchSize 114 | migrations.maxBatchSizeBytes 115 | migrations.pollInterval 116 | migrations.retryAttempts 117 | migrations.scrollDuration 118 | migrations.skip 119 | monitoring.cluster_alerts.email_notifications.email_address 120 | monitoring.kibana.collection.enabled 121 | monitoring.kibana.collection.interval 122 | monitoring.ui.ccs.enabled 123 | monitoring.ui.container.elasticsearch.enabled 124 | monitoring.ui.container.logstash.enabled 125 | monitoring.ui.elasticsearch.hosts 126 | monitoring.ui.elasticsearch.logFetchCount 127 | monitoring.ui.elasticsearch.password 128 | 
monitoring.ui.elasticsearch.pingTimeout 129 | monitoring.ui.elasticsearch.ssl.certificateAuthorities 130 | monitoring.ui.elasticsearch.ssl.verificationMode 131 | monitoring.ui.elasticsearch.username 132 | monitoring.ui.enabled 133 | monitoring.ui.logs.index 134 | monitoring.ui.max_bucket_size 135 | monitoring.ui.min_interval_seconds 136 | newsfeed.enabled 137 | node.roles 138 | ops.cGroupOverrides.cpuAcctPath 139 | ops.cGroupOverrides.cpuPath 140 | ops.interval 141 | path.data 142 | pid.file 143 | profiler.signal 144 | regionmap 145 | savedObjects.maxImportExportSize 146 | savedObjects.maxImportPayloadBytes 147 | savedObjects.allowHttpApiAccess 148 | security.showInsecureClusterWarning 149 | server.basePath 150 | server.cdn.url 151 | server.compression.enabled 152 | server.compression.referrerWhitelist 153 | server.cors 154 | server.cors.allowCredentials 155 | server.cors.allowOrigin 156 | server.cors.enabled 157 | server.cors.origin 158 | server.customResponseHeaders 159 | server.defaultRoute 160 | server.host 161 | server.http2.allowUnsecure 162 | server.keepAliveTimeout 163 | server.maxPayload 164 | server.maxPayloadBytes 165 | server.name 166 | server.port 167 | server.protocol 168 | server.publicBaseUrl 169 | server.requestId.allowFromAnyIp 170 | server.requestId.ipAllowlist 171 | server.rewriteBasePath 172 | server.restrictInternalApis 173 | server.securityResponseHeaders.disableEmbedding 174 | server.securityResponseHeaders.permissionsPolicy 175 | server.securityResponseHeaders.referrerPolicy 176 | server.securityResponseHeaders.strictTransportSecurity 177 | server.securityResponseHeaders.xContentTypeOptions 178 | server.securityResponseHeaders.crossOriginOpenerPolicy 179 | server.shutdownTimeout 180 | server.socketTimeout 181 | server.ssl.cert 182 | server.ssl.certificate 183 | server.ssl.certificateAuthorities 184 | server.ssl.cipherSuites 185 | server.ssl.clientAuthentication 186 | server.ssl.enabled 187 | server.ssl.key 188 | server.ssl.keyPassphrase 189 
| server.ssl.keystore.password 190 | server.ssl.keystore.path 191 | server.ssl.redirectHttpFromPort 192 | server.ssl.supportedProtocols 193 | server.ssl.truststore.password 194 | server.ssl.truststore.path 195 | server.uuid 196 | server.xsrf.allowlist 197 | server.xsrf.disableProtection 198 | status.allowAnonymous 199 | status.v6ApiFormat 200 | telemetry.allowChangingOptInStatus 201 | telemetry.enabled 202 | telemetry.hidePrivacyStatement 203 | telemetry.optIn 204 | telemetry.sendUsageTo 205 | telemetry.sendUsageFrom 206 | tilemap.options.attribution 207 | tilemap.options.maxZoom 208 | tilemap.options.minZoom 209 | tilemap.options.subdomains 210 | tilemap.url 211 | vega.enableExternalUrls 212 | vis_type_vega.enableExternalUrls 213 | xpack.actions.allowedHosts 214 | xpack.actions.customHostSettings 215 | xpack.actions.email.domain_allowlist 216 | xpack.actions.enableFooterInEmail 217 | xpack.actions.enabledActionTypes 218 | xpack.actions.maxResponseContentLength 219 | xpack.actions.preconfigured 220 | xpack.actions.preconfiguredAlertHistoryEsIndex 221 | xpack.actions.proxyBypassHosts 222 | xpack.actions.proxyHeaders 223 | xpack.actions.proxyOnlyHosts 224 | xpack.actions.proxyRejectUnauthorizedCertificates 225 | xpack.actions.proxyUrl 226 | xpack.actions.rejectUnauthorized 227 | xpack.actions.responseTimeout 228 | xpack.actions.ssl.proxyVerificationMode 229 | xpack.actions.ssl.verificationMode 230 | xpack.alerting.healthCheck.interval 231 | xpack.alerting.invalidateApiKeysTask.interval 232 | xpack.alerting.invalidateApiKeysTask.removalDelay 233 | xpack.alerting.defaultRuleTaskTimeout 234 | xpack.alerting.rules.run.timeout 235 | xpack.alerting.rules.run.ruleTypeOverrides 236 | xpack.alerting.cancelAlertsOnRuleTimeout 237 | xpack.alerting.rules.minimumScheduleInterval.value 238 | xpack.alerting.rules.minimumScheduleInterval.enforce 239 | xpack.alerting.rules.run.actions.max 240 | xpack.alerting.rules.run.alerts.max 241 | 
xpack.alerting.rules.run.actions.connectorTypeOverrides 242 | xpack.alerting.maxScheduledPerMinute 243 | xpack.alerts.healthCheck.interval 244 | xpack.alerts.invalidateApiKeysTask.interval 245 | xpack.alerts.invalidateApiKeysTask.removalDelay 246 | xpack.apm.indices.error 247 | xpack.apm.indices.metric 248 | xpack.apm.indices.onboarding 249 | xpack.apm.indices.sourcemap 250 | xpack.apm.indices.span 251 | xpack.apm.indices.transaction 252 | xpack.apm.maxServiceEnvironments 253 | xpack.apm.searchAggregatedTransactions 254 | xpack.apm.serviceMapEnabled 255 | xpack.apm.serviceMapFingerprintBucketSize 256 | xpack.apm.serviceMapFingerprintGlobalBucketSize 257 | xpack.apm.ui.enabled 258 | xpack.apm.ui.maxTraceItems 259 | xpack.apm.ui.transactionGroupBucketSize 260 | xpack.banners.backgroundColor 261 | xpack.banners.disableSpaceBanners 262 | xpack.banners.placement 263 | xpack.banners.textColor 264 | xpack.banners.textContent 265 | xpack.cases.files.allowedMimeTypes 266 | xpack.cases.files.maxSize 267 | xpack.code.disk.thresholdEnabled 268 | xpack.code.disk.watermarkLow 269 | xpack.code.indexRepoFrequencyMs 270 | xpack.code.lsp.verbose 271 | xpack.code.maxWorkspace 272 | xpack.code.security.enableGitCertCheck 273 | xpack.code.security.gitHostWhitelist 274 | xpack.code.security.gitProtocolWhitelist 275 | xpack.code.ui.enabled 276 | xpack.code.updateRepoFrequencyMs 277 | xpack.code.verbose 278 | xpack.data_enhanced.search.sessions.defaultExpiration 279 | xpack.data_enhanced.search.sessions.enabled 280 | xpack.data_enhanced.search.sessions.maxUpdateRetries 281 | xpack.data_enhanced.search.sessions.notTouchedInProgressTimeout 282 | xpack.data_enhanced.search.sessions.notTouchedTimeout 283 | xpack.data_enhanced.search.sessions.pageSize 284 | xpack.data_enhanced.search.sessions.trackingInterval 285 | xpack.discoverEnhanced.actions.exploreDataInChart.enabled 286 | xpack.discoverEnhanced.actions.exploreDataInContextMenu.enabled 287 | xpack.encryptedSavedObjects.encryptionKey 288 | 
xpack.encryptedSavedObjects.keyRotation.decryptionOnlyKeys 289 | xpack.event_log.indexEntries 290 | xpack.event_log.logEntries 291 | xpack.fleet.agentPolicies 292 | xpack.fleet.agents.elasticsearch.host 293 | xpack.fleet.agents.elasticsearch.hosts 294 | xpack.fleet.agents.enabled 295 | xpack.fleet.agents.fleet_server.hosts 296 | xpack.fleet.agents.kibana.host 297 | xpack.fleet.agents.tlsCheckDisabled 298 | xpack.fleet.packages 299 | xpack.fleet.packageVerification.gpgKeyPath 300 | xpack.fleet.registryProxyUrl 301 | xpack.fleet.registryUrl 302 | xpack.graph.canEditDrillDownUrls 303 | xpack.graph.savePolicy 304 | xpack.infra.query.partitionFactor 305 | xpack.infra.query.partitionSize 306 | xpack.infra.sources.default.fields.container 307 | xpack.infra.sources.default.fields.host 308 | xpack.infra.sources.default.fields.message 309 | xpack.infra.sources.default.fields.pod 310 | xpack.infra.sources.default.fields.tiebreaker 311 | xpack.infra.sources.default.fields.timestamp 312 | xpack.infra.sources.default.logAlias 313 | xpack.infra.sources.default.metricAlias 314 | xpack.ingestManager.fleet.tlsCheckDisabled 315 | xpack.ingestManager.registryUrl 316 | xpack.observability.annotations.index 317 | xpack.observability.unsafe.alertDetails.metrics.enabled 318 | xpack.observability.unsafe.alertDetails.logs.enabled 319 | xpack.observability.unsafe.alertDetails.uptime.enabled 320 | xpack.observability.unsafe.alertDetails.observability.enabled 321 | xpack.observability.unsafe.thresholdRule.enabled 322 | xpack.productDocBase.artifactRepositoryUrl 323 | xpack.reporting.capture.browser.autoDownload 324 | xpack.reporting.capture.browser.chromium.disableSandbox 325 | xpack.reporting.capture.browser.chromium.inspect 326 | xpack.reporting.capture.browser.chromium.maxScreenshotDimension 327 | xpack.reporting.capture.browser.chromium.proxy.bypass 328 | xpack.reporting.capture.browser.chromium.proxy.enabled 329 | xpack.reporting.capture.browser.chromium.proxy.server 330 | 
xpack.reporting.capture.browser.type 331 | xpack.reporting.capture.concurrency 332 | xpack.reporting.capture.loadDelay 333 | xpack.reporting.capture.maxAttempts 334 | xpack.reporting.capture.networkPolicy 335 | xpack.reporting.capture.settleTime 336 | xpack.reporting.capture.timeout 337 | xpack.reporting.capture.timeouts.openUrl 338 | xpack.reporting.capture.timeouts.openUrl 339 | xpack.reporting.capture.timeouts.renderComplete 340 | xpack.reporting.capture.timeouts.waitForElements 341 | xpack.reporting.capture.viewport.height 342 | xpack.reporting.capture.viewport.width 343 | xpack.reporting.capture.zoom 344 | xpack.reporting.csv.checkForFormulas 345 | xpack.reporting.csv.enablePanelActionDownload 346 | xpack.reporting.csv.escapeFormulaValues 347 | xpack.reporting.csv.maxSizeBytes 348 | xpack.reporting.csv.scroll.duration 349 | xpack.reporting.csv.scroll.size 350 | xpack.reporting.csv.scroll.strategy 351 | xpack.reporting.csv.useByteOrderMarkEncoding 352 | xpack.reporting.enabled 353 | xpack.reporting.encryptionKey 354 | xpack.reporting.kibanaApp 355 | xpack.reporting.kibanaServer.hostname 356 | xpack.reporting.kibanaServer.port 357 | xpack.reporting.kibanaServer.protocol 358 | xpack.reporting.poll.jobCompletionNotifier.interval 359 | xpack.reporting.poll.jobCompletionNotifier.intervalErrorMultiplier 360 | xpack.reporting.poll.jobsRefresh.interval 361 | xpack.reporting.poll.jobsRefresh.intervalErrorMultiplier 362 | xpack.reporting.queue.indexInterval 363 | xpack.reporting.queue.pollEnabled 364 | xpack.reporting.queue.pollInterval 365 | xpack.reporting.queue.pollIntervalErrorMultiplier 366 | xpack.reporting.queue.timeout 367 | xpack.reporting.roles.allow 368 | xpack.reporting.roles.enabled 369 | xpack.ruleRegistry.write.enabled 370 | xpack.screenshotting.browser.chromium.disableSandbox 371 | xpack.security.accessAgreement.message 372 | xpack.security.audit.appender.fileName 373 | xpack.security.audit.appender.layout.highlight 374 | 
xpack.security.audit.appender.layout.pattern 375 | xpack.security.audit.appender.layout.type 376 | xpack.security.audit.appender.legacyLoggingConfig 377 | xpack.security.audit.appender.policy.interval 378 | xpack.security.audit.appender.policy.modulate 379 | xpack.security.audit.appender.policy.size 380 | xpack.security.audit.appender.policy.type 381 | xpack.security.audit.appender.strategy.max 382 | xpack.security.audit.appender.strategy.pattern 383 | xpack.security.audit.appender.strategy.type 384 | xpack.security.audit.appender.type 385 | xpack.security.audit.enabled 386 | xpack.security.audit.ignore_filters 387 | xpack.security.authc.http.autoSchemesEnabled 388 | xpack.security.authc.http.enabled 389 | xpack.security.authc.http.schemes 390 | xpack.security.authc.oidc.realm 391 | xpack.security.authc.providers 392 | xpack.security.authc.saml.maxRedirectURLSize 393 | xpack.security.authc.saml.realm 394 | xpack.security.authc.selector.enabled 395 | xpack.security.cookieName 396 | xpack.security.encryptionKey 397 | xpack.security.fipsMode.enabled 398 | xpack.security.loginAssistanceMessage 399 | xpack.security.loginHelp 400 | xpack.security.sameSiteCookies 401 | xpack.security.secureCookies 402 | xpack.security.session.cleanupInterval 403 | xpack.security.session.concurrentSessions.maxSessions 404 | xpack.security.session.idleTimeout 405 | xpack.security.session.lifespan 406 | xpack.security.sessionTimeout 407 | xpack.security.showInsecureClusterWarning 408 | xpack.securitySolution.alertMergeStrategy 409 | xpack.securitySolution.alertIgnoreFields 410 | xpack.securitySolution.maxExceptionsImportSize 411 | xpack.securitySolution.maxRuleImportExportSize 412 | xpack.securitySolution.maxRuleImportPayloadBytes 413 | xpack.securitySolution.maxTimelineImportExportSize 414 | xpack.securitySolution.maxTimelineImportPayloadBytes 415 | xpack.securitySolution.packagerTaskInterval 416 | xpack.securitySolution.prebuiltRulesPackageVersion 417 | xpack.spaces.maxSpaces 418 | 
xpack.task_manager.capacity
xpack.task_manager.claim_strategy
xpack.task_manager.auto_calculate_default_ech_capacity
xpack.task_manager.discovery.active_nodes_lookback
xpack.task_manager.discovery.interval
xpack.task_manager.kibanas_per_partition
xpack.task_manager.max_attempts
xpack.task_manager.max_workers
xpack.task_manager.monitored_aggregated_stats_refresh_rate
xpack.task_manager.monitored_stats_required_freshness
xpack.task_manager.monitored_stats_running_average_window
xpack.task_manager.monitored_stats_health_verbose_log.enabled
xpack.task_manager.monitored_stats_health_verbose_log.warn_delayed_task_start_in_seconds
xpack.task_manager.monitored_task_execution_thresholds
xpack.task_manager.poll_interval
xpack.task_manager.request_capacity
xpack.task_manager.version_conflict_threshold
xpack.task_manager.event_loop_delay.monitor
xpack.task_manager.event_loop_delay.warn_threshold
xpack.task_manager.worker_utilization_running_average_window
xpack.uptime.index
serverless
)

# Translate each recognized setting name into a CLI long option when the
# corresponding environment variable is set.  Flags are collected in a bash
# ARRAY (not a space-joined string) so that option values containing
# whitespace survive intact and glob characters are not pathname-expanded
# when the flags are passed to exec below.
longopts=()
for kibana_var in "${kibana_vars[@]}"; do
    # 'elasticsearch.hosts' -> 'ELASTICSEARCH_HOSTS'
    # Pure parameter expansion: uppercase, then replace '.' with '_'
    # (avoids an echo|tr subshell per variable).
    env_var="${kibana_var^^}"
    env_var="${env_var//./_}"

    # Indirectly look up the env var's value via the name of the var.
    # REF: http://tldp.org/LDP/abs/html/bashver2.html#EX78
    value="${!env_var}"
    if [[ -n $value ]]; then
        longopts+=("--${kibana_var}=${value}")
    fi
done

# Files created at run-time should be group-writable, for Openshift's sake.
umask 0002

# The virtual file /proc/self/cgroup should list the current cgroup
# membership. For each hierarchy, you can follow the cgroup path from
# this file to the cgroup filesystem (usually /sys/fs/cgroup/) and
# introspect the statistics for the cgroup for the given
# hierarchy. Alas, Docker breaks this by mounting the container
# statistics at the root while leaving the cgroup paths as the actual
# paths. Therefore, Kibana provides a mechanism to override
# reading the cgroup path from /proc/self/cgroup and instead uses the
# cgroup path defined the configuration properties
# ops.cGroupOverrides.cpuPath and ops.cGroupOverrides.cpuAcctPath.
# Therefore, we set this value here so that cgroup statistics are
# available for the container this process will run in.

# "${longopts[@]}" expands each collected flag as exactly one argument;
# "$@" forwards any extra arguments given to this entrypoint verbatim.
exec /usr/share/kibana/bin/kibana --ops.cGroupOverrides.cpuPath=/ --ops.cGroupOverrides.cpuAcctPath=/ "${longopts[@]}" "$@"