├── docker
    ├── elasticsearch
    │   ├── do_not_use.yml
    │   ├── build.sh
    │   ├── run.sh
    │   ├── config
    │   │   ├── elasticsearch.yml
    │   │   └── logging.yml
    │   └── Dockerfile
    ├── fluentd
    │   ├── build.sh
    │   ├── Dockerfile
    │   ├── install.sh
    │   ├── elasticsearch-template-es2x.json
    │   └── td-agent.conf
    ├── kibana
    │   ├── build.sh
    │   ├── Dockerfile
    │   └── run.sh
    └── kubernetes-events-printer
    │   ├── build.sh
    │   ├── Dockerfile
    │   └── events.sh
├── images
    ├── kibana1.png
    └── kibana2.png
├── es-env.yaml
├── update_fluentd-ds.sh
├── es-discovery-svc.yaml
├── es-svc.yaml
├── update_config.sh
├── k8s-events-printer.yaml
├── undeploy.sh
├── es-kibana.yaml
├── es-master.yaml_
├── es-client.yaml
├── es-fluentd-ds.yaml
├── es-data.yaml_
├── es-data-master.yaml.tmpl
├── deploy.sh
└── README.md

/docker/elasticsearch/do_not_use.yml:
--------------------------------------------------------------------------------
1 | path:
2 |   plugins: /elasticsearch/plugins
--------------------------------------------------------------------------------
/images/kibana1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoping378/elk-kubernetes/master/images/kibana1.png
--------------------------------------------------------------------------------
/images/kibana2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoping378/elk-kubernetes/master/images/kibana2.png
--------------------------------------------------------------------------------
/docker/fluentd/build.sh:
--------------------------------------------------------------------------------
1 | IMG=kayrus/fluentd-elasticsearch:1.20
2 | docker build -t $IMG .
3 | docker push $IMG
4 |
--------------------------------------------------------------------------------
/docker/kibana/build.sh:
--------------------------------------------------------------------------------
1 | IMG=kayrus/docker-kibana-kubernetes:4.6.1
2 | docker build -t $IMG .
3 | docker push $IMG
4 |
--------------------------------------------------------------------------------
/docker/elasticsearch/build.sh:
--------------------------------------------------------------------------------
1 | IMG=kayrus/docker-elasticsearch-kubernetes:2.4.0
2 | docker build -t $IMG .
3 | docker push $IMG
4 |
--------------------------------------------------------------------------------
/docker/kubernetes-events-printer/build.sh:
--------------------------------------------------------------------------------
1 | IMG=kayrus/kubernetes-events-printer:latest
2 | docker build -t $IMG .
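# Editor's note (hedged, hypothetical registry): the push below assumes you
# are logged in with push rights to the kayrus/ Docker Hub namespace. To
# publish the image elsewhere, retag it before pushing, e.g.:
#   docker tag $IMG registry.example.com/kubernetes-events-printer:latest
# The same pattern applies to the other build.sh scripts above.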
3 | docker push $IMG 4 | -------------------------------------------------------------------------------- /docker/kubernetes-events-printer/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.4 2 | 3 | RUN apk add --no-cache \ 4 | curl \ 5 | jq 6 | 7 | ADD events.sh / 8 | 9 | ENTRYPOINT [ "/events.sh" ] 10 | CMD [ "sh" ] 11 | -------------------------------------------------------------------------------- /es-env.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | metadata: 3 | name: es-env 4 | kind: ConfigMap 5 | data: 6 | es-cluster-name: es-logs 7 | es-client-heap: 256m 8 | es-master-heap: 256m 9 | es-data-heap: 256m 10 | es-number-of-replicas: "1" 11 | -------------------------------------------------------------------------------- /update_fluentd-ds.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | CDIR=$(cd `dirname "$0"` && pwd) 4 | cd "$CDIR" 5 | 6 | NAMESPACE="monitoring" 7 | 8 | kubectl ${CONTEXT} --namespace="${NAMESPACE}" delete ds fluentd-elasticsearch 9 | kubectl ${CONTEXT} --namespace="${NAMESPACE}" create -f es-fluentd-ds.yaml 10 | -------------------------------------------------------------------------------- /es-discovery-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: elasticsearch-discovery 5 | labels: 6 | component: elasticsearch 7 | role: master 8 | spec: 9 | selector: 10 | component: elasticsearch 11 | role: master 12 | ports: 13 | - name: transport 14 | port: 9300 15 | protocol: TCP 16 | -------------------------------------------------------------------------------- /docker/elasticsearch/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # provision elasticsearch user 4 | addgroup sudo 5 | adduser -D -g '' elasticsearch 6 | adduser elasticsearch sudo 7 | chown -R elasticsearch /elasticsearch /data 8 | echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers 9 | 10 | # allow for memlock 11 | ulimit -l unlimited 12 | 13 | # run 14 | sudo -E -u elasticsearch /elasticsearch/bin/elasticsearch 15 | -------------------------------------------------------------------------------- /es-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: elasticsearch-logging 5 | labels: 6 | component: elasticsearch 7 | role: client 8 | annotations: 9 | prometheus.io/scrape: 'true' 10 | prometheus.io/path: '/_prometheus/metrics' 11 | prometheus.io/port: '9200' 12 | spec: 13 | selector: 14 | component: elasticsearch 15 | role: client 16 | ports: 17 | - name: http 18 | port: 9200 19 | protocol: TCP 20 | -------------------------------------------------------------------------------- /update_config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | CDIR=$(cd `dirname "$0"` && pwd) 4 | cd "$CDIR" 5 | 6 | NAMESPACE="monitoring" 7 | 8 | kubectl ${CONTEXT} --namespace="${NAMESPACE}" create configmap fluentd-config --from-file=docker/fluentd/td-agent.conf --dry-run -o yaml | kubectl ${CONTEXT} --namespace="${NAMESPACE}" apply -f - 9 | # Just remove pods and daemonsets will recreate new ones with updated config file 10 | kubectl ${CONTEXT} --namespace="${NAMESPACE}" delete $(kubectl ${CONTEXT} --namespace="${NAMESPACE}" 
get pods -o name | awk '/pod\/fluentd-elasticsearch/') 11 | -------------------------------------------------------------------------------- /k8s-events-printer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: kubernetes-events-printer 5 | labels: 6 | component: fluentd 7 | role: kubernetes-events-printer 8 | spec: 9 | strategy: 10 | type: Recreate 11 | replicas: 1 12 | template: 13 | metadata: 14 | labels: 15 | component: fluentd 16 | role: kubernetes-events-printer 17 | spec: 18 | containers: 19 | - name: kubernetes-events-printer 20 | image: kayrus/kubernetes-events-printer:latest 21 | imagePullPolicy: Always 22 | -------------------------------------------------------------------------------- /docker/kibana/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mhart/alpine-node:5 2 | 3 | ############################################################# Kibana Docker 4 | 5 | EXPOSE 5601 6 | 7 | ENV KIBANA_VERSION 4.6.1 8 | 9 | # Install Kibana 10 | 11 | RUN apk add --update curl ca-certificates sudo && \ 12 | 13 | ( curl -Lskj https://download.elastic.co/kibana/kibana/kibana-${KIBANA_VERSION}-linux-x86_64.tar.gz | \ 14 | gunzip -c - | tar xf - ) && \ 15 | mv /kibana-${KIBANA_VERSION}-linux-x86_64 /kibana-linux-x86_64 && \ 16 | rm -rf /kibana-linux-x86_64/node && \ 17 | apk del curl 18 | 19 | # Copy run script 20 | COPY run.sh / 21 | 22 | CMD ["/run.sh"] 23 | -------------------------------------------------------------------------------- /docker/elasticsearch/config/elasticsearch.yml: -------------------------------------------------------------------------------- 1 | cluster: 2 | name: ${CLUSTER_NAME} 3 | 4 | node: 5 | master: ${NODE_MASTER} 6 | data: ${NODE_DATA} 7 | 8 | network.host: ${NETWORK_HOST} 9 | 10 | path: 11 | data: /data/data 12 | logs: /data/log 13 | plugins: /elasticsearch/plugins 14 | work: /data/work 15 | 16 | bootstrap.mlockall: true 17 | 18 | http: 19 | enabled: ${HTTP_ENABLE} 20 | compression: true 21 | cors: 22 | enabled: ${HTTP_CORS_ENABLE} 23 | allow-origin: ${HTTP_CORS_ALLOW_ORIGIN} 24 | 25 | cloud: 26 | kubernetes: 27 | service: ${DISCOVERY_SERVICE} 28 | namespace: ${NAMESPACE} 29 | discovery: 30 | type: kubernetes 31 | zen: 32 | minimum_master_nodes: ${NUMBER_OF_MASTERS} 33 | 34 | index: 35 | number_of_shards: ${NUMBER_OF_SHARDS} 36 | number_of_replicas: ${NUMBER_OF_REPLICAS} 37 | -------------------------------------------------------------------------------- /docker/kibana/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Copyright 2015 The Kubernetes Authors. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
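# Editor's note: the ${VAR:-default} expansion below keeps an externally
# supplied ELASTICSEARCH_URL (es-kibana.yaml sets it to
# http://elasticsearch-logging:9200) and only falls back to localhost when
# the variable is unset or empty, e.g. for local testing:
#   ELASTICSEARCH_URL=http://127.0.0.1:9200 /run.sh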
16 | 17 | export ELASTICSEARCH_URL=${ELASTICSEARCH_URL:-"http://localhost:9200"} 18 | echo ELASTICSEARCH_URL=${ELASTICSEARCH_URL} 19 | /kibana-linux-x86_64/bin/kibana -e ${ELASTICSEARCH_URL} 20 | -------------------------------------------------------------------------------- /undeploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CDIR=$(cd `dirname "$0"` && pwd) 4 | cd "$CDIR" 5 | 6 | print_red() { 7 | printf '%b' "\033[91m$1\033[0m\n" 8 | } 9 | 10 | print_green() { 11 | printf '%b' "\033[92m$1\033[0m\n" 12 | } 13 | 14 | CONTEXT="" 15 | #CONTEXT="--context=foo" 16 | NAMESPACE="monitoring" 17 | 18 | INSTANCES=(deployment/es-client deployment/es-data deployment/es-master deployment/es-data-master deployment/kibana-logging-v2 deployment/kubernetes-events-printer daemonset/fluentd-elasticsearch service/elasticsearch-logging service/elasticsearch-discovery service/kibana-logging configmap/es-env configmap/fluentd-config) 19 | 20 | for instance in ${INSTANCES[@]}; do 21 | kubectl ${CONTEXT} --namespace="${NAMESPACE}" delete "${instance}" 22 | done 23 | 24 | PODS=$(kubectl ${CONTEXT} --namespace="${NAMESPACE}" get pods -o name | awk '/^pods\/es-/ {print $1}' | tr '\n' ' ') 25 | while [ ! "${PODS}" = "" ]; do 26 | echo "Waiting 1 second for ${PODS}pods to shutdown..." 27 | sleep 1 28 | PODS=$(kubectl ${CONTEXT} --namespace="${NAMESPACE}" get pods -o name | awk '/^pods\/es-/ {print $1}' | tr '\n' ' ') 29 | done 30 | -------------------------------------------------------------------------------- /es-kibana.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kibana-logging 5 | labels: 6 | k8s-app: kibana-logging 7 | kubernetes.io/name: "Kibana" 8 | spec: 9 | ports: 10 | - port: 5601 11 | protocol: TCP 12 | targetPort: ui 13 | selector: 14 | k8s-app: kibana-logging 15 | --- 16 | apiVersion: extensions/v1beta1 17 | kind: Deployment 18 | metadata: 19 | name: kibana-logging-v2 20 | labels: 21 | k8s-app: kibana-logging 22 | version: v2 23 | spec: 24 | template: 25 | metadata: 26 | labels: 27 | k8s-app: kibana-logging 28 | version: v2 29 | spec: 30 | containers: 31 | - name: kibana-logging 32 | image: kayrus/docker-kibana-kubernetes:4.6.1 33 | imagePullPolicy: Always 34 | resources: 35 | # keep request = limit to keep this container in guaranteed class 36 | limits: 37 | cpu: 100m 38 | requests: 39 | cpu: 100m 40 | env: 41 | - name: "ELASTICSEARCH_URL" 42 | value: "http://elasticsearch-logging:9200" 43 | ports: 44 | - containerPort: 5601 45 | name: ui 46 | protocol: TCP 47 | -------------------------------------------------------------------------------- /es-master.yaml_: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: es-master 5 | labels: 6 | component: elasticsearch 7 | role: master 8 | spec: 9 | template: 10 | metadata: 11 | labels: 12 | component: elasticsearch 13 | role: master 14 | spec: 15 | containers: 16 | - name: es-master 17 | securityContext: 18 | privileged: true 19 | capabilities: 20 | add: 21 | - IPC_LOCK 22 | image: kayrus/docker-elasticsearch-kubernetes:2.4.0 23 | imagePullPolicy: Always 24 | env: 25 | - name: NAMESPACE 26 | valueFrom: 27 | fieldRef: 28 | fieldPath: metadata.namespace 29 | - name: "CLUSTER_NAME" 30 | valueFrom: 31 | configMapKeyRef: 32 | name: es-env 33 | key: es-cluster-name 34 | - name: NODE_MASTER 35 
| value: "true" 36 | - name: NODE_DATA 37 | value: "false" 38 | - name: HTTP_ENABLE 39 | value: "false" 40 | - name: ES_HEAP_SIZE 41 | valueFrom: 42 | configMapKeyRef: 43 | name: es-env 44 | key: es-master-heap 45 | ports: 46 | - containerPort: 9300 47 | name: transport 48 | protocol: TCP 49 | -------------------------------------------------------------------------------- /es-client.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: es-client 5 | labels: 6 | component: elasticsearch 7 | role: client 8 | spec: 9 | replicas: 2 10 | template: 11 | metadata: 12 | labels: 13 | component: elasticsearch 14 | role: client 15 | spec: 16 | containers: 17 | - name: es-client 18 | securityContext: 19 | privileged: true 20 | capabilities: 21 | add: 22 | - IPC_LOCK 23 | image: kayrus/docker-elasticsearch-kubernetes:2.4.0 24 | imagePullPolicy: Always 25 | env: 26 | - name: NAMESPACE 27 | valueFrom: 28 | fieldRef: 29 | fieldPath: metadata.namespace 30 | - name: "CLUSTER_NAME" 31 | valueFrom: 32 | configMapKeyRef: 33 | name: es-env 34 | key: es-cluster-name 35 | - name: NODE_MASTER 36 | value: "false" 37 | - name: NODE_DATA 38 | value: "false" 39 | - name: HTTP_ENABLE 40 | value: "true" 41 | - name: ES_HEAP_SIZE 42 | valueFrom: 43 | configMapKeyRef: 44 | name: es-env 45 | key: es-client-heap 46 | ports: 47 | - containerPort: 9200 48 | name: http 49 | protocol: TCP 50 | - containerPort: 9300 51 | name: transport 52 | protocol: TCP 53 | -------------------------------------------------------------------------------- /es-fluentd-ds.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: DaemonSet 3 | metadata: 4 | name: fluentd-elasticsearch 5 | labels: 6 | k8s-app: fluentd-logging 7 | spec: 8 | template: 9 | metadata: 10 | labels: 11 | k8s-app: fluentd-logging 12 | name: fluentd-logging 13 | spec: 14 | containers: 15 | - name: fluentd-elasticsearch 16 | image: kayrus/fluentd-elasticsearch:1.20 17 | imagePullPolicy: Always 18 | # suppress "info" log level 19 | args: 20 | - -q 21 | resources: 22 | limits: 23 | memory: 200Mi 24 | requests: 25 | cpu: 100m 26 | memory: 200Mi 27 | volumeMounts: 28 | - name: var-log 29 | mountPath: /var/log 30 | - name: run-log 31 | mountPath: /run/log 32 | - name: var-lib-docker-containers 33 | mountPath: /var/lib/docker/containers 34 | readOnly: true 35 | - name: fluentd-config 36 | mountPath: /etc/td-agent 37 | readOnly: true 38 | terminationGracePeriodSeconds: 30 39 | volumes: 40 | - name: var-log 41 | hostPath: 42 | path: /var/log 43 | - name: run-log 44 | hostPath: 45 | path: /run/log 46 | - name: var-lib-docker-containers 47 | hostPath: 48 | path: /var/lib/docker/containers 49 | - name: fluentd-config 50 | configMap: 51 | name: fluentd-config 52 | -------------------------------------------------------------------------------- /es-data.yaml_: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: es-data 5 | labels: 6 | component: elasticsearch 7 | role: data 8 | spec: 9 | template: 10 | metadata: 11 | labels: 12 | component: elasticsearch 13 | role: data 14 | spec: 15 | containers: 16 | - name: es-data 17 | securityContext: 18 | privileged: true 19 | capabilities: 20 | add: 21 | - IPC_LOCK 22 | image: kayrus/docker-elasticsearch-kubernetes:2.4.0 23 | imagePullPolicy: Always 24 | 
        env:
25 |         - name: NAMESPACE
26 |           valueFrom:
27 |             fieldRef:
28 |               fieldPath: metadata.namespace
29 |         - name: "CLUSTER_NAME"
30 |           valueFrom:
31 |             configMapKeyRef:
32 |               name: es-env
33 |               key: es-cluster-name
34 |         - name: NODE_MASTER
35 |           value: "false"
36 |         - name: NODE_DATA
37 |           value: "true"
38 |         - name: HTTP_ENABLE
39 |           value: "false"
40 |         - name: ES_HEAP_SIZE
41 |           valueFrom:
42 |             configMapKeyRef:
43 |               name: es-env
44 |               key: es-data-heap
45 |         ports:
46 |         - containerPort: 9300
47 |           name: transport
48 |           protocol: TCP
49 |           # Random hostPort to prevent multiple ES data instances on one node
50 |           hostPort: 28651
51 |         volumeMounts:
52 |         - name: storage
53 |           mountPath: /data
54 |       volumes:
55 |       - name: storage
56 |         emptyDir: {}
57 |
--------------------------------------------------------------------------------
/docker/elasticsearch/config/logging.yml:
--------------------------------------------------------------------------------
1 | # you can override this by setting a system property, for example -Des.logger.level=DEBUG
2 | es.logger.level: INFO
3 | rootLogger: ${es.logger.level}, console, file
4 | logger:
5 |   # log action execution errors for easier debugging
6 |   action: DEBUG
7 |   # reduce the logging for aws, too much is logged under the default INFO
8 |   com.amazonaws: WARN
9 |
10 |   # gateway
11 |   #gateway: DEBUG
12 |   #index.gateway: DEBUG
13 |
14 |   # peer shard recovery
15 |   #indices.recovery: DEBUG
16 |
17 |   # discovery
18 |   #discovery: TRACE
19 |
20 |   index.search.slowlog: TRACE, index_search_slow_log_file
21 |   index.indexing.slowlog: TRACE, index_indexing_slow_log_file
22 |
23 | additivity:
24 |   index.search.slowlog: false
25 |   index.indexing.slowlog: false
26 |
27 | appender:
28 |   console:
29 |     type: console
30 |     layout:
31 |       type: consolePattern
32 |       conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
33 |
34 |   file:
35 |     type: dailyRollingFile
36 |     file: ${path.logs}/${cluster.name}.log
37 |     datePattern: "'.'yyyy-MM-dd"
38 |     layout:
39 |       type: pattern
40 |       conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
41 |
42 |   index_search_slow_log_file:
43 |     type: dailyRollingFile
44 |     file: ${path.logs}/${cluster.name}_index_search_slowlog.log
45 |     datePattern: "'.'yyyy-MM-dd"
46 |     layout:
47 |       type: pattern
48 |       conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
49 |
50 |   index_indexing_slow_log_file:
51 |     type: dailyRollingFile
52 |     file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
53 |     datePattern: "'.'yyyy-MM-dd"
54 |     layout:
55 |       type: pattern
56 |       conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
57 |
--------------------------------------------------------------------------------
/docker/kubernetes-events-printer/events.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh -e
2 |
3 | # oneliner
4 | #curl -s "https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT_HTTPS}/api/v1/watch/events?resourceVersion=$(curl -s "https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT_HTTPS}/api/v1/events" --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" | jq -r '.metadata.resourceVersion')" --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)"
5 |
6 | KUBERNETES_SERVICE_HOST=${KUBERNETES_SERVICE_HOST:?Please specify KUBERNETES_SERVICE_HOST env variable}
7 | KUBERNETES_SERVICE_PORT_HTTPS=${KUBERNETES_SERVICE_PORT_HTTPS:?Please specify KUBERNETES_SERVICE_PORT_HTTPS env variable}
8 |
9 | BASE_URL="https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT_HTTPS}/api/v1"
10 | CA_CERT="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
11 | TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
12 |
13 | trap 'kill -TERM $PID' TERM INT
14 | while true; do
15 |   # curl exits from time to time, most probably because kube-proxy reloads iptables rules; this needs deeper investigation. For now, use a restart loop as a workaround.
16 |   RESOURCE_VERSION=$(curl -s "${BASE_URL}/events" --cacert "${CA_CERT}" -H "Authorization: Bearer ${TOKEN}" | jq -r '.metadata.resourceVersion')
17 |   DATE=$(date --utc +"%Y-%m-%dT%TZ")
18 |   echo "{\"time\":\"${DATE}\",\"object\":{\"message\":\"Monitoring Kubernetes events starting from ${RESOURCE_VERSION} resourceVersion\"}}" >&2
19 |   curl -s "${BASE_URL}/watch/events?resourceVersion=${RESOURCE_VERSION}" --cacert "${CA_CERT}" -H "Authorization: Bearer ${TOKEN}" &
20 |   PID=$!
21 |   wait $PID
22 | done
23 |
--------------------------------------------------------------------------------
/docker/fluentd/Dockerfile:
--------------------------------------------------------------------------------
1 | # Copyright 2016 The Kubernetes Authors.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | #     http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | # This Dockerfile will build an image that is configured
16 | # to run Fluentd with an Elasticsearch plug-in and the
17 | # provided configuration file.
18 | # TODO(a-robinson): Use a lighter base image, e.g. some form of busybox.
19 | # The image acts as an executable for the binary /usr/sbin/td-agent.
20 | # Note that fluentd is run with root permission to allow access to
21 | # log files with root-only access under /var/log/containers/*
22 | # Please see http://docs.fluentd.org/articles/install-by-deb for more
23 | # information about installing fluentd using the deb package.
24 |
25 | FROM gcr.io/google_containers/ubuntu-slim:0.4
26 | MAINTAINER Alex Robinson "arob@google.com"
27 | MAINTAINER Jimmi Dyson "jimmidyson@gmail.com"
28 |
29 | # Ensure there are enough file descriptors for running Fluentd.
30 | RUN ulimit -n 65536
31 |
32 | # Disable prompts from apt.
33 | ENV DEBIAN_FRONTEND noninteractive
34 |
35 | # Copy the Elasticsearch index template for GEO data
36 | COPY elasticsearch-template-es2x.json /etc/elasticsearch-template-es2x.json
37 |
38 | # Install fluentd
39 | COPY install.sh /tmp/install.sh
40 | RUN /tmp/install.sh
41 |
42 | # Copy the Fluentd configuration file.
43 | COPY td-agent.conf /etc/td-agent/td-agent.conf
44 |
45 | # Run the Fluentd service.
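# Editor's note: when this image is deployed via es-fluentd-ds.yaml, the
# config baked in above is shadowed at runtime by the fluentd-config
# ConfigMap mounted over /etc/td-agent, so config changes only require
# running update_config.sh, not rebuilding this image.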
46 | ENTRYPOINT ["td-agent"]
47 |
--------------------------------------------------------------------------------
/docker/elasticsearch/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM alpine:3.4
2 |
3 | ENV JAVA_HOME=/usr/lib/jvm/default-jvm/jre
4 |
5 | RUN apk upgrade --update-cache; \
6 |     apk add openjdk8-jre; \
7 |     rm -rf /tmp/* /var/cache/apk/*
8 |
9 | ############################################################# ELK Docker
10 |
11 | EXPOSE 9200 9300
12 |
13 | ENV VERSION 2.4.0
14 |
15 | # Install Elasticsearch.
16 | RUN apk add --update curl ca-certificates sudo && \
17 |
18 |   ( curl -Lskj https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/$VERSION/elasticsearch-$VERSION.tar.gz | \
19 |   gunzip -c - | tar xf - ) && \
20 |   mv /elasticsearch-$VERSION /elasticsearch && \
21 |   rm -rf $(find /elasticsearch | egrep "(\.(exe|bat)$|sigar/.*(dll|winnt|x86-linux|solaris|ia64|freebsd|macosx))")
22 |
23 | # Volume for Elasticsearch data
24 | VOLUME ["/data"]
25 |
26 | # Copy configuration
27 | COPY config /elasticsearch/config
28 |
29 | # Copy run script
30 | COPY run.sh /
31 |
32 | # Set environment variables defaults
33 | ENV ES_HEAP_SIZE 512m
34 | ENV CLUSTER_NAME elasticsearch-default
35 | ENV NODE_MASTER true
36 | ENV NODE_DATA true
37 | ENV HTTP_ENABLE true
38 | ENV NETWORK_HOST _site_
39 | ENV HTTP_CORS_ENABLE true
40 | ENV HTTP_CORS_ALLOW_ORIGIN *
41 | ENV NUMBER_OF_MASTERS 1
42 | ENV NUMBER_OF_SHARDS 1
43 | ENV NUMBER_OF_REPLICAS 1
44 |
45 | ############################################################# ELK Kubernetes
46 | # Override elasticsearch.yml config, otherwise plug-in install will fail
47 | ADD do_not_use.yml /elasticsearch/config/elasticsearch.yml
48 |
49 | # Install Elasticsearch plug-ins
50 | RUN /elasticsearch/bin/plugin install io.fabric8/elasticsearch-cloud-kubernetes/2.4.0_01 --verbose && /elasticsearch/bin/plugin install lmenezes/elasticsearch-kopf/2.x --verbose && /elasticsearch/bin/plugin install https://github.com/vvanholl/elasticsearch-prometheus-exporter/releases/download/2.4.0.0/elasticsearch-prometheus-exporter-2.4.0.0.zip --verbose
51 |
52 | # Restore the real elasticsearch.yml config now that the plug-ins are installed
53 | ADD config/elasticsearch.yml /elasticsearch/config/elasticsearch.yml
54 |
55 | # Set environment
56 | ENV NAMESPACE default
57 | ENV DISCOVERY_SERVICE elasticsearch-discovery
58 |
59 | CMD ["/run.sh"]
60 |
--------------------------------------------------------------------------------
/es-data-master.yaml.tmpl:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 |   name: es-data-master
5 |   labels:
6 |     component: elasticsearch
7 |     role: data
8 |     role: master
9 | spec:
10 |   replicas: ${ES_DATA_REPLICAS}
11 |   strategy:
12 |     type: RollingUpdate
13 |     rollingUpdate:
14 |       maxSurge: 0
15 |       maxUnavailable: 1
16 |   template:
17 |     metadata:
18 |       labels:
19 |         component: elasticsearch
20 |         role: data
21 |         role: master
22 |     spec:
23 |       containers:
24 |       - name: es-data-master
25 |         securityContext:
26 |           privileged: true
27 |           capabilities:
28 |             add:
29 |             - IPC_LOCK
30 |         image: kayrus/docker-elasticsearch-kubernetes:2.4.0
31 |         imagePullPolicy: Always
32 |         #readinessProbe:
33 |         #  exec:
34 |         #    command:
35 |         #    - curl
36 |         #    - -so /dev/null
37 |         #    - http://elasticsearch-logging:9200/_cluster/health?wait_for_status=green
38 |         #  timeoutSeconds: 30
39 |         #
successThreshold: 3 40 | env: 41 | - name: NAMESPACE 42 | valueFrom: 43 | fieldRef: 44 | fieldPath: metadata.namespace 45 | - name: CLUSTER_NAME 46 | valueFrom: 47 | configMapKeyRef: 48 | name: es-env 49 | key: es-cluster-name 50 | - name: NUMBER_OF_REPLICAS 51 | valueFrom: 52 | configMapKeyRef: 53 | name: es-env 54 | key: es-number-of-replicas 55 | - name: NODE_MASTER 56 | value: 'true' 57 | - name: NODE_DATA 58 | value: 'true' 59 | - name: HTTP_ENABLE 60 | value: 'false' 61 | - name: ES_HEAP_SIZE 62 | valueFrom: 63 | configMapKeyRef: 64 | name: es-env 65 | key: es-data-heap 66 | ports: 67 | - containerPort: 9300 68 | name: transport 69 | protocol: TCP 70 | # Random hostPort to prevent multiple ES data instances on one node 71 | #hostPort: 28651 72 | volumeMounts: 73 | - name: storage 74 | mountPath: /data 75 | volumes: 76 | - name: storage 77 | emptyDir: {} 78 | -------------------------------------------------------------------------------- /docker/fluentd/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | # Copyright 2015 The Kubernetes Authors. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | 18 | # Install prerequisites. 19 | apt-get update 20 | 21 | apt-get install -y -q --no-install-recommends \ 22 | curl ca-certificates make g++ sudo bash git libgeoip-dev 23 | 24 | # Install Fluentd. 25 | /usr/bin/curl -sSL https://toolbelt.treasuredata.com/sh/install-ubuntu-xenial-td-agent2.sh | sh 26 | 27 | curl "http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz" | gunzip > /opt/GeoLiteCity.dat 28 | 29 | # Change the default user and group to root. 30 | # Needed to allow access to /var/log/docker/... files. 31 | sed -i -e "s/USER=td-agent/USER=root/" -e "s/GROUP=td-agent/GROUP=root/" /etc/init.d/td-agent 32 | 33 | # Install the Elasticsearch Fluentd plug-in. 
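# Editor's note (hedged): the gem list below mixes pinned versions (-v) with
# unpinned ones (fluent-plugin-geoip, fluent-plugin-rewrite); unpinned gems
# can drift between image builds. A sketch of pinning, where x.y.z is a
# placeholder and not a tested version:
#   td-agent-gem install --no-document fluent-plugin-geoip -v x.y.z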
34 | # http://docs.fluentd.org/articles/plugin-management 35 | td-agent-gem install --no-document fluent-plugin-kubernetes_metadata_filter -v 0.24.0 36 | td-agent-gem install --no-document fluent-plugin-elasticsearch -v 1.7.0 37 | td-agent-gem install --no-document fluent-plugin-parser -v 0.6.1 38 | td-agent-gem install --no-document fluent-plugin-concat -v 0.6.2 39 | td-agent-gem install --no-document fluent-plugin-systemd -v 0.0.4 40 | td-agent-gem install --no-document fluent-plugin-geoip 41 | td-agent-gem install --no-document fluent-plugin-rewrite 42 | td-agent-gem install --no-document specific_install 43 | td-agent-gem specific_install https://github.com/kayrus/fluent-plugin-color-stripper.git 44 | 45 | # Remove docs and postgres references 46 | rm -rf /opt/td-agent/embedded/share/doc \ 47 | /opt/td-agent/embedded/share/gtk-doc \ 48 | /opt/td-agent/embedded/lib/postgresql \ 49 | /opt/td-agent/embedded/bin/postgres \ 50 | /opt/td-agent/embedded/share/postgresql 51 | 52 | apt-get remove -y make g++ git 53 | apt-get autoremove -y 54 | apt-get clean -y 55 | 56 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 57 | -------------------------------------------------------------------------------- /deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CDIR=$(cd `dirname "$0"` && pwd) 4 | cd "$CDIR" 5 | 6 | print_red() { 7 | printf '%b' "\033[91m$1\033[0m\n" 8 | } 9 | 10 | print_green() { 11 | printf '%b' "\033[92m$1\033[0m\n" 12 | } 13 | 14 | render_template() { 15 | eval "echo \"$(< "$1")\"" 16 | } 17 | 18 | CONTEXT="" 19 | #CONTEXT="--context=foo" 20 | NAMESPACE="monitoring" 21 | 22 | ES_DATA_REPLICAS=$(kubectl get nodes --no-headers ${CONTEXT} | awk '!/SchedulingDisabled/ {print $1}' | wc -l) 23 | 24 | for yaml in *.yaml.tmpl; do 25 | render_template "${yaml}" | kubectl ${CONTEXT} --namespace="${NAMESPACE}" create -f - 26 | done 27 | 28 | for yaml in *.yaml; do 29 | kubectl ${CONTEXT} --namespace="${NAMESPACE}" create -f "${yaml}" 30 | done 31 | 32 | kubectl ${CONTEXT} --namespace="${NAMESPACE}" create configmap fluentd-config --from-file=docker/fluentd/td-agent.conf --dry-run -o yaml | kubectl ${CONTEXT} --namespace="${NAMESPACE}" apply -f - 33 | 34 | # Set replicas to amount of worker nodes 35 | #kubectl ${CONTEXT} --namespace="${NAMESPACE}" scale deployment es-data --replicas=${ES_DATA_REPLICAS} 36 | #kubectl ${CONTEXT} --namespace="${NAMESPACE}" scale deployment es-master --replicas=${ES_DATA_REPLICAS} 37 | #kubectl ${CONTEXT} --namespace="${NAMESPACE}" scale deployment es-data-master --replicas=${ES_DATA_REPLICAS} 38 | 39 | # Wait for Elasticsearch client nodes 40 | echo -n "Waiting for Elasticsearch client pods" 41 | while true; do 42 | echo -n . 43 | kubectl ${CONTEXT} --namespace="${NAMESPACE}" get pods -l role=client,component=elasticsearch -o jsonpath={.items[0].status.phase} | grep -q Running && break || sleep 1 44 | done 45 | echo 46 | 47 | # Wait for Elasticsearch cluster readiness, and then apply "readinessProbe" to allow smooth rolling upgrade 48 | echo -n "Waiting for Elasticsearch cluster readiness" 49 | while true; do 50 | echo -n . 
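  # Editor's note: the exec below runs curl inside an es-client pod;
  # Elasticsearch holds a _cluster/health?wait_for_status=green request
  # until the cluster turns green (or an internal timeout expires), so the
  # loop only breaks once the whole kubectl-exec-curl round trip succeeds.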
51 | kubectl ${CONTEXT} --namespace="${NAMESPACE}" exec $(kubectl ${CONTEXT} --namespace="${NAMESPACE}" get pods -l role=client,component=elasticsearch -o jsonpath={.items[0].metadata.name}) -- sh -c 'curl -so/dev/null http://elasticsearch-logging:9200/_cluster/health?wait_for_status=green' >/dev/null 2>&1 && break || sleep 1 52 | done 53 | echo 54 | 55 | # Apply readinessProbe only when our Elasticsearch cluster is up and running 56 | kubectl ${CONTEXT} --namespace="${NAMESPACE}" patch deployment es-data-master -p'{"spec":{"template":{"spec":{"containers":[{"name":"es-data-master","readinessProbe":{"exec":{"command":["curl","-so/dev/null","http://elasticsearch-logging:9200/_cluster/health?wait_for_status=green"]},"timeoutSeconds":30,"successThreshold":3}}]}}}}' 57 | 58 | kubectl ${CONTEXT} --namespace="${NAMESPACE}" get pods --watch 59 | -------------------------------------------------------------------------------- /docker/fluentd/elasticsearch-template-es2x.json: -------------------------------------------------------------------------------- 1 | { 2 | "template" : "logstash-*", 3 | "settings" : { 4 | "index.refresh_interval" : "5s" 5 | }, 6 | "mappings" : { 7 | "_default_" : { 8 | "_all" : {"enabled" : true, "omit_norms" : true}, 9 | "dynamic_templates" : [ { 10 | "message_field" : { 11 | "path_match" : "message", 12 | "match_mapping_type" : "string", 13 | "mapping" : { 14 | "type" : "string", "index" : "analyzed", "omit_norms" : true, 15 | "fielddata" : { "format" : "disabled" } 16 | } 17 | } 18 | }, { 19 | "string_fields" : { 20 | "match" : "*", 21 | "match_mapping_type" : "string", 22 | "mapping" : { 23 | "type" : "string", "index" : "analyzed", "omit_norms" : true, 24 | "fielddata" : { "format" : "disabled" }, 25 | "fields" : { 26 | "raw" : {"type": "string", "index" : "not_analyzed", "doc_values" : true, "ignore_above" : 256} 27 | } 28 | } 29 | } 30 | }, { 31 | "float_fields" : { 32 | "match" : "*", 33 | "match_mapping_type" : "float", 34 | "mapping" : { "type" : "float", "doc_values" : true } 35 | } 36 | }, { 37 | "double_fields" : { 38 | "match" : "*", 39 | "match_mapping_type" : "double", 40 | "mapping" : { "type" : "double", "doc_values" : true } 41 | } 42 | }, { 43 | "byte_fields" : { 44 | "match" : "*", 45 | "match_mapping_type" : "byte", 46 | "mapping" : { "type" : "byte", "doc_values" : true } 47 | } 48 | }, { 49 | "short_fields" : { 50 | "match" : "*", 51 | "match_mapping_type" : "short", 52 | "mapping" : { "type" : "short", "doc_values" : true } 53 | } 54 | }, { 55 | "integer_fields" : { 56 | "match" : "*", 57 | "match_mapping_type" : "integer", 58 | "mapping" : { "type" : "integer", "doc_values" : true } 59 | } 60 | }, { 61 | "long_fields" : { 62 | "match" : "*", 63 | "match_mapping_type" : "long", 64 | "mapping" : { "type" : "long", "doc_values" : true } 65 | } 66 | }, { 67 | "date_fields" : { 68 | "match" : "*", 69 | "match_mapping_type" : "date", 70 | "mapping" : { "type" : "date", "doc_values" : true } 71 | } 72 | }, { 73 | "geo_point_fields" : { 74 | "match" : "*", 75 | "match_mapping_type" : "geo_point", 76 | "mapping" : { "type" : "geo_point", "doc_values" : true } 77 | } 78 | } ], 79 | "properties" : { 80 | "@timestamp": { "type": "date", "doc_values" : true }, 81 | "@version": { "type": "string", "index": "not_analyzed", "doc_values" : true }, 82 | "geoip" : { 83 | "type" : "object", 84 | "dynamic": true, 85 | "properties" : { 86 | "ip": { "type": "ip", "doc_values" : true }, 87 | "location" : { "type" : "geo_point", "doc_values" : true }, 88 | "latitude" : { 
"type" : "float", "doc_values" : true }, 89 | "longitude" : { "type" : "float", "doc_values" : true } 90 | } 91 | } 92 | } 93 | } 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Info 2 | 3 | This repo deploys ELK (actually **EFK**: **Elasticsearch, Fluentd, Kibana**. But ELK abbreviation is more popular) stack with the following deployments/daemonsets: 4 | 5 | * Elasticsearch 6 | * ~~es-data~~ 7 | * ~~es-master~~ 8 | * es-data-master (in our case we use emptydir which creates new directory every start and master node requires data storage. If we remove master storage - we will loose all indexes, so it is better to combine data and master nodes. And we can easily reboot one node and restore lost data from replicas. Default elasticsearch `NUMBER_OF_REPLICAS` is set to 1 inside [docker/elasticsearch/Dockerfile](docker/elasticsearch/Dockerfile) which means we can survive one node failure). If you wish to override amount of replicas, set it inside [`es-env.yaml`](es-env.yaml) configmap. 9 | * es-client (client nodes which allow to communicate with the elasticsearch cluster, we use 2 pod replicas) 10 | * fluentd - we use daemonsets, so fluentd is being scheduled on all worker nodes. 11 | * kibana - one instance is enough. But you can easily scale it to two or more instances. 12 | 13 | ## Readiness probe 14 | 15 | [`es-data-master.yaml.tmpl`](es-data-master.yaml.tmpl) template already contains commented `readinessProbe` code. Since we deploy a brand new cluster `readinessProbe` won't work until our Elasticsearch cluster has a green status. That is why `readinessProbe` is being applied only when Elasticsearch client is up and running and returns green status for the cluster. The final stage of the Elasticsearch cluster deployment - rolling upgrade of the each node one by one following the green cluster state. 16 | 17 | ### Possible issues 18 | 19 | When you reboot your whole Kubernetes cluster, readiness probe won't allow to start Elasticsearch cluster because of dependency loop. 20 | 21 | # Ingress example 22 | 23 | Example of an ingress controller to get an access from outside: 24 | 25 | ```yaml 26 | apiVersion: extensions/v1beta1 27 | kind: Ingress 28 | metadata: 29 | annotations: 30 | ingress.kubernetes.io/auth-realm: Authentication Required 31 | ingress.kubernetes.io/auth-secret: internal-services-auth 32 | ingress.kubernetes.io/auth-type: basic 33 | kubernetes.io/ingress.allow-http: "false" 34 | name: ingress-monitoring 35 | namespace: monitoring 36 | spec: 37 | tls: 38 | - hosts: 39 | - kibana.example.com 40 | - elasticsearch.example.com 41 | secretName: example-tls 42 | rules: 43 | - host: kibana.example.com 44 | http: 45 | paths: 46 | - backend: 47 | serviceName: kibana-logging 48 | servicePort: 5601 49 | path: / 50 | - host: elasticsearch.example.com 51 | http: 52 | paths: 53 | - backend: 54 | serviceName: elasticsearch-logging 55 | servicePort: 9200 56 | path: / 57 | ``` 58 | 59 | # Monitoring the cluster state 60 | 61 | We use `kopf` plugin for elasticsearch. You can view the cluster state using links below: 62 | 63 | * [https://elasticsearch.example.com/_plugin/kopf/](https://elasticsearch.example.com/_plugin/kopf/) 64 | * [https://kibana.example.com/status](https://kibana.example.com/status) 65 | 66 | # Surviving the reboot 67 | 68 | When you reboot the node, ES instance will become faily. Quick hook to make it happy - kill it. 
The Kubernetes Deployment will create a new pod, the replicas will re-sync, and the ES cluster state will return to green.
69 |
70 | # Kibana and GEO data
71 |
72 | The Fluentd container is already configured to import the index templates. If the templates were not imported, you can import them manually:
73 |
74 | ```sh
75 | wget https://github.com/logstash-plugins/logstash-output-elasticsearch/raw/master/lib/logstash/outputs/elasticsearch/elasticsearch-template-es2x.json
76 | curl -XPUT 'https://elasticsearch.example.com/_template/logstash-*?pretty' -d@docker/fluentd/elasticsearch-template-es2x.json
77 | ```
78 |
79 | Please note that if an index was already created before the template was imported (e.g. on a brand new deploy), you have to remove the old index with the incorrect data:
80 |
81 | ```sh
82 | # This index has incorrect data
83 | curl -s -XGET https://elasticsearch.example.com/logstash-2016.09.28/_mapping | python -mjson.tool | grep -A10 geoip
84 |         "geoip": {
85 |             "properties": {
86 |                 "location": {
87 |                     "type": "double"
88 |                 }
89 |             }
90 |         }
91 | # Here is how to delete the incorrect index (ALL DATA IN THIS INDEX WILL BE REMOVED)
92 | curl -XDELETE https://elasticsearch.example.com/logstash-2016.09.28
93 | ```
94 |
95 | Alternatively, wait until a new index is created (in our setup a new index is created every day).
96 |
97 | # Forward Kubernetes events into Kibana/Elasticsearch
98 |
99 | The `k8s-events-printer.yaml` manifest deploys a simple `alpine` container with the `curl` and `jq` tools installed. It prints all Kubernetes events to stdout, and `fluentd` simply parses and forwards these events into Elasticsearch as regular JSON logs.
100 |
101 | # Known issues
102 |
103 | * `journald` logs don't show up in Kibana, probably because of TZ issues
104 | * `DELETED` Kubernetes events cannot be stripped for now; you have to create an exclude rule for `type:"DELETED"`, otherwise these events confuse Kibana users.
105 |
106 | # Credits
107 |
108 | This repo uses modified config files from https://github.com/pires/kubernetes-elasticsearch-cluster
109 |
110 | # Pictures
111 |
112 | ![geomap](images/kibana1.png "Geo Map")
113 | ![countries](images/kibana2.png "Countries")
114 |
--------------------------------------------------------------------------------
/docker/fluentd/td-agent.conf:
--------------------------------------------------------------------------------
1 | # This configuration file for Fluentd / td-agent is used
2 | # to watch changes to Docker log files. The kubelet creates symlinks that
3 | # capture the pod name, namespace, container name & Docker container ID
4 | # to the docker logs for pods in the /var/log/containers directory on the host.
5 | # If running this fluentd configuration in a Docker container, the /var/log
6 | # directory should be mounted in the container.
7 | #
8 | # These logs are then submitted to Elasticsearch which assumes the
9 | # installation of the fluent-plugin-elasticsearch & the
10 | # fluent-plugin-kubernetes_metadata_filter plugins.
11 | # See https://github.com/uken/fluent-plugin-elasticsearch &
12 | # https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
13 | # more information about the plugins.
14 | # Maintainer: Jimmi Dyson
15 | #
16 | # Example
17 | # =======
18 | # A line in the Docker log file might look like this JSON:
19 | #
20 | # {"log":"2014/09/25 21:15:03 Got request with path wombat\n",
21 | #  "stream":"stderr",
22 | #  "time":"2014-09-25T21:15:03.499185026Z"}
23 | #
24 | # The time_format specification below makes sure we properly
25 | # parse the time format produced by Docker. This will be
26 | # submitted to Elasticsearch and should appear like:
27 | # $ curl 'http://elasticsearch-logging:9200/_search?pretty'
28 | # ...
29 | #     {
30 | #       "_index" : "logstash-2014.09.25",
31 | #       "_type" : "fluentd",
32 | #       "_id" : "VBrbor2QTuGpsQyTCdfzqA",
33 | #       "_score" : 1.0,
34 | #       "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n",
35 | #                  "stream":"stderr","tag":"docker.container.all",
36 | #                  "@timestamp":"2014-09-25T22:45:50+00:00"}
37 | #     },
38 | # ...
39 | #
40 | # The Kubernetes fluentd plugin is used to write the Kubernetes metadata to the log
41 | # record & add labels to the log record if properly configured. This enables users
42 | # to filter & search logs on any metadata.
43 | # For example, a Docker container's logs might be in the directory:
44 | #
45 | #  /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
46 | #
47 | # and in the file:
48 | #
49 | #  997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
50 | #
51 | # where 997599971ee6... is the Docker ID of the running container.
52 | # The Kubernetes kubelet makes a symbolic link to this file on the host machine
53 | # in the /var/log/containers directory which includes the pod name and the Kubernetes
54 | # container name:
55 | #
56 | #   synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
57 | #   ->
58 | #   /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
59 | #
60 | # The /var/log directory on the host is mapped to the /var/log directory in the container
61 | # running this instance of Fluentd and we end up collecting the file:
62 | #
63 | #   /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
64 | #
65 | # This results in the tag:
66 | #
67 | #   var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
68 | #
69 | # The Kubernetes fluentd plugin is used to extract the namespace, pod name & container name
70 | # which are added to the log message as a kubernetes field object & the Docker container ID
71 | # is also added under the docker field object.
72 | # The final tag is:
73 | #
74 | #   kubernetes.var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
75 | #
76 | # And the final log record looks like:
77 | #
78 | # {
79 | #   "log":"2014/09/25 21:15:03 Got request with path wombat\n",
80 | #   "stream":"stderr",
81 | #   "time":"2014-09-25T21:15:03.499185026Z",
82 | #   "kubernetes": {
83 | #     "namespace": "default",
84 | #     "pod_name": "synthetic-logger-0.25lps-pod",
85 | #     "container_name": "synth-lgr"
86 | #   },
87 | #   "docker": {
88 | #     "container_id": "997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b"
89 | #   }
90 | # }
91 | #
92 | # This makes it easier for users to search for logs by pod name or by
93 | # the name of the Kubernetes container regardless of how many times the
94 | # Kubernetes pod has been restarted (resulting in several Docker container IDs).
95 | #
96 | # TODO: Propagate the labels associated with a container along with its logs
97 | # so users can query logs using labels as well as or instead of the pod name
98 | # and container name. This is simply done via configuration of the Kubernetes
99 | # fluentd plugin but requires secrets to be enabled in the fluent pod. This is a
100 | # problem yet to be solved as secrets are not usable in static pods which the fluentd
101 | # pod must be until a per-node controller is available in Kubernetes.
102 |
103 | # Do not directly collect fluentd's own logs to avoid infinite loops.
104 | <match fluent.**>
105 |   type null
106 | </match>
107 |
108 | #<source>
109 | #  type systemd
110 | #  path /run/log/journal
111 | #  #filters [{ "_SYSTEMD_UNIT": "kube-proxy.service" }]
112 | #  pos_file /var/log/run-log-journald.pos
113 | ##  time_format %Y-%m-%dT%H:%M:%S%:z
114 | #  tag journald
115 | #  strip_underscores true
116 | #  read_from_head true
117 | #  keep_time_key true
118 | #</source>
119 | #<filter journald>
120 | #  type stdout
121 | #</filter>
122 | #<filter journald>
123 | #  type record_transformer
124 | #  <record>
125 | #    time ${record["@timestamp"]}
126 | #  </record>
127 | #</filter>
128 |
129 | # Example:
130 | # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
131 | <source>
132 |   type tail
133 |   path /var/log/containers/*.log
134 |   pos_file /var/log/es-containers.log.pos
135 |   time_format %Y-%m-%dT%H:%M:%S.%NZ
136 |   tag docker.*
137 |   format json
138 |   read_from_head true
139 |   keep_time_key true
140 | </source>
141 |
142 | # Example:
143 | # 2015-12-21 23:17:22,066 [salt.state      ][INFO    ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
144 | <source>
145 |   type tail
146 |   format /^(?