├── .gitignore
├── LICENSE
├── README.md
├── config-templates.py
├── templates
│   ├── 1_k8s-global
│   │   ├── aws-storage.yml
│   │   ├── gce-storage.yml
│   │   └── namespace.yml
│   ├── 2_elasticsearch
│   │   ├── es-config.yml
│   │   ├── es-data.yml
│   │   ├── es-master.yml
│   │   └── es-services.yml
│   ├── 3_kibana
│   │   └── kibana.yml
│   ├── 4_beats_init
│   │   ├── filebeat-dashboards.yml
│   │   ├── filebeat-template.yml
│   │   ├── metricbeat-dashboards.yml
│   │   └── metricbeat-template.yml
│   ├── 5_beats_agents
│   │   ├── filebeat-kubernetes.yml
│   │   └── metricbeat-kubernetes.yml
│   ├── 6_logstash
│   │   ├── logstash-pipelines.yml
│   │   ├── logstash-tls.yml
│   │   ├── logstash.conf
│   │   ├── logstash.yml
│   │   └── ssl-gen.sh
│   └── 7_oauth2-proxy
│       ├── oauth2-config.yml
│       ├── oauth2-proxy.yml
│       └── oauth2_proxy.cfg
└── util
    └── __init__.py

/.gitignore:
--------------------------------------------------------------------------------
 1 | *.pem
 2 | *.crt
 3 | *.key
 4 | credentials
 5 | 
 6 | clusters/**
 7 | __pycache__
 8 | 
 9 | .DS_Store
10 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
 1 | MIT License
 2 | 
 3 | Copyright (c) 2018 Jesse Swidler
 4 | 
 5 | Permission is hereby granted, free of charge, to any person obtaining a copy
 6 | of this software and associated documentation files (the "Software"), to deal
 7 | in the Software without restriction, including without limitation the rights
 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | NOTE: Archived. Since this was written, there have been many new releases of the Elastic Stack. They include authorization features and Helm charts for installing on Kubernetes, which were not available when this was written. The information in this guide is outdated.
 2 | 
 3 | # Elasticsearch-Kubed
 4 | 
 5 | The purpose of this project is to provide starter files for deploying a high-performance Elasticsearch cluster on Kubernetes running on either GCP or AWS.
 6 | 
 7 | The configuration files generally target deployments with at least two nodes, each with four or more CPUs and 15 GB or more of memory. However, for testing the components with less hardware, there is also a profile that runs on a single-node Kubernetes cluster, which you can easily set up with minikube. (If you really wanted to run Elasticsearch on a single computer, you would just use one container.) The Elasticsearch cluster will have three master nodes, plus data and ingest nodes whose count you can adjust to match your hardware.
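To go from these templates to a running cluster, the overall flow looks roughly like the following. This is only a sketch: `<namespace>` stands for whatever namespace you enter at the prompts, some files are only generated for certain answers, and `jinja2` is the script's one third-party dependency.

```sh
pip3 install jinja2

# Answer the prompts; rendered manifests are written to clusters/<namespace>/
python3 config-templates.py

# Apply the pieces in order; pick the storage class for your cloud (the storage
# classes and namespace.yml are only generated for some answers)
kubectl apply -f clusters/<namespace>/1_k8s-global/gce-storage.yml   # or aws-storage.yml
kubectl apply -f clusters/<namespace>/1_k8s-global/namespace.yml
kubectl apply -f clusters/<namespace>/2_elasticsearch/
kubectl apply -f clusters/<namespace>/3_kibana/
kubectl apply -f clusters/<namespace>/4_beats_init/
kubectl apply -f clusters/<namespace>/5_beats_agents/
# plus 6_logstash/ and 7_oauth2-proxy/ if you enabled them at the prompts
```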
 8 | 
 9 | ## Related Blog Post
10 | 
11 | There is a blog post that walks through setting up an ES cluster using this repo. You can find it at the link below:
12 | 
13 | [High Performance ELK with Kubernetes](https://engineering.udacity.com/high-performance-elk-with-kubernetes-part-1-1d09f41a4ce2)
14 | 
15 | Note that the blog post was originally written for ES 6.3, while this repo has since been updated several times to track newer versions of the Elastic Stack. Nothing in the post should be invalidated by those updates, but if you come across something confusing in the blog post, perhaps as a result of changes to this repo, please open a GitHub issue to let me know about it.
16 | 
17 | ### minikube
18 | 
19 | To test the configuration on macOS, `minikube` can be installed to launch a local single-node Kubernetes cluster. When starting minikube, increase the default machine size:
20 | 
21 | `minikube start --memory 8192 --disk-size 50g --cpus 4`
22 | 
--------------------------------------------------------------------------------
/config-templates.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python3
 2 | 
 3 | from util import prompt, base64, ensure_dir, random_token
 4 | import os
 5 | import re
 6 | import subprocess
 7 | from jinja2 import Environment, FileSystemLoader
 8 | import shutil
 9 | 
10 | dirname = os.path.dirname(__file__)
11 | template_dir = os.path.join(dirname, 'templates')
12 | clusters_dir = os.path.join(dirname, 'clusters')
13 | template_secrets_dir = os.path.join(template_dir, 'secrets')
14 | jinja_env = Environment(loader=FileSystemLoader(template_dir))
15 | jinja_env.filters['b64encode'] = base64
16 | 
17 | data_node_configs = {
18 |     'minikube': {
19 |         'replicas': '2',
20 |         'storage_class': 'standard',
21 |         'heap_size': '512m',
22 |         'memory_limit': '1Gi'
23 |     },
24 |     '4cpu X N': {
25 |         'storage_class': 'ssd',
26 |         'heap_size': '4g',
27 |         'memory_limit': '8Gi'
28 |     },
29 |     '8cpu X N': {
30 |         'storage_class': 'ssd',
31 |         'heap_size': '8g',
32 |         'memory_limit': '16Gi'
33 |     },
34 |     '16cpu X N': {
35 |         'storage_class': 'ssd',
36 |         'heap_size': '20g',
37 |         'memory_limit': '40Gi'
38 |     }
39 | }
40 | 
41 | def check_cert_presence(cert_dir):
42 |     files = ['ca.crt', 'ca.key', 'logstash.crt', 'logstash.key']
43 |     for file in files:
44 |         fname = os.path.join(cert_dir, file)
45 |         if not os.path.isfile(fname):
46 |             return False
47 |     return True
48 | 
49 | def prompt_for_logstash_certs(context, cert_dir):
50 |     if check_cert_presence(cert_dir):
51 |         print(f"Using keys and certs for Logstash found in {cert_dir}.")
52 |         context['skip_logstash'] = False
53 |     else:
54 |         do_logstash = prompt("Would you like to set up Logstash (with SSL beats input)? 
(Y/n)", 55 | "^[yYnN]?$" 56 | ) 57 | if do_logstash and do_logstash.lower() != 'y': 58 | context['skip_logstash'] = True 59 | return 60 | else: 61 | context['skip_logstash'] = False 62 | print("Provide the following information to generate self-signed certificates: ") 63 | ca_name = prompt("Certificate Authority Name", default='Logstash CA') 64 | url = prompt("CN - Common Name for Logstash", 65 | regex='^[a-zA-Z.-0-9]+$', 66 | default='logstash.my-domain.com' 67 | ) 68 | country = prompt("C - Country Code", 69 | regex='[A-Z]{1,4}', 70 | default='US' 71 | ) 72 | state = prompt("ST - State", 73 | regex='[A-Z]{1,4}', 74 | default='CA' 75 | ) 76 | loc = prompt("L - Location", 77 | regex='[A-Za-z 0-9-_.]+', 78 | default='San Francisco' 79 | ) 80 | org = prompt("O - Org", 81 | regex='[A-Za-z 0-9-_.]+', 82 | default='Acme' 83 | ) 84 | org_unit = prompt("OU - Org Unit", 85 | regex='[A-Za-z 0-9-_.]+', 86 | default='Computers' 87 | ) 88 | ensure_dir(os.path.join(cert_dir,'afile')) 89 | subprocess.run([ 90 | os.path.join(dirname, 'templates', '6_logstash', 'ssl-gen.sh'), 91 | ca_name, url, country, state, loc, org, org_unit, cert_dir 92 | ], check=True) 93 | if not check_cert_presence(cert_dir): 94 | raise RuntimeError('certs failed to generate') 95 | try: 96 | shutil.rmtree(template_secrets_dir) 97 | except: 98 | pass 99 | shutil.copytree(cert_dir, template_secrets_dir) 100 | context['logstash_beats_port'] = '8751' 101 | 102 | def prompt_for_oauth_config(context): 103 | do_oauth = prompt("Would you like to configure oauth2_proxy to authorize a GitHub team? (y/N)", 104 | "^[yYnN]?$" 105 | ) 106 | if not do_oauth or do_oauth.lower() != 'y': 107 | context['skip_oauth'] = True 108 | return 109 | else: 110 | context['skip_oauth'] = False 111 | context['github_org'] = prompt('Enter the GitHub org', '^[a-z0-9-_]+$') 112 | context['github_team'] = prompt('Enter the GitHub team (optional)', '^[a-z0-9-_]*$') 113 | context['oauth_client_id'] = prompt('Enter the OAuth Client ID', '^[a-z0-9-]+$') 114 | context['oauth_client_secret'] = prompt('Enter the OAuth Client Secret', '^[a-z0-9-]+$') 115 | context['oauth_cookie_name'] = '_ghoauth' 116 | context['oauth_cookie_secret'] = random_token() 117 | context['ssl_crt'] = prompt('Enter the path to the SSL certificate', readFile=True) 118 | context['ssl_key'] = prompt('Enter the path to the SSL private key', readFile=True) 119 | 120 | def do_prompts(): 121 | context = {} 122 | context['namespace'] = prompt( 123 | 'Enter a kubernetes namespace for the elasticsearch cluster', 124 | '^[a-z][-a-z0-9]{1,19}$', 125 | 'default' 126 | ) 127 | context['cluster_name'] = prompt( 128 | 'Enter a name for the elasticsearch cluster', 129 | '^[a-z][-a-z0-9]{1,19}$', 130 | 'my-es-cluster' 131 | ) 132 | print('Select the node size: ') 133 | for i, key in enumerate(data_node_configs): 134 | print(f'{i+1}: {key}') 135 | config_count = len(data_node_configs) # Will break regex if > 9 configs 136 | node_size_choice = int(prompt( 137 | f'[1-{config_count}]: ', 138 | f'^[1-{config_count}]$', 139 | '2' 140 | )) 141 | context['data_node'] = data_node_configs[list(data_node_configs.keys())[node_size_choice-1]] 142 | if node_size_choice != 1: 143 | context['data_node']['replicas'] = int(prompt( 144 | 'Enter the number of nodes (2-9)', 145 | '^[2-9]$', 146 | '2' 147 | )) 148 | context['data_node']['volume_size'] = prompt( 149 | 'Enter the data volume size in GB [10-9999]', 150 | '^[1-9][0-9]{1,3}$', 151 | '250' 152 | ) 153 | prompt_for_logstash_certs(context, os.path.join(clusters_dir, 
context['namespace'], "logstash-ssl-keys"))
154 |     prompt_for_oauth_config(context)
155 |     return context
156 | 
157 | def main():
158 |     print('These scripts will create configuration files to set up an Elasticsearch cluster in Kubernetes.')
159 | 
160 |     context = do_prompts()
161 |     cluster_dir = os.path.join(clusters_dir, context['namespace'])
162 |     do_logstash = '' if context['skip_logstash'] else '6'
163 |     do_oauth_proxy = '' if context['skip_oauth'] else '7'
164 |     for template in jinja_env.list_templates(filter_func=(lambda x: re.match(rf'^[1-5{do_logstash}{do_oauth_proxy}]_.+\.yml$', x))):
165 |         if context['namespace'] == 'default' and template.endswith('/namespace.yml'):
166 |             continue
167 |         if context['data_node']['storage_class'] == 'standard' and template.endswith('-storage.yml'):
168 |             continue
169 |         output = jinja_env.get_template(template).render(context)
170 |         out_path = os.path.join(cluster_dir, template)
171 |         ensure_dir(out_path)
172 |         with open(out_path, 'w') as output_file:
173 |             print(output, file=output_file)
174 |     # The files are still in the clusters/namespace/logstash-ssl-keys directory
175 |     # This removes a temporary copy in the template directory
176 |     try:
177 |         shutil.rmtree(template_secrets_dir)
178 |     except:
179 |         pass
180 |     print('\nSuccessfully generated cluster files.')
181 |     print(f'Configuration files have been saved to {cluster_dir}')
182 | 
183 | 
184 | if __name__ == "__main__":
185 |     main()
186 | 
--------------------------------------------------------------------------------
/templates/1_k8s-global/aws-storage.yml:
--------------------------------------------------------------------------------
 1 | kind: StorageClass
 2 | apiVersion: storage.k8s.io/v1
 3 | metadata:
 4 |   name: ssd
 5 | provisioner: kubernetes.io/aws-ebs
 6 | parameters:
 7 |   type: gp2
 8 | 
--------------------------------------------------------------------------------
/templates/1_k8s-global/gce-storage.yml:
--------------------------------------------------------------------------------
 1 | kind: StorageClass
 2 | apiVersion: storage.k8s.io/v1
 3 | metadata:
 4 |   name: ssd
 5 | provisioner: kubernetes.io/gce-pd
 6 | parameters:
 7 |   type: pd-ssd
 8 | 
--------------------------------------------------------------------------------
/templates/1_k8s-global/namespace.yml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | kind: Namespace
 3 | metadata:
 4 |   name: {{ namespace }}
 5 | 
--------------------------------------------------------------------------------
/templates/2_elasticsearch/es-config.yml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | kind: ConfigMap
 3 | metadata:
 4 |   namespace: {{ namespace }}
 5 |   name: es-config
 6 | data:
 7 |   java.security: |
 8 |     networkaddress.cache.ttl=60
 9 | 
--------------------------------------------------------------------------------
/templates/2_elasticsearch/es-data.yml:
--------------------------------------------------------------------------------
 1 | apiVersion: apps/v1
 2 | kind: StatefulSet
 3 | metadata:
 4 |   namespace: {{ namespace }}
 5 |   name: elasticsearch-data
 6 |   labels:
 7 |     app: elasticsearch
 8 |     role: data
 9 | spec:
10 |   serviceName: elasticsearch-data
11 |   # This is the number of data nodes we want to run
12 |   replicas: {{ data_node.replicas }}
13 |   selector:
14 |     matchLabels:
15 |       app: elasticsearch
16 |       role: data
17 |   template:
18 |     metadata:
19 |       labels:
20 |         app: elasticsearch
21 |         role: data
22 |     spec:
23 |       affinity:
24 |         # Try to put each ES 
data node on a different node in the K8s cluster 25 | podAntiAffinity: 26 | preferredDuringSchedulingIgnoredDuringExecution: 27 | - weight: 100 28 | podAffinityTerm: 29 | labelSelector: 30 | matchExpressions: 31 | - key: app 32 | operator: In 33 | values: 34 | - elasticsearch 35 | - key: role 36 | operator: In 37 | values: 38 | - data 39 | topologyKey: kubernetes.io/hostname 40 | terminationGracePeriodSeconds: 300 41 | 42 | # spec.template.spec.initContainers 43 | initContainers: 44 | # Fix the permissions on the volume. 45 | # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#_notes_for_production_use_and_defaults 46 | - name: fix-the-volume-permission 47 | image: busybox 48 | command: ['sh', '-c', 'chown -R 1000:1000 /usr/share/elasticsearch/data'] 49 | securityContext: 50 | privileged: true 51 | volumeMounts: 52 | - name: data 53 | mountPath: /usr/share/elasticsearch/data 54 | # Increase the default vm.max_map_count to 262144 55 | # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-cli-run-prod-mode 56 | - name: increase-the-vm-max-map-count 57 | image: busybox 58 | command: ['sysctl', '-w', 'vm.max_map_count=262144'] 59 | securityContext: 60 | privileged: true 61 | # Increase the ulimit 62 | # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#_notes_for_production_use_and_defaults 63 | - name: increase-the-ulimit 64 | image: busybox 65 | command: ['sh', '-c', 'ulimit -n 65536'] 66 | securityContext: 67 | privileged: true 68 | 69 | # spec.template.spec.containers 70 | containers: 71 | - name: elasticsearch 72 | image: docker.elastic.co/elasticsearch/elasticsearch:7.0.0 73 | ports: 74 | - containerPort: 9200 75 | name: http 76 | - containerPort: 9300 77 | name: transport 78 | livenessProbe: 79 | tcpSocket: 80 | port: transport 81 | initialDelaySeconds: 20 82 | periodSeconds: 10 83 | readinessProbe: 84 | httpGet: 85 | path: /_cluster/health 86 | port: http 87 | initialDelaySeconds: 20 88 | timeoutSeconds: 5 89 | resources: 90 | limits: 91 | memory: {{ data_node.memory_limit }} 92 | 93 | # spec.template.spec.containers[elasticsearch].env 94 | env: 95 | - name: discovery.seed_hosts 96 | value: "elasticsearch-master.{{ namespace }}.svc.cluster.local" 97 | - name: ES_JAVA_OPTS 98 | value: -Xms{{ data_node.heap_size }} -Xmx{{ data_node.heap_size }} 99 | 100 | - name: node.master 101 | value: "false" 102 | - name: node.ingest 103 | value: "true" 104 | - name: node.data 105 | value: "true" 106 | - name: cluster.remote.connect 107 | value: "true" 108 | 109 | - name: cluster.name 110 | value: {{ cluster_name }} 111 | - name: node.name 112 | valueFrom: 113 | fieldRef: 114 | fieldPath: metadata.name 115 | 116 | # spec.template.spec.containers[elasticsearch].volumeMounts 117 | volumeMounts: 118 | - name: data 119 | mountPath: /usr/share/elasticsearch/data 120 | - name: config 121 | mountPath: /usr/share/elasticsearch/jdk/lib/security/java.security 122 | subPath: java.security 123 | 124 | # spec.template.spec 125 | volumes: 126 | - name: config 127 | configMap: 128 | name: es-config 129 | 130 | # spec.volumeClaimTemplates 131 | volumeClaimTemplates: 132 | - metadata: 133 | name: data 134 | spec: 135 | accessModes: 136 | - ReadWriteOnce 137 | storageClassName: {{ data_node.storage_class }} 138 | resources: 139 | requests: 140 | storage: {{ data_node.volume_size }}Gi 141 | --- 142 | apiVersion: policy/v1beta1 143 | kind: PodDisruptionBudget 144 | metadata: 145 | namespace: {{ namespace }} 146 | name: elasticsearch-data 
147 | spec: 148 | maxUnavailable: 1 149 | selector: 150 | matchLabels: 151 | app: elasticsearch 152 | role: data 153 | -------------------------------------------------------------------------------- /templates/2_elasticsearch/es-master.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | namespace: {{ namespace }} 5 | name: elasticsearch-master 6 | labels: 7 | app: elasticsearch 8 | role: master 9 | spec: 10 | serviceName: elasticsearch-master 11 | replicas: 3 12 | selector: 13 | matchLabels: 14 | app: elasticsearch 15 | role: master 16 | template: 17 | metadata: 18 | labels: 19 | app: elasticsearch 20 | role: master 21 | spec: 22 | affinity: 23 | # Try to put each ES master node on a different node in the K8s cluster 24 | podAntiAffinity: 25 | preferredDuringSchedulingIgnoredDuringExecution: 26 | - weight: 100 27 | podAffinityTerm: 28 | labelSelector: 29 | matchExpressions: 30 | - key: app 31 | operator: In 32 | values: 33 | - elasticsearch 34 | - key: role 35 | operator: In 36 | values: 37 | - master 38 | topologyKey: kubernetes.io/hostname 39 | # spec.template.spec.initContainers 40 | initContainers: 41 | # Fix the permissions on the volume. 42 | # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#_notes_for_production_use_and_defaults 43 | - name: fix-the-volume-permission 44 | image: busybox 45 | command: ['sh', '-c', 'chown -R 1000:1000 /usr/share/elasticsearch/data'] 46 | securityContext: 47 | privileged: true 48 | volumeMounts: 49 | - name: data 50 | mountPath: /usr/share/elasticsearch/data 51 | # Increase the default vm.max_map_count to 262144 52 | # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-cli-run-prod-mode 53 | - name: increase-the-vm-max-map-count 54 | image: busybox 55 | command: ['sysctl', '-w', 'vm.max_map_count=262144'] 56 | securityContext: 57 | privileged: true 58 | # Increase the ulimit 59 | # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#_notes_for_production_use_and_defaults 60 | - name: increase-the-ulimit 61 | image: busybox 62 | command: ['sh', '-c', 'ulimit -n 65536'] 63 | securityContext: 64 | privileged: true 65 | 66 | # spec.template.spec.containers 67 | containers: 68 | - name: elasticsearch 69 | image: docker.elastic.co/elasticsearch/elasticsearch:7.0.0 70 | ports: 71 | - containerPort: 9200 72 | name: http 73 | - containerPort: 9300 74 | name: transport 75 | livenessProbe: 76 | tcpSocket: 77 | port: transport 78 | initialDelaySeconds: 20 79 | periodSeconds: 10 80 | resources: 81 | requests: 82 | cpu: 0.25 83 | limits: 84 | cpu: 1 85 | memory: 1Gi 86 | 87 | # spec.template.spec.containers[elasticsearch].env 88 | env: 89 | - name: discovery.seed_hosts 90 | value: "elasticsearch-master.{{ namespace }}.svc.cluster.local" 91 | - name: cluster.initial_master_nodes 92 | value: "elasticsearch-master-0,elasticsearch-master-1,elasticsearch-master-2" 93 | - name: ES_JAVA_OPTS 94 | value: -Xms512m -Xmx512m 95 | 96 | - name: node.master 97 | value: "true" 98 | - name: node.ingest 99 | value: "false" 100 | - name: node.data 101 | value: "false" 102 | - name: search.remote.connect 103 | value: "false" 104 | 105 | - name: cluster.name 106 | value: {{ cluster_name }} 107 | - name: node.name 108 | valueFrom: 109 | fieldRef: 110 | fieldPath: metadata.name 111 | 112 | # spec.template.spec.containers[elasticsearch].volumeMounts 113 | volumeMounts: 114 | - name: data 115 | mountPath: 
/usr/share/elasticsearch/data 116 | - name: config 117 | mountPath: /usr/share/elasticsearch/jdk/lib/security/java.security 118 | subPath: java.security 119 | 120 | # spec.template.spec 121 | volumes: 122 | - name: config 123 | configMap: 124 | name: es-config 125 | 126 | # spec.volumeClaimTemplates 127 | volumeClaimTemplates: 128 | - metadata: 129 | name: data 130 | spec: 131 | accessModes: 132 | - ReadWriteOnce 133 | storageClassName: {{ data_node.storage_class }} 134 | resources: 135 | requests: 136 | storage: 10Gi 137 | --- 138 | apiVersion: policy/v1beta1 139 | kind: PodDisruptionBudget 140 | metadata: 141 | namespace: {{ namespace }} 142 | name: elasticsearch-master 143 | spec: 144 | maxUnavailable: 1 145 | selector: 146 | matchLabels: 147 | app: elasticsearch 148 | role: master 149 | -------------------------------------------------------------------------------- /templates/2_elasticsearch/es-services.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | namespace: {{ namespace }} 5 | name: elasticsearch-master 6 | labels: 7 | app: elasticsearch 8 | role: master 9 | spec: 10 | clusterIP: None 11 | selector: 12 | app: elasticsearch 13 | role: master 14 | ports: 15 | - port: 9200 16 | name: http 17 | - port: 9300 18 | name: node-to-node 19 | --- 20 | apiVersion: v1 21 | kind: Service 22 | metadata: 23 | namespace: {{ namespace }} 24 | name: elasticsearch 25 | labels: 26 | app: elasticsearch 27 | role: data 28 | spec: 29 | clusterIP: None 30 | selector: 31 | app: elasticsearch 32 | role: data 33 | ports: 34 | - port: 9200 35 | name: http 36 | - port: 9300 37 | name: node-to-node 38 | -------------------------------------------------------------------------------- /templates/3_kibana/kibana.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | namespace: {{ namespace }} 5 | name: kibana 6 | labels: 7 | app: kibana 8 | spec: 9 | selector: 10 | app: kibana 11 | ports: 12 | - port: 5601 13 | name: http 14 | --- 15 | apiVersion: apps/v1 16 | kind: Deployment 17 | metadata: 18 | namespace: {{ namespace }} 19 | name: kibana 20 | labels: 21 | app: kibana 22 | spec: 23 | replicas: 1 24 | selector: 25 | matchLabels: 26 | app: kibana 27 | template: 28 | metadata: 29 | labels: 30 | app: kibana 31 | spec: 32 | containers: 33 | - name: kibana 34 | image: docker.elastic.co/kibana/kibana:7.0.0 35 | ports: 36 | - containerPort: 5601 37 | env: 38 | - name: SERVER_NAME 39 | valueFrom: 40 | fieldRef: 41 | fieldPath: metadata.name 42 | - name: SERVER_HOST 43 | value: "0.0.0.0" 44 | - name: ELASTICSEARCH_HOSTS 45 | value: http://elasticsearch.{{ namespace }}.svc.cluster.local:9200 46 | -------------------------------------------------------------------------------- /templates/4_beats_init/filebeat-dashboards.yml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | namespace: {{ namespace }} 5 | name: filebeat-dashboard-init 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: filebeat-template-init 11 | image: docker.elastic.co/beats/filebeat:7.0.0 12 | command: [ "filebeat", "setup", "--dashboards", 13 | "-E", "output.logstash.enabled=false", 14 | "-E", 'output.elasticsearch.hosts=["elasticsearch.{{ namespace }}.svc.cluster.local:9200"]', 15 | "-E", 'setup.kibana.host="kibana.{{ namespace }}.svc.cluster.local:5601"' ] 16 | restartPolicy: 
Never 17 | backoffLimit: 4 18 | -------------------------------------------------------------------------------- /templates/4_beats_init/filebeat-template.yml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | namespace: {{ namespace }} 5 | name: filebeat-template-init 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: filebeat-template-init 11 | image: docker.elastic.co/beats/filebeat:7.0.0 12 | command: [ "filebeat", "setup", "--template", 13 | "-E", "output.logstash.enabled=false", 14 | "-E", 'output.elasticsearch.hosts=["elasticsearch.{{ namespace }}.svc.cluster.local:9200"]' ] 15 | restartPolicy: Never 16 | backoffLimit: 4 17 | -------------------------------------------------------------------------------- /templates/4_beats_init/metricbeat-dashboards.yml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | namespace: {{ namespace }} 5 | name: metricbeat-dashboard-init 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: metricbeat-template-init 11 | image: docker.elastic.co/beats/metricbeat:7.0.0 12 | command: [ "metricbeat", "setup", "--dashboards", 13 | "-E", "output.logstash.enabled=false", 14 | "-E", 'output.elasticsearch.hosts=["elasticsearch.{{ namespace }}.svc.cluster.local:9200"]', 15 | "-E", 'setup.kibana.host="kibana.{{ namespace }}.svc.cluster.local:5601"' ] 16 | restartPolicy: Never 17 | backoffLimit: 4 18 | -------------------------------------------------------------------------------- /templates/4_beats_init/metricbeat-template.yml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | namespace: {{ namespace }} 5 | name: metricbeat-template-init 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: metricbeat-template-init 11 | image: docker.elastic.co/beats/metricbeat:7.0.0 12 | command: [ "metricbeat", "setup", "--template", 13 | "-E", "output.logstash.enabled=false", 14 | "-E", 'output.elasticsearch.hosts=["elasticsearch.{{ namespace }}.svc.cluster.local:9200"]' ] 15 | restartPolicy: Never 16 | backoffLimit: 4 17 | -------------------------------------------------------------------------------- /templates/5_beats_agents/filebeat-kubernetes.yml: -------------------------------------------------------------------------------- 1 | # Slightly edited version; original obtained from: 2 | # https://raw.githubusercontent.com/elastic/beats/6.3/deploy/kubernetes/filebeat-kubernetes.yaml 3 | --- 4 | apiVersion: v1 5 | kind: ConfigMap 6 | metadata: 7 | name: filebeat-config 8 | namespace: kube-system 9 | labels: 10 | k8s-app: filebeat 11 | data: 12 | filebeat.yml: |- 13 | filebeat.config: 14 | inputs: 15 | # Mounted `filebeat-inputs` configmap: 16 | path: ${path.config}/inputs.d/*.yml 17 | # Reload inputs configs as they change: 18 | reload.enabled: false 19 | modules: 20 | path: ${path.config}/modules.d/*.yml 21 | # Reload module configs as they change: 22 | reload.enabled: false 23 | 24 | processors: 25 | - add_cloud_metadata: 26 | 27 | output.elasticsearch: 28 | hosts: ['http://elasticsearch.{{namespace}}.svc.cluster.local:9200'] 29 | --- 30 | apiVersion: v1 31 | kind: ConfigMap 32 | metadata: 33 | name: filebeat-inputs 34 | namespace: kube-system 35 | labels: 36 | k8s-app: filebeat 37 | data: 38 | kubernetes.yml: |- 39 | - type: docker 40 | json.keys_under_root: false 41 | 
json.add_error_key: false 42 | json.ignore_decoding_error: true 43 | containers.ids: 44 | - "*" 45 | processors: 46 | - add_kubernetes_metadata: 47 | in_cluster: true 48 | --- 49 | apiVersion: apps/v1 50 | kind: DaemonSet 51 | metadata: 52 | name: filebeat 53 | namespace: kube-system 54 | labels: 55 | k8s-app: filebeat 56 | spec: 57 | selector: 58 | matchLabels: 59 | name: filebeat 60 | template: 61 | metadata: 62 | labels: 63 | k8s-app: filebeat 64 | name: filebeat 65 | spec: 66 | serviceAccountName: filebeat 67 | terminationGracePeriodSeconds: 30 68 | containers: 69 | - name: filebeat 70 | image: docker.elastic.co/beats/filebeat:7.0.0 71 | args: [ 72 | "-c", "/etc/filebeat.yml", 73 | "-e", 74 | ] 75 | securityContext: 76 | runAsUser: 0 77 | resources: 78 | limits: 79 | memory: 200Mi 80 | requests: 81 | cpu: 100m 82 | memory: 100Mi 83 | volumeMounts: 84 | - name: config 85 | mountPath: /etc/filebeat.yml 86 | readOnly: true 87 | subPath: filebeat.yml 88 | - name: inputs 89 | mountPath: /usr/share/filebeat/inputs.d 90 | readOnly: true 91 | - name: data 92 | mountPath: /usr/share/filebeat/data 93 | - name: varlibdockercontainers 94 | mountPath: /var/lib/docker/containers 95 | readOnly: true 96 | volumes: 97 | - name: config 98 | configMap: 99 | defaultMode: 0600 100 | name: filebeat-config 101 | - name: varlibdockercontainers 102 | hostPath: 103 | path: /var/lib/docker/containers 104 | - name: inputs 105 | configMap: 106 | defaultMode: 0600 107 | name: filebeat-inputs 108 | # data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart 109 | - name: data 110 | hostPath: 111 | path: /var/lib/filebeat-data 112 | type: DirectoryOrCreate 113 | --- 114 | apiVersion: rbac.authorization.k8s.io/v1beta1 115 | kind: ClusterRoleBinding 116 | metadata: 117 | name: filebeat 118 | subjects: 119 | - kind: ServiceAccount 120 | name: filebeat 121 | namespace: kube-system 122 | roleRef: 123 | kind: ClusterRole 124 | name: filebeat 125 | apiGroup: rbac.authorization.k8s.io 126 | --- 127 | apiVersion: rbac.authorization.k8s.io/v1beta1 128 | kind: ClusterRole 129 | metadata: 130 | name: filebeat 131 | labels: 132 | k8s-app: filebeat 133 | rules: 134 | - apiGroups: [""] # "" indicates the core API group 135 | resources: 136 | - namespaces 137 | - pods 138 | verbs: 139 | - get 140 | - watch 141 | - list 142 | --- 143 | apiVersion: v1 144 | kind: ServiceAccount 145 | metadata: 146 | name: filebeat 147 | namespace: kube-system 148 | labels: 149 | k8s-app: filebeat 150 | --- 151 | -------------------------------------------------------------------------------- /templates/5_beats_agents/metricbeat-kubernetes.yml: -------------------------------------------------------------------------------- 1 | # Slightly edited version; original obtained from: 2 | # https://raw.githubusercontent.com/elastic/beats/6.3/deploy/kubernetes/metricbeat-kubernetes.yaml 3 | --- 4 | apiVersion: v1 5 | kind: ConfigMap 6 | metadata: 7 | name: metricbeat-config 8 | namespace: kube-system 9 | labels: 10 | k8s-app: metricbeat 11 | data: 12 | metricbeat.yml: |- 13 | metricbeat.config.modules: 14 | # Mounted `metricbeat-daemonset-modules` configmap: 15 | path: ${path.config}/modules.d/*.yml 16 | # Reload module configs as they change: 17 | reload.enabled: false 18 | 19 | processors: 20 | - add_cloud_metadata: 21 | 22 | output.elasticsearch: 23 | hosts: ['http://elasticsearch.{{ namespace }}.svc.cluster.local:9200'] 24 | --- 25 | apiVersion: v1 26 | kind: ConfigMap 27 | metadata: 28 | name: 
metricbeat-daemonset-modules 29 | namespace: kube-system 30 | labels: 31 | k8s-app: metricbeat 32 | data: 33 | system.yml: |- 34 | - module: system 35 | period: 10s 36 | metricsets: 37 | - cpu 38 | - load 39 | - memory 40 | - network 41 | - process 42 | - process_summary 43 | #- core 44 | #- diskio 45 | #- socket 46 | processes: ['.*'] 47 | process.include_top_n: 48 | by_cpu: 5 # include top 5 processes by CPU 49 | by_memory: 5 # include top 5 processes by memory 50 | 51 | - module: system 52 | period: 1m 53 | metricsets: 54 | - filesystem 55 | - fsstat 56 | processors: 57 | - drop_event.when.regexp: 58 | system.filesystem.mount_point: '^/(sys|cgroup|proc|dev|etc|host|lib)($|/)' 59 | kubernetes.yml: |- 60 | - module: kubernetes 61 | metricsets: 62 | - node 63 | - system 64 | - pod 65 | - container 66 | - volume 67 | period: 10s 68 | hosts: ["localhost:10255"] 69 | --- 70 | # Deploy a Metricbeat instance per node for node metrics retrieval 71 | apiVersion: apps/v1 72 | kind: DaemonSet 73 | metadata: 74 | name: metricbeat 75 | namespace: kube-system 76 | labels: 77 | k8s-app: metricbeat 78 | spec: 79 | selector: 80 | matchLabels: 81 | name: metricbeat 82 | template: 83 | metadata: 84 | labels: 85 | k8s-app: metricbeat 86 | name: metricbeat 87 | spec: 88 | serviceAccountName: metricbeat 89 | terminationGracePeriodSeconds: 30 90 | hostNetwork: true 91 | dnsPolicy: ClusterFirstWithHostNet 92 | containers: 93 | - name: metricbeat 94 | image: docker.elastic.co/beats/metricbeat:7.0.0 95 | args: [ 96 | "-c", "/etc/metricbeat.yml", 97 | "-e", 98 | "-system.hostfs=/hostfs", 99 | ] 100 | env: 101 | - name: POD_NAMESPACE 102 | valueFrom: 103 | fieldRef: 104 | fieldPath: metadata.namespace 105 | securityContext: 106 | runAsUser: 0 107 | resources: 108 | limits: 109 | memory: 200Mi 110 | requests: 111 | cpu: 100m 112 | memory: 100Mi 113 | volumeMounts: 114 | - name: config 115 | mountPath: /etc/metricbeat.yml 116 | readOnly: true 117 | subPath: metricbeat.yml 118 | - name: modules 119 | mountPath: /usr/share/metricbeat/modules.d 120 | readOnly: true 121 | - name: dockersock 122 | mountPath: /var/run/docker.sock 123 | - name: proc 124 | mountPath: /hostfs/proc 125 | readOnly: true 126 | - name: cgroup 127 | mountPath: /hostfs/sys/fs/cgroup 128 | readOnly: true 129 | volumes: 130 | - name: proc 131 | hostPath: 132 | path: /proc 133 | - name: cgroup 134 | hostPath: 135 | path: /sys/fs/cgroup 136 | - name: dockersock 137 | hostPath: 138 | path: /var/run/docker.sock 139 | - name: config 140 | configMap: 141 | defaultMode: 0600 142 | name: metricbeat-config 143 | - name: modules 144 | configMap: 145 | defaultMode: 0600 146 | name: metricbeat-daemonset-modules 147 | # We set an `emptyDir` here to ensure the manifest will deploy correctly. 
148 | # It's recommended to change this to a `hostPath` folder, to ensure internal data 149 | # files survive pod changes (ie: version upgrade) 150 | - name: data 151 | emptyDir: {} 152 | --- 153 | apiVersion: v1 154 | kind: ConfigMap 155 | metadata: 156 | name: metricbeat-deployment-modules 157 | namespace: kube-system 158 | labels: 159 | k8s-app: metricbeat 160 | data: 161 | # This module requires `kube-state-metrics` up and running under `kube-system` namespace 162 | kubernetes.yml: |- 163 | - module: kubernetes 164 | metricsets: 165 | - state_node 166 | - state_deployment 167 | - state_replicaset 168 | - state_pod 169 | - state_container 170 | # Uncomment this to get k8s events: 171 | #- event 172 | period: 10s 173 | hosts: ["kube-state-metrics:8080"] 174 | --- 175 | # Deploy singleton instance in the whole cluster for some unique data sources, like kube-state-metrics 176 | apiVersion: apps/v1 177 | kind: Deployment 178 | metadata: 179 | name: metricbeat 180 | namespace: kube-system 181 | labels: 182 | k8s-app: metricbeat 183 | spec: 184 | selector: 185 | matchLabels: 186 | name: metricbeat 187 | template: 188 | metadata: 189 | labels: 190 | k8s-app: metricbeat 191 | name: metricbeat 192 | spec: 193 | serviceAccountName: metricbeat 194 | containers: 195 | - name: metricbeat 196 | image: docker.elastic.co/beats/metricbeat:7.0.0 197 | args: [ 198 | "-c", "/etc/metricbeat.yml", 199 | "-e", 200 | ] 201 | env: 202 | - name: POD_NAMESPACE 203 | valueFrom: 204 | fieldRef: 205 | fieldPath: metadata.namespace 206 | securityContext: 207 | runAsUser: 0 208 | resources: 209 | limits: 210 | memory: 200Mi 211 | requests: 212 | cpu: 100m 213 | memory: 100Mi 214 | volumeMounts: 215 | - name: config 216 | mountPath: /etc/metricbeat.yml 217 | readOnly: true 218 | subPath: metricbeat.yml 219 | - name: modules 220 | mountPath: /usr/share/metricbeat/modules.d 221 | readOnly: true 222 | volumes: 223 | - name: config 224 | configMap: 225 | defaultMode: 0600 226 | name: metricbeat-config 227 | - name: modules 228 | configMap: 229 | defaultMode: 0600 230 | name: metricbeat-deployment-modules 231 | --- 232 | apiVersion: rbac.authorization.k8s.io/v1beta1 233 | kind: ClusterRoleBinding 234 | metadata: 235 | name: metricbeat 236 | subjects: 237 | - kind: ServiceAccount 238 | name: metricbeat 239 | namespace: kube-system 240 | roleRef: 241 | kind: ClusterRole 242 | name: metricbeat 243 | apiGroup: rbac.authorization.k8s.io 244 | --- 245 | apiVersion: rbac.authorization.k8s.io/v1beta1 246 | kind: ClusterRole 247 | metadata: 248 | name: metricbeat 249 | labels: 250 | k8s-app: metricbeat 251 | rules: 252 | - apiGroups: [""] # "" indicates the core API group 253 | resources: 254 | - namespaces 255 | - events 256 | - pods 257 | verbs: 258 | - get 259 | - watch 260 | - list 261 | --- 262 | apiVersion: v1 263 | kind: ServiceAccount 264 | metadata: 265 | name: metricbeat 266 | namespace: kube-system 267 | labels: 268 | k8s-app: metricbeat 269 | --- 270 | -------------------------------------------------------------------------------- /templates/6_logstash/logstash-pipelines.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | namespace: {{ namespace }} 5 | name: logstash-pipelines 6 | data: 7 | {% macro inc(str) %}{% include str %}{% endmacro %} 8 | logstash.conf: | 9 | {{ inc('6_logstash/logstash.conf')|indent(first=true) }} 10 | -------------------------------------------------------------------------------- 
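config-templates.py copies the generated certificates into `templates/secrets/` just long enough to render the `logstash-tls.yml` template that follows, which inlines them into a Kubernetes Secret. If that Secret ever needs to be recreated by hand, a roughly equivalent imperative command — assuming the default output location used by config-templates.py — would be:

```sh
NAMESPACE=default   # whichever namespace you chose at the prompts
CERT_DIR=clusters/$NAMESPACE/logstash-ssl-keys

kubectl -n "$NAMESPACE" create secret generic logstash-tls \
  --from-file=ca.crt="$CERT_DIR/ca.crt" \
  --from-file=ca.key="$CERT_DIR/ca.key" \
  --from-file=logstash.crt="$CERT_DIR/logstash.crt" \
  --from-file=logstash.key="$CERT_DIR/logstash.key"
```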
/templates/6_logstash/logstash-tls.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | namespace: {{ namespace }} 5 | name: logstash-tls 6 | type: Opaque 7 | data: 8 | {% macro inc(str) %}{% include str %}{% endmacro %} 9 | ca.key: {{ inc('secrets/ca.key') | b64encode }} 10 | ca.crt: {{ inc('secrets/ca.crt') | b64encode }} 11 | logstash.key: {{ inc('secrets/logstash.key') | b64encode }} 12 | logstash.crt: {{ inc('secrets/logstash.crt') | b64encode }} 13 | -------------------------------------------------------------------------------- /templates/6_logstash/logstash.conf: -------------------------------------------------------------------------------- 1 | # Most configuration is taken from the examples at 2 | # https://www.elastic.co/guide/en/logstash/6.3/logstash-config-for-filebeat-modules.html 3 | input { 4 | beats { 5 | port => {{ logstash_beats_port }} 6 | ssl => true 7 | ssl_certificate_authorities => ["/usr/share/logstash/ssl/ca.crt"] 8 | ssl_key => "/usr/share/logstash/ssl/logstash.key" 9 | ssl_certificate => "/usr/share/logstash/ssl/logstash.crt" 10 | ssl_verify_mode => force_peer 11 | } 12 | } 13 | 14 | filter { 15 | if [fileset][module] == "nginx" { 16 | if [fileset][name] == "access" { 17 | grok { 18 | match => { "message" => ["%{IPORHOST:[nginx][access][remote_ip]} - %{DATA:[nginx][access][user_name]} \[%{HTTPDATE:[nginx][access][time]}\] \"%{WORD:[nginx][access][method]} %{DATA:[nginx][access][url]} HTTP/%{NUMBER:[nginx][access][http_version]}\" %{NUMBER:[nginx][access][response_code]} %{NUMBER:[nginx][access][body_sent][bytes]} \"%{DATA:[nginx][access][referrer]}\" \"%{DATA:[nginx][access][agent]}\""] } 19 | remove_field => "message" 20 | } 21 | mutate { 22 | add_field => { "read_timestamp" => "%{@timestamp}" } 23 | } 24 | date { 25 | match => [ "[nginx][access][time]", "dd/MMM/YYYY:H:m:s Z" ] 26 | remove_field => "[nginx][access][time]" 27 | } 28 | useragent { 29 | source => "[nginx][access][agent]" 30 | target => "[nginx][access][user_agent]" 31 | remove_field => "[nginx][access][agent]" 32 | } 33 | geoip { 34 | source => "[nginx][access][remote_ip]" 35 | target => "[nginx][access][geoip]" 36 | } 37 | } 38 | else if [fileset][name] == "error" { 39 | grok { 40 | match => { "message" => ["%{DATA:[nginx][error][time]} \[%{DATA:[nginx][error][level]}\] %{NUMBER:[nginx][error][pid]}#%{NUMBER:[nginx][error][tid]}: (\*%{NUMBER:[nginx][error][connection_id]} )?%{GREEDYDATA:[nginx][error][message]}"] } 41 | remove_field => "message" 42 | } 43 | mutate { 44 | rename => { "@timestamp" => "read_timestamp" } 45 | } 46 | date { 47 | match => [ "[nginx][error][time]", "YYYY/MM/dd H:m:s" ] 48 | remove_field => "[nginx][error][time]" 49 | } 50 | } 51 | } else if [fileset][module] == "system" { 52 | if [fileset][name] == "auth" { 53 | grok { 54 | match => { "message" => ["%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sshd(?:\[%{POSINT:[system][auth][pid]}\])?: %{DATA:[system][auth][ssh][event]} %{DATA:[system][auth][ssh][method]} for (invalid user )?%{DATA:[system][auth][user]} from %{IPORHOST:[system][auth][ssh][ip]} port %{NUMBER:[system][auth][ssh][port]} ssh2(: %{GREEDYDATA:[system][auth][ssh][signature]})?", 55 | "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sshd(?:\[%{POSINT:[system][auth][pid]}\])?: %{DATA:[system][auth][ssh][event]} user %{DATA:[system][auth][user]} from %{IPORHOST:[system][auth][ssh][ip]}", 56 | 
"%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sshd(?:\[%{POSINT:[system][auth][pid]}\])?: Did not receive identification string from %{IPORHOST:[system][auth][ssh][dropped_ip]}", 57 | "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sudo(?:\[%{POSINT:[system][auth][pid]}\])?: \s*%{DATA:[system][auth][user]} :( %{DATA:[system][auth][sudo][error]} ;)? TTY=%{DATA:[system][auth][sudo][tty]} ; PWD=%{DATA:[system][auth][sudo][pwd]} ; USER=%{DATA:[system][auth][sudo][user]} ; COMMAND=%{GREEDYDATA:[system][auth][sudo][command]}", 58 | "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} groupadd(?:\[%{POSINT:[system][auth][pid]}\])?: new group: name=%{DATA:system.auth.groupadd.name}, GID=%{NUMBER:system.auth.groupadd.gid}", 59 | "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} useradd(?:\[%{POSINT:[system][auth][pid]}\])?: new user: name=%{DATA:[system][auth][user][add][name]}, UID=%{NUMBER:[system][auth][user][add][uid]}, GID=%{NUMBER:[system][auth][user][add][gid]}, home=%{DATA:[system][auth][user][add][home]}, shell=%{DATA:[system][auth][user][add][shell]}$", 60 | "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} %{DATA:[system][auth][program]}(?:\[%{POSINT:[system][auth][pid]}\])?: %{GREEDYMULTILINE:[system][auth][message]}"] } 61 | pattern_definitions => { 62 | "GREEDYMULTILINE"=> "(.|\n)*" 63 | } 64 | remove_field => "message" 65 | } 66 | date { 67 | match => [ "[system][auth][timestamp]", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ] 68 | } 69 | geoip { 70 | source => "[system][auth][ssh][ip]" 71 | target => "[system][auth][ssh][geoip]" 72 | } 73 | } 74 | else if [fileset][name] == "syslog" { 75 | grok { 76 | match => { "message" => ["%{SYSLOGTIMESTAMP:[system][syslog][timestamp]} %{SYSLOGHOST:[system][syslog][hostname]} %{DATA:[system][syslog][program]}(?:\[%{POSINT:[system][syslog][pid]}\])?: %{GREEDYMULTILINE:[system][syslog][message]}"] } 77 | pattern_definitions => { "GREEDYMULTILINE" => "(.|\n)*" } 78 | remove_field => "message" 79 | } 80 | date { 81 | match => [ "[system][syslog][timestamp]", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ] 82 | } 83 | } 84 | } 85 | } 86 | 87 | output { 88 | elasticsearch { 89 | hosts => ["http://elasticsearch.{{ namespace }}.svc.cluster.local:9200"] 90 | manage_template => false 91 | index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}" 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /templates/6_logstash/logstash.yml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | namespace: {{ namespace }} 5 | name: logstash 6 | spec: 7 | type: LoadBalancer 8 | selector: 9 | app: logstash 10 | ports: 11 | - name: beats 12 | port: {{ logstash_beats_port }} 13 | --- 14 | apiVersion: apps/v1 15 | kind: Deployment 16 | metadata: 17 | namespace: {{ namespace }} 18 | name: logstash 19 | labels: 20 | app: logstash 21 | spec: 22 | replicas: 2 23 | selector: 24 | matchLabels: 25 | app: logstash 26 | template: 27 | metadata: 28 | labels: 29 | app: logstash 30 | spec: 31 | containers: 32 | - name: logstash 33 | image: docker.elastic.co/logstash/logstash:7.0.0 34 | ports: 35 | - containerPort: {{ logstash_beats_port }} 36 | volumeMounts: 37 | - name: logstash-pipelines 38 | mountPath: /usr/share/logstash/pipeline 39 | - name: logstash-tls 40 | mountPath: 
/usr/share/logstash/ssl
41 | 
42 |       # spec.template.spec.volumes
43 |       volumes:
44 |       - name: logstash-pipelines
45 |         configMap:
46 |           name: logstash-pipelines
47 |       - name: logstash-tls
48 |         secret:
49 |           secretName: logstash-tls
50 | 
--------------------------------------------------------------------------------
/templates/6_logstash/ssl-gen.sh:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | set -e
 3 | 
 4 | CA_NAME="$1"
 5 | URL="$2"
 6 | COUNTRY="$3"
 7 | STATE="$4"
 8 | LOC="$5"
 9 | ORG="$6"
10 | ORG_UNIT="$7"
11 | DEST="$8"
12 | 
13 | function make_ca {
14 |   openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout ca.key -out ca.crt -subj "/C=$COUNTRY/ST=$STATE/L=$LOC/O=$ORG/OU=$ORG_UNIT/CN=$CA_NAME"
15 | }
16 | 
17 | function make_key_and_csr {
18 |   openssl genrsa -out $1.pem 2048
19 |   openssl pkcs8 -in $1.pem -topk8 -nocrypt -out $1.key
20 |   openssl req -new -key $1.key -out $1.csr -subj "/C=$COUNTRY/ST=$STATE/L=$LOC/O=$ORG/OU=$ORG_UNIT/CN=$2"
21 | }
22 | 
23 | function sign {
24 |   openssl x509 -req -in $1.csr -CA $2.crt -CAkey $2.key -CAcreateserial -out $1.crt -days 3650 -sha256
25 |   rm $1.csr
26 | }
27 | 
28 | cd "$DEST"
29 | 
30 | make_ca
31 | make_key_and_csr logstash "$URL"
32 | sign logstash ca
33 | make_key_and_csr client "*"
34 | sign client ca
35 | rm ca.srl
36 | 
--------------------------------------------------------------------------------
/templates/7_oauth2-proxy/oauth2-config.yml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | kind: Secret
 3 | metadata:
 4 |   namespace: {{ namespace }}
 5 |   name: oauth2-config
 6 | type: Opaque
 7 | data:
 8 |   {% macro inc(str) %}{% include str %}{% endmacro %}
 9 |   oauth2_proxy.cfg: {{ inc('7_oauth2-proxy/oauth2_proxy.cfg') | b64encode }}
10 |   tls.crt: {{ ssl_crt | b64encode }}
11 |   tls.key: {{ ssl_key | b64encode }}
12 | 
--------------------------------------------------------------------------------
/templates/7_oauth2-proxy/oauth2-proxy.yml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | kind: Service
 3 | metadata:
 4 |   namespace: {{ namespace }}
 5 |   name: oauth2-proxy
 6 |   labels:
 7 |     app: oauth2-proxy
 8 | spec:
 9 |   selector:
10 |     app: oauth2-proxy
11 |   type: LoadBalancer
12 |   ports:
13 |   - port: 443
14 |     targetPort: 4433
15 | ---
16 | apiVersion: apps/v1
17 | kind: Deployment
18 | metadata:
19 |   namespace: {{ namespace }}
20 |   name: oauth2-proxy
21 |   labels:
22 |     app: oauth2-proxy
23 | spec:
24 |   replicas: 1
25 |   selector:
26 |     matchLabels:
27 |       app: oauth2-proxy
28 |   template:
29 |     metadata:
30 |       labels:
31 |         app: oauth2-proxy
32 |     spec:
33 |       containers:
34 |       - name: oauth2-proxy
35 |         image: machinedata/oauth2_proxy:2.2
36 |         ports:
37 |         - name: https
38 |           containerPort: 4433
39 |         livenessProbe:
40 |           httpGet:
41 |             path: /ping
42 |             port: https
43 |             scheme: HTTPS
44 |         volumeMounts:
45 |         - name: oauth2-config
46 |           mountPath: /conf
47 |       # spec.template.spec.volumes
48 |       volumes:
49 |       - name: oauth2-config
50 |         secret:
51 |           secretName: oauth2-config
52 | 
--------------------------------------------------------------------------------
/templates/7_oauth2-proxy/oauth2_proxy.cfg:
--------------------------------------------------------------------------------
 1 | client_id = "{{ oauth_client_id }}"
 2 | client_secret = "{{ oauth_client_secret }}"
 3 | cookie_name = "{{ oauth_cookie_name }}"
 4 | cookie_secret = "{{ oauth_cookie_secret }}"
 5 | email_domains = [
 6 |   "*",
 7 | ]
 8 | github_org = "{{ github_org }}"
 9 | {% if github_team|length > 0 %}github_team = "{{ github_team }}"{% endif %}
10 | provider = "github"
11 | https_address = "0.0.0.0:4433"
12 | tls_cert_file = "/conf/tls.crt"
13 | tls_key_file = "/conf/tls.key"
14 | upstreams = [
15 |   "http://kibana.{{ namespace }}.svc.cluster.local:5601",
16 | ]
17 | 
--------------------------------------------------------------------------------
/util/__init__.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | import re
 3 | from base64 import b64encode, urlsafe_b64encode
 4 | 
 5 | base64 = lambda s: b64encode(s.encode()).decode()
 6 | random_token = lambda: urlsafe_b64encode(os.urandom(16)).decode()
 7 | 
 8 | def ensure_dir(filename):
 9 |     if not os.path.exists(os.path.dirname(filename)):
10 |         try:
11 |             os.makedirs(os.path.dirname(filename))
12 |         except FileExistsError:
13 |             pass
14 | 
15 | def prompt(msg, regex='.*', default=None, readFile=False):
16 |     p = f" (default='{default}'): " if default else ': '
17 |     while True:
18 |         try:
19 |             result = input(msg + p)
20 |             if not result and default:
21 |                 return default
22 |             if not re.match(regex, result):
23 |                 raise ValueError(f"expected response to match the regex '{regex}'.")
24 |             if readFile:
25 |                 with open(result, 'r') as file:
26 |                     data = file.read()
27 |                 return data
28 |             else:
29 |                 return result
30 |         except Exception as e:
31 |             print(f'bad input: {str(e)}')
32 | 
--------------------------------------------------------------------------------
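Once Logstash is deployed, the key material generated under `clusters/<namespace>/logstash-ssl-keys/` can be sanity-checked before pointing Filebeat or Metricbeat at the beats input. A rough sketch — the LoadBalancer address is a placeholder, and 8751 is the beats port hard-coded by config-templates.py:

```sh
cd clusters/<namespace>/logstash-ssl-keys

# Both the server and the client certificate should chain to the generated CA
openssl verify -CAfile ca.crt logstash.crt client.crt

# Handshake test against the beats input; logstash.conf sets
# ssl_verify_mode => force_peer, so presenting the client certificate is required
openssl s_client -connect <logstash-loadbalancer-address>:8751 \
  -cert client.crt -key client.key -CAfile ca.crt </dev/null
```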