├── src
│   ├── .gitignore
│   ├── package.json
│   ├── config.js.example
│   ├── app.js
│   └── lib
│       └── auth.js
├── .gitignore
├── extra
│   └── docker
│       ├── logstash-simple.conf
│       ├── nginx
│       │   ├── nginx-mapping.yml
│       │   └── kibana.conf
│       ├── logstash-tcp-es.conf
│       ├── elk.sh
│       ├── README.md
│       ├── Dockerfile
│       └── kibana-config.js
├── config
│   ├── systemd-kibana.service
│   ├── systemd-logcabin.service
│   ├── curator
│   │   ├── curator.yml
│   │   └── delete-old-indexes.yml
│   ├── logstash-indexer.conf
│   ├── config.js
│   └── elasticsearch.yml
├── scripts
│   ├── backup.sh
│   └── get_ami_images.sh
├── CHANGELOG.md
├── README.md
├── LICENSE
└── cloudformation
    └── ELK_Stack_Multi_AZ_in_Private_VPC.yml
/src/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .idea
3 |
--------------------------------------------------------------------------------
/extra/docker/logstash-simple.conf:
--------------------------------------------------------------------------------
1 | input {
2 | stdin { }
3 | }
4 |
5 | output {
6 | stdout {}
7 | }
8 |
--------------------------------------------------------------------------------
/extra/docker/nginx/nginx-mapping.yml:
--------------------------------------------------------------------------------
1 | name: elk
2 | mappings:
3 | - prefix: elasticsearch
4 |   port: 9200
5 | - prefix: logs
6 |   port: 9292
7 |
--------------------------------------------------------------------------------
/extra/docker/logstash-tcp-es.conf:
--------------------------------------------------------------------------------
1 | input {
2 | tcp {
3 | port => 6789
4 | codec => "json_lines"
5 | }
6 | }
7 | output {
8 | elasticsearch {
9 | protocol => "http"
10 | host => "localhost"
11 | port => 9200
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/extra/docker/elk.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ES_PID_FILE=$ES_HOME/pidfile
3 | $ES_HOME/bin/elasticsearch -dp $ES_PID_FILE   # -d: daemonize, -p: write pid file
4 | echo "RUNNING Elasticsearch"
5 | #trap 'echo Killing elasticsearch ; kill $(<${ES_PID_FILE})' EXIT
6 | sleep 20   # give Elasticsearch time to start before nginx and logstash
7 | nginx
8 | $LOGSTASH_HOME/bin/logstash agent -f $LOGSTASH_HOME/conf.d   # foreground; keeps the container alive
9 |
--------------------------------------------------------------------------------
/config/systemd-kibana.service:
--------------------------------------------------------------------------------
1 | [Service]
2 | ExecStart=/usr/share/kibana/bin/kibana
3 | Restart=always
4 | StandardOutput=syslog
5 | StandardError=syslog
6 | SyslogIdentifier=kibana5
7 | User=root
8 | Group=root
9 | Environment=NODE_ENV=production
10 |
11 | [Install]
12 | WantedBy=multi-user.target
13 |
--------------------------------------------------------------------------------
/config/systemd-logcabin.service:
--------------------------------------------------------------------------------
1 | [Service]
2 | WorkingDirectory=/opt/logcabin
3 | ExecStart=/usr/bin/nodejs app
4 | Restart=always
5 | StandardOutput=syslog
6 | StandardError=syslog
7 | SyslogIdentifier=logcabin
8 | User=logcabin
9 | Group=logcabin
10 | Environment=
11 |
12 | [Install]
13 | WantedBy=multi-user.target
14 |
--------------------------------------------------------------------------------
/scripts/backup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -ev
2 |
3 | MASTER=$(curl -s http://localhost:9200/_cat/master?h=ip)          # IP of the elected master node
4 | MY_IP=$(curl -s http://instance-data/latest/meta-data/local-ipv4) # this instance's private IP
5 | 
6 | if [ "$MY_IP" = "$MASTER" ]; then  # snapshot only on the master, so it runs once per cluster
7 |   DATE=$(date +%Y-%m-%d)
8 |   curl -s -XPUT http://localhost:9200/_snapshot/s3/${DATE}?wait_for_completion=true
9 | fi
10 |
11 |
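12 | # Note: this assumes a snapshot repository named "s3" has already been registered
13 | # via the S3 repository plugin, e.g. (bucket name is a placeholder):
14 | #
15 | #   curl -XPUT http://localhost:9200/_snapshot/s3 \
16 | #     -d '{ "type": "s3", "settings": { "bucket": "YOUR_SNAPSHOT_BUCKET" } }'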
--------------------------------------------------------------------------------
/config/curator/curator.yml:
--------------------------------------------------------------------------------
1 | client:
2 |   hosts:
3 |     - 127.0.0.1
4 |   port: 9200
5 |   url_prefix:
6 |   use_ssl: False
7 |   certificate:
8 |   client_cert:
9 |   client_key:
10 |   ssl_no_validate: False
11 |   http_auth:
12 |   timeout: 30
13 |   master_only: False
14 | logging:
15 |   loglevel: INFO
16 |   logfile:
17 |   logformat: default
18 |   blacklist: ['elasticsearch', 'urllib3']
19 |
--------------------------------------------------------------------------------
/extra/docker/README.md:
--------------------------------------------------------------------------------
1 | Dockerfile for Kibana 3
2 | =======================
3 |
4 | For example:
5 |
6 | docker build -t elk .
7 | docker run -ti -P -p 6789:6789 -v $PWD/logstash-tcp-es.conf:/opt/logstash/conf.d/logstash-tcp.conf elk
8 |
9 | Currently the configuration that is applied in the `Dockerfile` is
10 | quite specific to the needs of the Editorial Tools team, and should be
11 | tweaked to become more general.
12 |
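13 | Once the container is up, a test event can be sent to the TCP input defined in
14 | `logstash-tcp-es.conf` (port 6789, `json_lines` codec) with something like:
15 | 
16 |     echo '{"message": "hello from docker"}' | nc localhost 6789
17 | 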
--------------------------------------------------------------------------------
/src/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "logcabin",
3 | "private": true,
4 | "main": "app.js",
5 | "dependencies": {
6 | "connect-restreamer": "*",
7 | "express": "3.*",
8 | "passport": "*",
9 | "passport-github": "*",
10 | "passport-google-oauth": "*",
11 | "request": "2.82.0",
12 | "client-sessions": "0.6.0"
13 | },
14 | "scripts": {
15 | "start": "node app"
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/config/logstash-indexer.conf:
--------------------------------------------------------------------------------
1 | input {
2 | tcp {
3 | port => 6379
4 | codec => json_lines
5 | }
6 |
7 | kinesis {
8 | kinesis_stream_name => '@@KINESIS_STREAM_NAME'
9 | application_name => '@@KINESIS_APPLICATION_NAME'
10 | region => '@@KINESIS_REGION'
11 | codec => json { }
12 | }
13 | }
14 |
15 | output {
16 | elasticsearch { hosts => ["localhost:9200"] }
17 | # stdout { codec => rubydebug }
18 | }
19 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Change Log
2 |
3 | ## 2.0 (2015-06-25)
4 |
5 | Updates:
6 |
7 | - Kibana 4.1.0
8 | - Elasticsearch 1.6.0
9 | - Logstash 1.5.1
10 | - Ubuntu Vivid 15.04 (systemd has replaced upstart)
11 | - Java 8
12 | - HVM instead of PV AMI (to support new instance types)
13 |
14 | ## 1.0 (2014-07-11)
15 |
16 | First Release:
17 |
18 | - Kibana 3.0
19 | - Elasticsearch 1.1
20 | - Logstash 1.4
21 | - Ubuntu Trusty 14.04
22 | - Java 7
23 | - PV AMI
24 |
--------------------------------------------------------------------------------
/config/config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | 'host': '@@LOGCABIN_HOST',
3 | 'listen_port': 8080,
4 | 'apiKey': '@@API_KEY',
5 | 'cookie_secret': '@@COOKIE_SECRET',
6 | 'oauth_unauthenticated': ['/__es/', '/__es/_cat/health'],
7 | 'oauth_application_name': 'logcabin',
8 | 'oauth_client_id': '@@CLIENT_ID',
9 | 'oauth_client_secret': '@@CLIENT_SECRET',
10 | 'allowed_domain': '@@ALLOWED_DOMAIN',
11 | 'kibana_host': 'localhost',
12 | 'kibana_port': 5601,
13 | 'es_host': 'localhost',
14 | 'es_port': 9200
15 | }
16 |
--------------------------------------------------------------------------------
/src/config.js.example:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | 'host': 'http://localhost:9201',
3 | 'listen_port': 9201,
4 | 'apiKey': 'YOUR_API_KEY',
5 | 'cookie_secret': 'DAX52Zo15CWfBUnoyp4rjY',
6 | 'oauth_unauthenticated': ['/__es/'],
7 | 'oauth_application_name': 'logcabin-local',
8 | 'oauth_client_id': '968588183953-v3767ro0sr1parm613pmsujgrupapo3a.apps.googleusercontent.com',
9 | 'oauth_client_secret': 'z6t0aUbgrnIp3Ot4DtjMTB2A',
10 | 'allowed_domain': 'guardian.co.uk',
11 | 'es_host': 'localhost',
12 | 'es_port': 9200
13 | }
14 |
--------------------------------------------------------------------------------
/scripts/get_ami_images.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | AMI_IMAGE_NAME="ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-20180122"
4 |
5 | # http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region
6 |
7 | for region in us-east-1 us-west-2 us-west-1 eu-west-1 eu-west-2 eu-central-1 ap-southeast-1 ap-northeast-1 ap-southeast-2 ap-northeast-2 sa-east-1
8 | do
9 | printf " $region:\n"
10 | IMAGE_ID=$(aws ec2 describe-images --filters "Name=name,Values=$AMI_IMAGE_NAME" --region "$region" | jq -r '.Images[].ImageId')
11 | printf " ImageId: $IMAGE_ID\n"
12 | done
13 |
--------------------------------------------------------------------------------
/extra/docker/nginx/kibana.conf:
--------------------------------------------------------------------------------
1 | events {
2 | worker_connections 1024;
3 | }
4 |
5 | http {
6 |
7 | types {
8 | text/html html htm shtml;
9 | text/css css;
10 | }
11 |
12 | server {
13 | listen 9292;
14 | server_name logs.local.dev-gutools.co.uk;
15 |
16 | location /css {
17 | types { }
18 | root /opt/kibana-3.1.1;
19 | default_type text/css;
20 | }
21 | location / {
22 | root /opt/kibana-3.1.1;
23 | index index.html;
24 | }
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/config/curator/delete-old-indexes.yml:
--------------------------------------------------------------------------------
1 | actions:
2 |   1:
3 |     action: delete_indices
4 |     description: 'Delete indices older than @@NUMBER_OF_DAYS days (based on index name), for logstash- prefixed indices.
5 |       Ignore the error if the filter does not result in an actionable list of indices (ignore_empty_list) and exit cleanly.'
6 |     options:
7 |       ignore_empty_list: True
8 |       timeout_override:
9 |       continue_if_exception: False
10 |     filters:
11 |     - filtertype: pattern
12 |       kind: prefix
13 |       value: logstash-
14 |       exclude:
15 |     - filtertype: age
16 |       source: name
17 |       direction: older
18 |       timestring: '%Y.%m.%d'
19 |       unit: days
20 |       unit_count: @@NUMBER_OF_DAYS
21 |       exclude:
22 |
--------------------------------------------------------------------------------
/src/app.js:
--------------------------------------------------------------------------------
1 | var express = require('express')
2 | var http = require('http')
3 | var fs = require('fs')
4 | var config = require('./config')
5 | var auth = require('./lib/auth')
6 | var sessions = require("client-sessions")
7 |
8 | var app = express()
9 |
10 | console.log('Logcabin starting...')
11 |
12 | app.use(sessions({ cookieName: 'session', secret: config.cookie_secret }))
13 |
14 | auth.setup(express, app, config)
15 |
16 | proxyES()
17 | proxyKibana4()
18 |
19 | http.createServer(app).listen(config.listen_port)
20 | console.log('Logcabin listening on ' + config.listen_port)
21 |
22 | function proxyES() { // forward /__es/* requests to the local Elasticsearch node
23 | app.use("/__es", function(request, response, next) {
24 |
25 | var proxyRequest = http.request({host: config.es_host, port: config.es_port, path: request.url, method: request.method, headers: request.headers}, function(proxyResponse) {
26 | response.writeHead(proxyResponse.statusCode, proxyResponse.headers)
27 | proxyResponse.pipe(response)
28 | })
29 | request.pipe(proxyRequest)
30 | })
31 | }
32 |
33 | function proxyKibana4() { // forward all other requests to the local Kibana server
34 | app.use("/", function(request, response, next) {
35 |
36 | var proxyRequest = http.request({host: config.kibana_host, port: config.kibana_port, path: request.url, method: request.method, headers: request.headers}, function(proxyResponse) {
37 | response.writeHead(proxyResponse.statusCode, proxyResponse.headers)
38 | proxyResponse.pipe(response)
39 | })
40 | request.pipe(proxyRequest)
41 | })
42 | }
43 |
--------------------------------------------------------------------------------
/extra/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM archlinux/jre
2 | RUN yes | pacman -Syu
3 | RUN yes | pacman -S nginx
4 | WORKDIR /opt
5 | RUN curl -L https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-1.4.3.tar.gz | tar -xzf -
6 | # RUN echo '/opt/elasticsearch-1.4.3/bin/elasticsearch -d' >>/app/elasticsearch.sh
7 | EXPOSE 9200 9292
8 | # COPY logstash-simple.conf /opt/logstash/conf.d/logstash.conf
9 | # COPY kibana-config.js /opt/logstash/vendor/kibana/config.js
10 |
11 | # ELASTIC SEARCH
12 |
13 | ENV ES_HOME=/opt/elasticsearch-1.4.3/
14 | ENV ES_CONFIG=$ES_HOME/config/elasticsearch.yml
15 | RUN echo 'http.cors.enabled: true' >>$ES_CONFIG
16 | RUN echo 'http.cors.allow-origin: /https?:\/\/.*\.local.dev-gutools.co.uk/' >>$ES_CONFIG
17 |
18 | # LOGSTASH
19 |
20 | RUN curl -L 'https://download.elasticsearch.org/logstash/logstash/logstash-1.4.2.tar.gz' | tar -xzf -
21 | ENV LOGSTASH_HOME=/opt/logstash-1.4.2
22 | ENV KIBANA_PORT=9292
23 | RUN ln -s $LOGSTASH_HOME /opt/logstash
24 | COPY logstash-simple.conf $LOGSTASH_HOME/conf.d/logstash.conf
25 |
26 | # KIBANA
27 | RUN curl -L 'https://download.elasticsearch.org/kibana/kibana/kibana-3.1.1.tar.gz' | tar -xzf -
28 | #RUN curl -L 'https://download.elasticsearch.org/kibana/kibana/kibana-4.0.0-linux-x64.tar.gz' | tar -xzf -
29 | RUN curl -L 'http://nodejs.org/dist/v0.12.0/node-v0.12.0-linux-x64.tar.gz' | tar -xzf -
30 | ENV KIBANA_HOME=/opt/kibana-3.1.1
31 | RUN ln -s /opt/node-v0.12.0-linux-x64 $KIBANA_HOME/node
32 | ENV KIBANA_CONF_FILE=$KIBANA_HOME/config.js
33 | RUN sed -i.orig -e 's@^\(\s*elasticsearch:\s*\)\(.*\),@\1"https://elasticsearch.local.dev-gutools.co.uk",@' \
34 | -e 's@^\(\s*port:\s*\)\(.*\)@\1'${KIBANA_PORT}'@' \
35 | $KIBANA_CONF_FILE
36 | COPY nginx/kibana.conf /etc/nginx/nginx.conf
37 | # APP START SCRIPT
38 | COPY elk.sh /
39 | CMD /elk.sh
40 |
--------------------------------------------------------------------------------
/extra/docker/kibana-config.js:
--------------------------------------------------------------------------------
1 | /** @scratch /configuration/config.js/1
2 | *
3 | * == Configuration
4 | * config.js is where you will find the core Kibana configuration. This file contains parameters that
5 | * must be set before kibana is run for the first time.
6 | */
7 | define(['settings'],
8 | function (Settings) {
9 |
10 |
11 | /** @scratch /configuration/config.js/2
12 | *
13 | * === Parameters
14 | */
15 | return new Settings({
16 |
17 | /** @scratch /configuration/config.js/5
18 | *
19 | * ==== elasticsearch
20 | *
21 | * The URL to your elasticsearch server. You almost certainly don't
22 | * want +http://localhost:9200+ here, even if Kibana and Elasticsearch are on
23 | * the same host. By default this will attempt to reach ES at the same host you have
24 | * kibana installed on. You probably want to set it to the FQDN of your
25 | * elasticsearch host
26 | *
27 | * Note: this can also be an object if you want to pass options to the http client. For example:
28 | *
29 | * +elasticsearch: {server: "http://localhost:9200", withCredentials: true}+
30 | *
31 | */
32 | // elasticsearch: "http://"+window.location.hostname+":9200",
33 | elasticsearch: "https://elasticsearch.local.dev-gutools.co.uk",
34 |
35 | /** @scratch /configuration/config.js/5
36 | *
37 | * ==== default_route
38 | *
39 | * This is the default landing page when you don't specify a dashboard to load. You can specify
40 | * files, scripts or saved dashboards here. For example, if you had saved a dashboard called
41 | * `WebLogs' to elasticsearch you might use:
42 | *
43 | * default_route: '/dashboard/elasticsearch/WebLogs',
44 | */
45 | // default_route : '/dashboard/file/default.json',
46 |
47 | /** @scratch /configuration/config.js/5
48 | *
49 | * ==== kibana-int
50 | *
51 | * The default ES index to use for storing Kibana specific objects,
52 | * such as stored dashboards
53 | */
54 | kibana_index: "kibana-int",
55 |
56 | /** @scratch /configuration/config.js/5
57 | *
58 | * ==== panel_names
59 | *
60 | * An array of panel modules available. Panels will only be loaded when they are defined in the
61 | * dashboard, but this list is used in the "add panel" interface.
62 | */
63 | panel_names: [
64 | 'histogram',
65 | 'map',
66 | 'goal',
67 | 'table',
68 | 'filtering',
69 | 'timepicker',
70 | 'text',
71 | 'hits',
72 | 'column',
73 | 'trends',
74 | 'bettermap',
75 | 'query',
76 | 'terms',
77 | 'stats',
78 | 'sparklines'
79 | ]
80 | });
81 | });
82 |
--------------------------------------------------------------------------------
/src/lib/auth.js:
--------------------------------------------------------------------------------
1 | var request = require('request')
2 | var passport = require('passport')
3 | var GoogleStrategy = require('passport-google-oauth').OAuth2Strategy
4 |
5 |
6 | exports.setup = function(express, app, config) {
7 |
8 | console.log('Google OAuth2 authentication used')
9 |
10 |
11 | passport.serializeUser(function(user, done) {
12 | done(null, user)
13 | })
14 |
15 | passport.deserializeUser(function(obj, done) {
16 | done(null, obj)
17 | })
18 |
19 | var callbackUrl = config.host + '/auth/google/callback'
20 |
21 | passport.use(new GoogleStrategy({
22 | clientID: config.oauth_client_id,
23 | clientSecret: config.oauth_client_secret,
24 | callbackURL: callbackUrl
25 | }, function(accessToken, refreshToken, profile, done) {
26 | findUser(profile, accessToken, config, function(succeed, msg) {
27 | return succeed ? done(null, profile): done(null, false, { message: msg})
28 | })
29 | }))
30 |
31 | app.use(function(req, res, next) {
32 | if (req.session.authenticated || nonAuthenticated(config, req.url) || verifyApiKey(config, req)) {
33 | return next()
34 | }
35 | req.session.beforeLoginURL = req.url
36 | res.redirect('/auth/google')
37 | })
38 | app.use(passport.initialize())
39 | app.use(passport.session())
40 |
41 |
42 | var scope = ['https://www.googleapis.com/auth/userinfo.profile', 'https://www.googleapis.com/auth/userinfo.email']
43 |
44 | app.get('/auth/google',
45 | passport.authenticate('google', { scope: scope }),
46 | function(req, res) {
47 | /* do nothing as this request will be redirected to google for authentication */
48 | }
49 | )
50 |
51 | app.get('/auth/google/callback',
52 | passport.authenticate('google', { failureRedirect: '/auth/google/fail' }),
53 | function(req, res) {
54 | /* Successful authentication, redirect home. */
55 | req.session.authenticated = true
56 | res.redirect(req.session.beforeLoginURL || '/')
57 | }
58 | )
59 |
60 | app.get('/auth/google/fail', function(req, res) {
61 | res.statusCode = 403
62 | res.end('Unauthorized')
63 | })
64 | }
65 |
66 | function nonAuthenticated(config, url) {
67 | return url.indexOf('/auth/google') === 0 || config.oauth_unauthenticated.indexOf(url) > -1
68 | }
69 |
70 | function findUser(profile, accessToken, config, callback) {
71 | var username = profile.displayName || 'unknown';
72 | var email = profile.emails[0].value || '';
73 | var domain = profile._json.domain || '';
74 |
75 | if ( ( email.split('@')[1] === config.allowed_domain ) || domain === config.allowed_domain ) {
76 | return callback(true, username)
77 | } else {
78 | console.log('access refused to: ' + username + ' (email=' + email + ';domain=' + domain + ')');
79 | return callback(false, username + ' is not authorized')
80 | }
81 | }
82 |
83 | function verifyApiKey(config, req) { // expects header "Authorization: ApiKey <configured key>"
84 | var apiKey = req.headers['authorization'] || '';
85 | return (config.apiKey.length > 0 && "ApiKey " + config.apiKey === apiKey)
86 | }
87 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ELK Stack with Google OAuth
2 | ===========================
3 |
4 | ELK stands for [Elasticsearch][1], [Logstash][2] and [Kibana][3]. It is being promoted by Elasticsearch as a "devops" logging solution.
5 |
6 | This implementation of an ELK stack is designed to run in an AWS EC2 VPC and is secured using Google OAuth 2.0. It consists of one or more instances behind an Elastic Load Balancer (ELB) running the following components:
7 |
8 | * Kibana 5.x
9 | * Elasticsearch 5.x
10 | * Logstash 5.x indexer
11 | * Node.js application proxy
12 |
13 | Security
14 | --------
15 |
16 | Only the Logstash indexer and the application proxy ports are exposed on the ELB, and all requests to the application proxy for Kibana or Elasticsearch are authenticated using Google OAuth.
17 |
18 | Elasticsearch is configured to listen only on the local loopback address. Since Elasticsearch 1.4.3, dynamic scripting has been disabled to address security concerns around [remote code execution][4].
19 |
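   | Server-to-server requests can skip OAuth by sending the API key from the stack
   | parameters in the `Authorization` header, which the proxy checks in the form
   | `ApiKey <key>`. A minimal sketch (the ELB DNS name and key are placeholders):
   | 
   | ```
   | $ curl -H 'Authorization: ApiKey YOUR_API_KEY' \
   |     'http://INSERT-ELB-DNS-NAME-HERE/__es/_cat/indices'
   | ```
   | 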
20 | Healthcheck
21 | -----------
22 |
23 | The ELB requires a healthcheck to ensure instances in the load balancer are healthy. To achieve this, access to the root URL for Elasticsearch is available at the path `/__es` and it is *not* authenticated.
24 |
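   | For example, the unauthenticated paths can also be used to check a running stack
   | by hand (the ELB DNS name is a placeholder):
   | 
   | ```
   | $ curl 'http://INSERT-ELB-DNS-NAME-HERE/__es/'
   | $ curl 'http://INSERT-ELB-DNS-NAME-HERE/__es/_cat/health'
   | ```
   | 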
25 | Log Shippers
26 | ------------
27 |
28 | ### via TCP
29 |
30 | Shipping logs to the ELK stack via TCP is left as an exercise for the user; however, example configurations are included in the repo under the `/extra` directory.
31 |
32 | A very simple configuration that reads from stdin, tails a log file, and then both echoes to stdout and forwards to the ELK stack is shown below:
33 |
34 | ```
35 | $ logstash --debug -e '
36 | input { stdin { } file { path => "/var/log/system.log" } }
37 | output { stdout { } tcp { host => "INSERT-ELB-DNS-NAME-HERE" port => 6379 codec => json_lines } }'
38 | ```
39 |
40 | ### via a Kinesis Stream
41 |
42 | Logstash is also setup to ingest logs via a Kinesis Stream using the [logstash-input-kinesis](https://github.com/codekitchen/logstash-input-kinesis) plugin.
43 | You can find the Kinesis stream information in the Cloudformation stack output.
44 | The expected input codec is `json`.
45 |
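   | As a quick test, a single JSON event can be put onto the stream with the AWS CLI;
   | the stream name below is a placeholder (take the real one from the stack output),
   | and note that AWS CLI v2 expects `--data` to be base64-encoded:
   | 
   | ```
   | $ aws kinesis put-record \
   |     --stream-name INSERT-KINESIS-STREAM-NAME-HERE \
   |     --partition-key test \
   |     --data '{"message": "hello from kinesis"}'
   | ```
   | 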
46 | VPC Configuration
47 | -----------------
48 |
49 | This ELK stack assumes your AWS VPC is configured as per AWS guidelines, that is, with a public and a private subnet in each availability zone of the region. See the [Your VPC and Subnets][6] guide for more information.
50 |
51 | The easiest way to ensure you have the required VPC setup would be to delete your existing VPC, if possible, and then use the [Start VPC Wizard][7] which will create a correctly configured VPC for you.
52 |
53 | Installation
54 | ------------
55 |
56 | 1. Go to [Google Developer Console][5] and create a new client ID for a web application
57 |
58 | You can leave the URLs as they are and update them once the ELK stack has been created. Take note of the Client ID and Client Secret as you will need them in the next step.
59 |
60 | 2. Enable the "Google+ API" for your new client. This is the only Google API needed.
61 |
62 | 3. Launch the ELK stack using the AWS console or the `aws` command-line tool and enter the required parameters (see the example after this list). Note that some parameters, like providing a Route53 Hosted Zone Name to create a DNS alias for the public ELB, are optional.
63 |
64 | 4. Once the ELK stack has launched, revisit the Google developer console and update the URLs, copying the output for `GoogleOAuthRedirectURL` to `AUTHORIZED REDIRECT URI` and the same URL, but without the path, to `AUTHORISED JAVASCRIPT ORIGINS`.
65 |
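   | A minimal sketch of step 3 using the CLI; all values are placeholders, the
   | remaining required parameters (e.g. the VPC subnets) are passed the same way,
   | and `CAPABILITY_IAM` is needed because the template creates IAM roles:
   | 
   | ```
   | $ aws cloudformation create-stack \
   |     --stack-name elk-stack \
   |     --template-body file://cloudformation/ELK_Stack_Multi_AZ_in_Private_VPC.yml \
   |     --capabilities CAPABILITY_IAM \
   |     --parameters ParameterKey=Stack,ParameterValue=elk \
   |                  ParameterKey=VpcId,ParameterValue=vpc-1234abcd \
   |                  ParameterKey=GoogleOAuthClientId,ParameterValue=YOUR_CLIENT_ID \
   |                  ParameterKey=GoogleOAuthClientSecret,ParameterValue=YOUR_CLIENT_SECRET \
   |                  ParameterKey=CookieSecret,ParameterValue=ANY_RANDOM_STRING
   | ```
   | 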
66 | Plugins
67 | -------
68 |
69 | The following elasticsearch plugins are installed:
70 |
71 | * [X-Pack][8] - Elastic extension that bundles security, alerting, monitoring, reporting, and graph.
72 | * [EC2 Discovery][9] - uses AWS API for the unicast discovery mechanism.
73 | * [S3 Repository][10] - adds support for using S3 as a repository for Snapshot.
74 |
75 | Configuration
76 | -------------
77 |
78 | This ELK stack cloudformation template takes many parameters; explanations for each are shown when launching the stack. Note that Route 53 DNS, EBS volumes and S3 snapshots are optional.
79 |
80 | Logstash grok patterns can be tested online at https://grokdebug.herokuapp.com/
81 |
82 | The Kibana dashboards are configured via the GUI.
83 |
84 | License
85 | -------
86 |
87 | Guardian ELK Stack Cloudformation Templates and Logcabin Proxy
88 | Copyright 2014-2016 Guardian News & Media
89 |
90 | Licensed under the Apache License, Version 2.0 (the "License");
91 | you may not use this file except in compliance with the License.
92 | You may obtain a copy of the License at
93 |
94 | http://www.apache.org/licenses/LICENSE-2.0
95 |
96 | Unless required by applicable law or agreed to in writing, software
97 | distributed under the License is distributed on an "AS IS" BASIS,
98 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
99 | See the License for the specific language governing permissions and
100 | limitations under the License.
101 |
102 | [1]: https://www.elastic.co/products/elasticsearch "Elasticsearch"
103 | [2]: https://www.elastic.co/products/logstash "Logstash"
104 | [3]: https://www.elastic.co/products/kibana "Kibana"
105 | [4]: https://www.elastic.co/blog/scripting-security "ES Scripting"
106 | [5]: https://console.developers.google.com/ "Google Developer Console"
107 | [6]: https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html "AWS: Your VPC and Subnets"
108 | [7]: https://console.aws.amazon.com/vpc/
109 | [8]: https://www.elastic.co/products/x-pack
110 | [9]: https://www.elastic.co/guide/en/elasticsearch/plugins/current/discovery-ec2.html
111 | [10]: https://www.elastic.co/guide/en/elasticsearch/plugins/current/repository-s3.html
112 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
203 |
--------------------------------------------------------------------------------
/config/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | ##################### Elasticsearch Configuration Example #####################
2 |
3 | # This file contains an overview of various configuration settings,
4 | # targeted at operations staff. Application developers should
5 | # consult the guide at <http://elasticsearch.org/guide>.
6 | #
7 | # The installation procedure is covered at
8 | # <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html>.
9 | #
10 | # Elasticsearch comes with reasonable defaults for most settings,
11 | # so you can try it out without bothering with configuration.
12 | #
13 | # Most of the time, these defaults are just fine for running a production
14 | # cluster. If you're fine-tuning your cluster, or wondering about the
15 | # effect of certain configuration options, please _do ask_ on the
16 | # mailing list or IRC channel [http://elasticsearch.org/community].
17 |
18 | # Any element in the configuration can be replaced with environment variables
19 | # by placing them in ${...} notation. For example:
20 | #
21 | # node.rack: ${RACK_ENV_VAR}
22 |
23 | # For information on supported formats and syntax for the config file, see
24 | # <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup-configuration.html>
25 |
26 |
27 | ################################### Cluster ###################################
28 |
29 | # Cluster name identifies your cluster for auto-discovery. If you're running
30 | # multiple clusters on the same network, make sure you're using unique names.
31 | #
32 | cluster.name: logger
33 |
34 |
35 | #################################### Node #####################################
36 |
37 | # Node names are generated dynamically on startup, so you're relieved
38 | # from configuring them manually. You can tie this node to a specific name:
39 | #
40 | # node.name: "Franz Kafka"
41 |
42 | # Every node can be configured to allow or deny being eligible as the master,
43 | # and to allow or deny to store the data.
44 | #
45 | # Allow this node to be eligible as a master node (enabled by default):
46 | #
47 | # node.master: true
48 | #
49 | # Allow this node to store data (enabled by default):
50 | #
51 | # node.data: true
52 |
53 | # You can exploit these settings to design advanced cluster topologies.
54 | #
55 | # 1. You want this node to never become a master node, only to hold data.
56 | # This will be the "workhorse" of your cluster.
57 | #
58 | # node.master: false
59 | # node.data: true
60 | #
61 | # 2. You want this node to only serve as a master: to not store any data and
62 | # to have free resources. This will be the "coordinator" of your cluster.
63 | #
64 | # node.master: true
65 | # node.data: false
66 | #
67 | # 3. You want this node to be neither master nor data node, but
68 | # to act as a "search load balancer" (fetching data from nodes,
69 | # aggregating results, etc.)
70 | #
71 | # node.master: false
72 | # node.data: false
73 |
74 | # Use the Cluster Health API [http://localhost:9200/_cluster/health] or the
75 | # Node Info API [http://localhost:9200/_nodes] to inspect the cluster state.
76 |
77 | # A node can have generic attributes associated with it, which can later be used
78 | # for customized shard allocation filtering, or allocation awareness. An attribute
79 | # is a simple key value pair, similar to node.key: value, here is an example:
80 | #
81 | # node.rack: rack314
82 |
83 | # By default, multiple nodes are allowed to start from the same installation location
84 | # to disable it, set the following:
85 | # node.max_local_storage_nodes: 1
86 |
87 | #################################### Paths ####################################
88 |
89 | # Path to directory containing configuration (this file and logging.yml):
90 | #
91 | path.conf: /etc/elasticsearch/
92 |
93 | # Path to directory where to store index data allocated for this node.
94 | #
95 | # path.data: /path/to/data
96 | #
97 | # Can optionally include more than one location, causing data to be striped across
98 | # the locations (a la RAID 0) on a file level, favouring locations with most free
99 | # space on creation. For example:
100 | #
101 | path.data: /data
102 |
103 | # Path to log files:
104 | #
105 | path.logs: /var/log/elasticsearch/
106 |
107 | # Path to where plugins are installed:
108 | #
109 | # path.plugins: /path/to/plugins
110 |
111 |
112 | #################################### Plugin ###################################
113 |
114 | # If a plugin listed here is not installed for current node, the node will not start.
115 | #
116 | plugin.mandatory: discovery-ec2, x-pack
117 |
118 | ############################## Network And HTTP ###############################
119 |
120 | # Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens
121 | # on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node
122 | # communication. (the range means that if the port is busy, it will automatically
123 | # try the next port).
124 |
125 | # Set the bind address specifically (IPv4 or IPv6):
126 | #
127 | # network.bind_host: 192.168.0.1
128 |
129 | # Set the address other nodes will use to communicate with this node. If not
130 | # set, it is automatically derived. It must point to an actual IP address.
131 | #
132 | # network.publish_host: 192.168.0.1
133 |
134 | # Set both 'bind_host' and 'publish_host':
135 | #
136 | # network.host: 192.168.0.1
137 |
138 | # Set a custom port for the node to node communication (9300 by default):
139 | #
140 | # transport.tcp.port: 9300
141 |
142 | # Enable compression for all communication between nodes (disabled by default):
143 | #
144 | # transport.tcp.compress: true
145 |
146 | # Set a custom port to listen for HTTP traffic:
147 | #
148 | # http.port: 9200
149 |
150 | # Set a custom allowed content length:
151 | #
152 | # http.max_content_length: 100mb
153 |
154 | # Disable HTTP completely:
155 | #
156 | # http.enabled: false
157 |
158 | http.host: _local_
159 | network.host: _ec2_
160 |
161 | http.cors.enabled: true
162 | http.cors.allow-origin: @@HOST
163 |
164 | ################################### Gateway ###################################
165 |
166 | # The gateway allows for persisting the cluster state between full cluster
167 | # restarts. Every change to the state (such as adding an index) will be stored
168 | # in the gateway, and when the cluster starts up for the first time,
169 | # it will read its state from the gateway.
170 |
171 | # There are several types of gateway implementations. For more information, see
172 | # <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-gateway.html>.
173 |
174 | # The default gateway type is the "local" gateway (recommended):
175 | #
176 | # gateway.type: local
177 |
178 | # Settings below control how and when to start the initial recovery process on
179 | # a full cluster restart (to reuse as much local data as possible when using shared
180 | # gateway).
181 |
182 | # Allow recovery process after N nodes in a cluster are up:
183 | #
184 | # gateway.recover_after_nodes: 1
185 |
186 | # Set the timeout to initiate the recovery process, once the N nodes
187 | # from previous setting are up (accepts time value):
188 | #
189 | # gateway.recover_after_time: 5m
190 |
191 | # Set how many nodes are expected in this cluster. Once these N nodes
192 | # are up (and recover_after_nodes is met), begin recovery process immediately
193 | # (without waiting for recover_after_time to expire):
194 | #
195 | # gateway.expected_nodes: 2
196 |
197 |
198 | ############################# Recovery Throttling #############################
199 |
200 | # These settings allow to control the process of shards allocation between
201 | # nodes during initial recovery, replica allocation, rebalancing,
202 | # or when adding and removing nodes.
203 |
204 | # Set the number of concurrent recoveries happening on a node:
205 | #
206 | # 1. During the initial recovery
207 | #
208 | # cluster.routing.allocation.node_initial_primaries_recoveries: 4
209 | #
210 | # 2. During adding/removing nodes, rebalancing, etc
211 | #
212 | cluster.routing.allocation.cluster_concurrent_rebalance: 20
213 | cluster.routing.allocation.node_concurrent_recoveries: 5
214 |
215 | # Set to throttle throughput when recovering (eg. 100mb, by default 20mb):
216 | #
217 | # indices.recovery.max_bytes_per_sec: 20mb
218 |
219 | # Set to limit the number of open concurrent streams when
220 | # recovering a shard from a peer:
221 | #
222 | # indices.recovery.concurrent_streams: 5
223 |
224 | indices.memory.index_buffer_size: 20%
225 |
226 | ################################## Discovery ##################################
227 |
228 | # Discovery infrastructure ensures nodes can be found within a cluster
229 | # and master node is elected. Multicast discovery is the default.
230 |
231 | # Set to ensure a node sees N other master eligible nodes to be considered
232 | # operational within the cluster. It's recommended to set it to a higher value
233 | # than 1 when running more than 2 nodes in the cluster.
234 | #
235 | # discovery.zen.minimum_master_nodes: 1
236 |
237 | # Set the time to wait for ping responses from other nodes when discovering.
238 | # Set this option to a higher value on a slow or congested network
239 | # to minimize discovery failures:
240 | #
241 | # discovery.zen.ping.timeout: 3s
242 |
243 | # For more information, see
244 | # <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html>
245 |
246 | # Unicast discovery allows to explicitly control which nodes will be used
247 | # to discover the cluster. It can be used when multicast is not present,
248 | # or to restrict the cluster communication-wise.
249 | #
250 | # 1. Disable multicast discovery (enabled by default):
251 | #
252 | # discovery.zen.ping.multicast.enabled: false
253 | #
254 | # 2. Configure an initial list of master nodes in the cluster
255 | # to perform discovery when new nodes (master or data) are started:
256 | #
257 | # discovery.zen.ping.unicast.hosts: ["host1", "host2:port"]
258 |
259 | # EC2 discovery allows to use AWS EC2 API in order to perform discovery.
260 | #
261 | # You have to install the cloud-aws plugin for enabling the EC2 discovery.
262 | #
263 | # For more information, see
264 | # <https://github.com/elasticsearch/elasticsearch-cloud-aws>
265 | #
266 | # See <http://elasticsearch.org/tutorials/elasticsearch-on-ec2/>
267 | # for a step-by-step tutorial.
268 |
269 | discovery.zen.hosts_provider: ec2
270 | discovery.ec2.tag.Stack: "@@STACK"
271 | discovery.ec2.tag.App: "kibana"
272 |
273 | cloud.aws.region: @@REGION
274 |
275 | # GCE discovery allows to use Google Compute Engine API in order to perform discovery.
276 | #
277 | # You have to install the cloud-gce plugin for enabling the GCE discovery.
278 | #
279 | # For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-gce>.
280 |
281 | # Azure discovery allows to use Azure API in order to perform discovery.
282 | #
283 | # You have to install the cloud-azure plugin for enabling the Azure discovery.
284 | #
285 | # For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-azure>.
286 |
287 | ################################## Slow Log ##################################
288 |
289 | # Shard level query and fetch threshold logging.
290 |
291 | #index.search.slowlog.threshold.query.warn: 10s
292 | #index.search.slowlog.threshold.query.info: 5s
293 | #index.search.slowlog.threshold.query.debug: 2s
294 | #index.search.slowlog.threshold.query.trace: 500ms
295 |
296 | #index.search.slowlog.threshold.fetch.warn: 1s
297 | #index.search.slowlog.threshold.fetch.info: 800ms
298 | #index.search.slowlog.threshold.fetch.debug: 500ms
299 | #index.search.slowlog.threshold.fetch.trace: 200ms
300 |
301 | #index.indexing.slowlog.threshold.index.warn: 10s
302 | #index.indexing.slowlog.threshold.index.info: 5s
303 | #index.indexing.slowlog.threshold.index.debug: 2s
304 | #index.indexing.slowlog.threshold.index.trace: 500ms
305 |
306 | ################################## GC Logging ################################
307 |
308 | #monitor.jvm.gc.young.warn: 1000ms
309 | #monitor.jvm.gc.young.info: 700ms
310 | #monitor.jvm.gc.young.debug: 400ms
311 |
312 | #monitor.jvm.gc.old.warn: 10s
313 | #monitor.jvm.gc.old.info: 5s
314 | #monitor.jvm.gc.old.debug: 2s
315 |
316 | ################################# Index Actions ##############################
317 |
318 | action.auto_create_index: true
319 |
320 | ## Add breaker settings (to prevent out of memory errors)
321 | indices.breaker.fielddata.limit: 40%
322 |
323 | ################################ X-Pack Plugin ##############################
324 | xpack.security.enabled: false
325 |
--------------------------------------------------------------------------------
/cloudformation/ELK_Stack_Multi_AZ_in_Private_VPC.yml:
--------------------------------------------------------------------------------
1 | ---
2 | AWSTemplateFormatVersion: '2010-09-09'
3 | Description: ELK Stack - Elasticsearch, Logstash, Kibana 5
4 | Parameters:
5 | Stack:
6 | Description: Stack applied as a tag
7 | Type: String
8 | KeyName:
9 | Default: bootstrap
10 | Description: Name of an existing EC2 KeyPair for SSH access
11 | Type: AWS::EC2::KeyPair::KeyName
12 | Stage:
13 | Description: Stage applied as a tag
14 | Type: String
15 | Default: PROD
16 | AllowedValues:
17 | - PROD
18 | - CODE
19 | - INFRA
20 | ConstraintDescription: must be a valid stage eg. PROD, CODE, INFRA
21 | ElkCapacity:
22 | Description: Autoscale Size
23 | Type: Number
24 | Default: '1'
25 | MinValue: 1
26 | MaxValue: 12
27 | ElkInstanceType:
28 | Description: EC2 instance type
29 | Type: String
30 | Default: m4.large
31 | AllowedValues:
32 | - t2.medium
33 | - m4.large
34 | - m4.xlarge
35 | - m4.2xlarge
36 | ConstraintDescription: must be a valid EC2 instance type
37 | CookieSecret:
38 | Description: Secret used to sign the session ID cookie ie. any random string
39 | Type: String
40 | NoEcho: true
41 | AllowedPattern: "[a-zA-Z0-9_-]*"
42 | ConstraintDescription: must only contain upper and lower case letters, numbers,
43 | dashes and underscores
44 | ApiKey:
45 | Description: 'Api key used to make server to server request to the cluster without
46 | Google OAuth, ie: any random string'
47 | Type: String
48 | NoEcho: true
49 | GoogleOAuthClientId:
50 | Description: Google OAuth 2.0 Client ID
51 | Type: String
52 | GoogleOAuthClientSecret:
53 | Description: Google OAuth 2.0 Client Secret
54 | Type: String
55 | NoEcho: true
56 | AllowedDomain:
57 | Description: Google apps domain eg. gmail.com or example.com
58 | Type: String
59 | Default: guardian.co.uk
60 | VpcId:
61 | Description: ID of the VPC onto which to launch the application eg. vpc-1234abcd
62 | Type: AWS::EC2::VPC::Id
63 | PublicVpcSubnets:
64 | Description: Subnets to use in VPC for public ELB eg. subnet-abcd1234
65 | Type: List<AWS::EC2::Subnet::Id>
66 | PrivateVpcSubnets:
67 | Description: Subnets to use in VPC for instances eg. subnet-abcd1234
68 | Type: List<AWS::EC2::Subnet::Id>
69 | VpcIpRangeCidr:
70 | Description: VPC IP range eg. 10.0.0.0/8
71 | Type: String
72 | Default: 0.0.0.0/0
73 | AllowedSshCidr:
74 | Description: IP range to allow SSH access from eg. 1.2.3.4/21
75 | Type: String
76 | Default: 0.0.0.0/0
77 | AllowedHttpCidr:
78 | Description: IP range to allow HTTP access from eg. 1.2.3.4/21
79 | Type: String
80 | Default: 0.0.0.0/0
81 | HostedZoneName:
82 | Description: Route53 Hosted Zone in which kibana aliases will be created (without
83 | the trailing dot). Leave blank for no ALIAS.
84 | Type: String
85 | AllowedPattern: "^(.*[^.]|)$"
86 | EBSVolumeSize:
87 | Description: EBS storage to be attached to each instance (in GB). Set to zero
88 | for no attached EBS volume (the on-instance storage will be used instead).
89 | Type: Number
90 | Default: 0
91 | MaxValue: 1000
92 | MinValue: 0
93 | SnapshotRepository:
94 | Description: S3 bucket name for elasticsearch snapshots repository
95 | Type: String
96 | SnapshotRepositoryEncryption:
97 | Description: Whether to apply server side encryption to snapshots stored in S3
98 | Default: true
99 | Type: String
100 | AllowedValues: [true, false]
101 | IndexKeepDays:
102 | Description: Keep elasticsearch indices for x number of days
103 | Type: Number
104 | Default: '8'
105 | PublicLoadBalancerSSLCertificateARN:
106 | Description: ARN of the SSL certificate applied to the public load balancer
107 | Type: String
108 |
109 | Conditions:
110 | HasDNS: !Not [!Equals [!Ref HostedZoneName, '']]
111 | UseEBS: !Not [!Equals [!Ref EBSVolumeSize, '0']]
112 | HasS3: !Not [!Equals [!Ref SnapshotRepository, '']]
113 | HasSSLCertificate: !Not [!Equals [!Ref PublicLoadBalancerSSLCertificateARN, '']]
114 |
115 | Mappings:
116 | RegionMap:
117 | us-east-1:
118 | ImageId: ami-263d0b5c
119 | us-west-2:
120 | ImageId: ami-bd8f33c5
121 | us-west-1:
122 | ImageId: ami-8b3a37eb
123 | eu-west-1:
124 | ImageId: ami-7a187c03
125 | eu-west-2:
126 | ImageId: ami-aca2b9c8
127 | eu-central-1:
128 | ImageId: ami-37940d58
129 | ap-southeast-1:
130 | ImageId: ami-da3d45a6
131 | ap-northeast-1:
132 | ImageId: ami-4b7e1c2d
133 | ap-southeast-2:
134 | ImageId: ami-d7f30db5
135 | ap-northeast-2:
136 | ImageId: ami-9cb211f2
137 | sa-east-1:
138 | ImageId: ami-e024688c
139 | InstanceMap:
140 | t2.medium:
141 | ESHeapSize: 2g
142 | m4.large:
143 | ESHeapSize: 4g
144 | m4.xlarge:
145 | ESHeapSize: 8g
146 | m4.2xlarge:
147 | ESHeapSize: 15g
148 |
149 | Resources:
150 | ElkS3Bucket:
151 | Type: AWS::S3::Bucket
152 | Condition: HasS3
153 | Properties:
154 | BucketName: !Ref SnapshotRepository
155 | AccessControl: Private
156 | ElkS3Policy:
157 | Type: AWS::IAM::Policy
158 | Condition: HasS3
159 | Properties:
160 | PolicyName: ElkS3Policy
161 | Groups: []
162 | Roles:
163 | - !Ref Role
164 | Users: []
165 | PolicyDocument:
166 | Version: '2012-10-17'
167 | Statement:
168 | - Action:
169 | - s3:ListBucket
170 | Effect: Allow
171 | Resource: !Sub 'arn:aws:s3:::${ElkS3Bucket}'
172 | - Action:
173 | - s3:GetObject
174 | - s3:PutObject
175 | - s3:DeleteObject
176 | Effect: Allow
177 | Resource: !Sub 'arn:aws:s3:::${ElkS3Bucket}/*'
178 | Role:
179 | Type: AWS::IAM::Role
180 | Properties:
181 | Path: "/"
182 | AssumeRolePolicyDocument:
183 | Statement:
184 | - Action: sts:AssumeRole
185 | Effect: Allow
186 | Principal:
187 | Service:
188 | - ec2.amazonaws.com
189 | Policies:
190 | - PolicyName: ec2-describe-instances
191 | PolicyDocument:
192 | Version: '2012-10-17'
193 | Statement:
194 | - Action: ec2:DescribeInstances
195 | Effect: Allow
196 | Resource: "*"
197 | ElkKinesisPublisherRole:
198 | Type: AWS::IAM::Role
199 | Properties:
200 | Path: "/"
201 | AssumeRolePolicyDocument:
202 | Statement:
203 | - Action: sts:AssumeRole
204 | Effect: Allow
205 | Principal:
206 | AWS: !Sub 'arn:aws:iam::${AWS::AccountId}:root'
207 | ElkKinesisDynamoDBTable:
208 | Type: AWS::DynamoDB::Table
209 | Properties:
210 | AttributeDefinitions:
211 | - AttributeName: leaseKey
212 | AttributeType: S
213 | KeySchema:
214 | - AttributeName: leaseKey
215 | KeyType: HASH
216 | ProvisionedThroughput:
217 | ReadCapacityUnits: '10'
218 | WriteCapacityUnits: '5'
219 | ElkKinesisStream:
220 | Type: AWS::Kinesis::Stream
221 | Properties:
222 | ShardCount: '1'
223 | ElkKinesisPublisherPolicy2:
224 | Type: AWS::IAM::ManagedPolicy
225 | Properties:
226 | Description: "Policy for allowing writes to the kinesis logging stream"
227 | PolicyDocument:
228 | Version: '2012-10-17'
229 | Statement:
230 | - Effect: Allow
231 | Action:
232 | - kinesis:PutRecord
233 | - kinesis:DescribeStream
234 | Resource: !Sub 'arn:aws:kinesis:${AWS::Region}:${AWS::AccountId}:stream/${ElkKinesisStream}'
235 | Roles:
236 | - !Ref ElkKinesisPublisherRole
237 | ElkKinesisReaderPolicy:
238 | Type: AWS::IAM::Policy
239 | Properties:
240 | PolicyName: elk-kinesis-reader
241 | PolicyDocument:
242 | Version: '2012-10-17'
243 | Statement:
244 | - Effect: Allow
245 | Action:
246 | - dynamodb:*
247 | Resource: !Sub 'arn:aws:dynamodb:${AWS::Region}:${AWS::AccountId}:table/${ElkKinesisDynamoDBTable}'
248 | - Effect: Allow
249 | Action:
250 | - kinesis:GetRecords
251 | - kinesis:GetShardIterator
252 | - kinesis:DescribeStream
253 | Resource: !Sub 'arn:aws:kinesis:${AWS::Region}:${AWS::AccountId}:stream/${ElkKinesisStream}'
254 | - Effect: Allow
255 | Action:
256 | - kinesis:ListStreams
257 | Resource:
258 | - "*"
259 | Roles:
260 | - !Ref Role
261 | InstanceProfile:
262 | Type: AWS::IAM::InstanceProfile
263 | Properties:
264 | Path: "/"
265 | Roles:
266 | - !Ref Role
267 | ElkPublicLoadBalancer:
268 | Type: AWS::ElasticLoadBalancing::LoadBalancer
269 | Properties:
270 | CrossZone: true
271 | Listeners:
272 | - Protocol:
273 | Fn::If:
274 | - HasSSLCertificate
275 | - HTTPS
276 | - HTTP
277 | LoadBalancerPort:
278 | Fn::If:
279 | - HasSSLCertificate
280 | - '443'
281 | - '80'
282 | InstanceProtocol: HTTP
283 | InstancePort: '8080'
284 | SSLCertificateId: !Ref PublicLoadBalancerSSLCertificateARN
285 | HealthCheck:
286 | Target: HTTP:8080/__es/
287 | Timeout: '10'
288 | Interval: '20'
289 | UnhealthyThreshold: '10'
290 | HealthyThreshold: '2'
291 | Subnets: !Ref PublicVpcSubnets
292 | SecurityGroups:
293 | - !Ref ElkPublicLoadBalancerSecurityGroup
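    | # The public ELB terminates TLS when a certificate is supplied and forwards
    | # plain HTTP to port 8080 on the instances, where Logcabin sits in front of
    | # Kibana; the health check probes /__es/, which appears to be Logcabin's
    | # Elasticsearch proxy path, rather than Kibana itself.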
294 | ElkInternalLoadBalancer:
295 | Type: AWS::ElasticLoadBalancing::LoadBalancer
296 | Properties:
297 | Scheme: internal
298 | CrossZone: true
299 | Listeners:
300 | - Protocol: TCP
301 | LoadBalancerPort: '6379'
302 | InstancePort: '6379'
303 | HealthCheck:
304 | Target: TCP:6379
305 | Timeout: '10'
306 | Interval: '20'
307 | UnhealthyThreshold: '10'
308 | HealthyThreshold: '2'
309 | Subnets: !Ref PrivateVpcSubnets
310 | SecurityGroups:
311 | - !Ref ElkInternalLoadBalancerSecurityGroup
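    | # Raw TCP on 6379 is the Logstash TCP input port (see the LogstashEndpoint
    | # output at the bottom of this template); the security groups below restrict
    | # it to the VPC's own CIDR range.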
312 | ElkAutoscalingGroup:
313 | Type: AWS::AutoScaling::AutoScalingGroup
314 | Properties:
315 | AvailabilityZones:
316 | Fn::GetAZs: ''
317 | VPCZoneIdentifier: !Ref PrivateVpcSubnets
318 | LaunchConfigurationName: !Ref ElkLaunchConfig
319 | MinSize: '1'
320 | MaxSize: '12'
321 | DesiredCapacity: !Ref ElkCapacity
322 | HealthCheckType: EC2
323 | HealthCheckGracePeriod: 600
324 | LoadBalancerNames:
325 | - !Ref ElkPublicLoadBalancer
326 | - !Ref ElkInternalLoadBalancer
327 | Tags:
328 | - Key: Stage
329 | Value:
330 | !Ref Stage
331 | PropagateAtLaunch: 'true'
332 | - Key: Stack
333 | Value:
334 | !Ref Stack
335 | PropagateAtLaunch: 'true'
336 | - Key: App
337 | Value: kibana
338 | PropagateAtLaunch: 'true'
339 | - Key: Name
340 | Value: kibana
341 | PropagateAtLaunch: 'true'
342 | ElkLaunchConfig:
343 | Type: AWS::AutoScaling::LaunchConfiguration
344 | Properties:
345 | ImageId: !FindInMap [RegionMap, !Ref 'AWS::Region', ImageId]
346 | SecurityGroups:
347 | - !Ref ElkSecurityGroup
348 | InstanceType:
349 | !Ref ElkInstanceType
350 | BlockDeviceMappings:
351 | - Fn::If:
352 | - UseEBS
353 | - DeviceName: "/dev/sdk"
354 | Ebs:
355 | VolumeSize: !Ref EBSVolumeSize
356 | VolumeType: gp2
357 | Encrypted: true
358 | - !Ref AWS::NoValue
359 | IamInstanceProfile: !Ref InstanceProfile
360 | KeyName: !Ref KeyName
361 | UserData:
362 | Fn::Base64: !Sub
363 | - |
364 | #!/bin/bash -ev
365 |
366 | # Update repositories
367 | wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
368 | sudo apt-get -y update && sudo apt-get -y install apt-transport-https
369 | echo "deb https://artifacts.elastic.co/packages/5.x/apt stable main" | sudo tee -a /etc/apt/sources.list.d/elastic-5.x.list
370 | echo "deb http://packages.elastic.co/curator/4/debian stable main" | sudo tee -a /etc/apt/sources.list.d/curator.list
371 | apt-get -y upgrade
372 | update-ca-certificates -f
373 |
374 | # Install prerequisites
375 | apt-get -y update && apt-get -y install language-pack-en ntp openjdk-8-jdk unzip libwww-perl libdatetime-perl
376 |
377 | # Install Elasticsearch, Kibana, Logstash, Curator, and Node.js
378 | apt-get -y update && apt-get -y install elasticsearch kibana logstash elasticsearch-curator nodejs npm
379 |
380 | # Configure system
381 | cat >/etc/security/limits.conf << EOF
382 | elasticsearch - nofile 65536
383 | elasticsearch - memlock unlimited
384 | EOF
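    | # These limits raise the open-file cap for Elasticsearch's many Lucene
    | # segment files and allow the heap to be locked in memory (effective if
    | # bootstrap.memory_lock is enabled in the fetched elasticsearch.yml).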
385 |
386 | # Mount Volume
387 | ${MountVolume}
388 | chown elasticsearch /data
389 |
390 | # Set up free disk space monitoring
391 | curl http://aws-cloudwatch.s3.amazonaws.com/downloads/CloudWatchMonitoringScripts-1.2.1.zip -O
392 | unzip CloudWatchMonitoringScripts-1.2.1.zip -d /usr/local/bin
393 | rm CloudWatchMonitoringScripts-1.2.1.zip
394 | echo '*/30 * * * * root /usr/local/bin/aws-scripts-mon/mon-put-instance-data.pl --disk-space-util --disk-path=/data --from-cron' >/etc/cron.d/monitor-instance
395 |
396 | # Install ES plugins
397 | /usr/share/elasticsearch/bin/elasticsearch-plugin install x-pack --batch
398 | /usr/share/elasticsearch/bin/elasticsearch-plugin install discovery-ec2 --batch
399 | /usr/share/elasticsearch/bin/elasticsearch-plugin install repository-s3 --batch
400 |
401 | # Configure Elasticsearch
402 | echo 'ES_JAVA_OPTS="-Xms${ESHeapSize} -Xmx${ESHeapSize}"' >>/etc/default/elasticsearch
403 | wget -O /etc/elasticsearch/elasticsearch.yml https://raw.githubusercontent.com/guardian/elk-stack/master/config/elasticsearch.yml
404 | sed -i \
405 | -e 's,@@REGION,${AWS::Region},g' \
406 | -e 's,@@STACK,${Stack},g' \
407 | -e 's,@@HOST,${ElkHost},g' \
408 | /etc/elasticsearch/elasticsearch.yml
409 |
410 | # Install Kibana plugins
411 | /usr/share/kibana/bin/kibana-plugin install x-pack
412 | # Configure Kibana
413 | cat >/etc/kibana/kibana.yml << EOF
414 | xpack.security.enabled: false
415 | EOF
416 | # Install Logstash plugins
417 | /usr/share/logstash/bin/logstash-plugin install logstash-input-kinesis
418 | # Configure Logstash
419 | wget -O /etc/logstash/conf.d/logstash-indexer.conf https://raw.githubusercontent.com/guardian/elk-stack/master/config/logstash-indexer.conf
420 | sed -i \
421 | -e 's,@@KINESIS_STREAM_NAME,${ElkKinesisStream},g' \
422 | -e 's,@@KINESIS_APPLICATION_NAME,${ElkKinesisDynamoDBTable},g' \
423 | -e 's,@@KINESIS_REGION,${AWS::Region},g' \
424 | /etc/logstash/conf.d/logstash-indexer.conf
425 |
426 | # Install Logcabin
427 | wget -O /tmp/elk-stack.tar.gz https://github.com/guardian/elk-stack/archive/3be9a2dc131c7703a6dce31ef2bcd11de4444fec.tar.gz
428 | tar zxf /tmp/elk-stack.tar.gz -C /tmp
429 | mv /tmp/elk-stack-3be9a2dc131c7703a6dce31ef2bcd11de4444fec/src /opt/logcabin
430 | adduser --disabled-password --gecos "" logcabin
431 | cd /opt/logcabin && npm install && cd ..
432 | chown -R logcabin /opt/logcabin
433 | wget -O /opt/logcabin/config.js https://raw.githubusercontent.com/guardian/elk-stack/master/config/config.js
434 | sed -i \
435 | -e 's,@@LOGCABIN_HOST,${ElkHost},g' \
436 | -e 's,@@API_KEY,${ApiKey},g' \
437 | -e 's,@@COOKIE_SECRET,${CookieSecret},g' \
438 | -e 's,@@CLIENT_ID,${GoogleOAuthClientId},g' \
439 | -e 's,@@CLIENT_SECRET,${GoogleOAuthClientSecret},g' \
440 | -e 's,@@ALLOWED_DOMAIN,${AllowedDomain},g' \
441 | /opt/logcabin/config.js
442 |
443 | # Start services
444 | wget -O /etc/systemd/system/logcabin.service https://raw.githubusercontent.com/guardian/elk-stack/master/config/systemd-logcabin.service
445 | wget -O /etc/systemd/system/kibana.service https://raw.githubusercontent.com/guardian/elk-stack/master/config/systemd-kibana.service
446 | systemctl start logstash && systemctl enable logstash
447 | systemctl start elasticsearch && systemctl enable elasticsearch
448 | systemctl start kibana && systemctl enable kibana
449 | systemctl start logcabin && systemctl enable logcabin
450 | # Set up the S3 snapshot repository
451 | ${SetupS3Snapshot}
452 | # Set up a schedule to delete old indexes
453 | mkdir /etc/curator
454 | wget -O /etc/curator/curator.yml https://raw.githubusercontent.com/guardian/elk-stack/master/config/curator/curator.yml
455 |
456 | wget -O /etc/curator/delete-old-indexes.yml https://raw.githubusercontent.com/guardian/elk-stack/master/config/curator/delete-old-indexes.yml
457 | sed -i \
458 | -e 's,@@NUMBER_OF_DAYS,${IndexKeepDays},g' \
459 | /etc/curator/delete-old-indexes.yml
460 | echo '30 0 * * * root /usr/bin/curator --config /etc/curator/curator.yml /etc/curator/delete-old-indexes.yml' >/etc/cron.d/curator
461 | - ESHeapSize: !FindInMap [InstanceMap, !Ref ElkInstanceType, ESHeapSize]
462 | MountVolume: !If
463 | - UseEBS
464 | - !Sub |
465 | mkfs.ext4 /dev/xvdk
466 | mkdir /data
467 | mount /dev/xvdk /data
468 | echo '/dev/xvdk /data ext4 defaults 0 2' >> /etc/fstab
469 | - !Sub |
470 | mkdir /data
471 | mount /dev/xvda1 /data
472 | ElkHost: !Join
473 | - ''
474 | - - !If [HasSSLCertificate, 'https://', 'http://']
475 | - !If [HasDNS, !Sub 'kibana.${HostedZoneName}', !GetAtt ElkPublicLoadBalancer.DNSName]
476 | SetupS3Snapshot: !If
477 | - HasS3
478 | - !Sub |
479 | while ! nc -z localhost 9200; do sleep 5; done; echo Elasticsearch is up!
480 | curl -XPUT 'http://localhost:9200/_snapshot/s3' -d '{
481 | "type": "s3",
482 | "settings": {
483 | "bucket": "${ElkS3Bucket}",
484 | "region": "${AWS::Region}",
485 | "server_side_encryption": "${SnapshotRepositoryEncryption}"
486 | }
487 | }'
488 | curl 'http://localhost:9200/_snapshot/s3?pretty'
489 | wget -O /usr/local/bin/backup.sh https://raw.githubusercontent.com/guardian/elk-stack/master/scripts/backup.sh
490 | chmod +x /usr/local/bin/backup.sh
491 | echo '15 0 * * * root /usr/local/bin/backup.sh' >/etc/cron.d/backup
492 | - ''
493 |
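    | # Once the 's3' repository above is registered, a snapshot can also be taken
    | # by hand with the standard Elasticsearch snapshot API, for example:
    | #   curl -XPUT 'http://localhost:9200/_snapshot/s3/manual-1?wait_for_completion=true'
    | # (the snapshot name 'manual-1' is arbitrary); the backup.sh script fetched
    | # above automates this via the 00:15 cron entry.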
494 | ElkPublicLoadBalancerSecurityGroup:
495 | Type: AWS::EC2::SecurityGroup
496 | Properties:
497 | VpcId: !Ref VpcId
498 | GroupDescription: Allow access to Kibana on the public ELB from the internet
499 | SecurityGroupIngress:
500 | - IpProtocol: tcp
501 | FromPort:
502 | Fn::If:
503 | - HasSSLCertificate
504 | - '443'
505 | - '80'
506 | ToPort:
507 | Fn::If:
508 | - HasSSLCertificate
509 | - '443'
510 | - '80'
511 | CidrIp: !Ref AllowedHttpCidr
512 | SecurityGroupEgress:
513 | - IpProtocol: tcp
514 | FromPort: '8080'
515 | ToPort: '8080'
516 | CidrIp: 0.0.0.0/0
517 | ElkInternalLoadBalancerSecurityGroup:
518 | Type: AWS::EC2::SecurityGroup
519 | Properties:
520 | VpcId: !Ref VpcId
521 | GroupDescription: Allow Logstash messages to the internal ELB
522 | SecurityGroupIngress:
523 | - IpProtocol: tcp
524 | FromPort: '6379'
525 | ToPort: '6379'
526 | CidrIp: !Ref VpcIpRangeCidr
527 | SecurityGroupEgress:
528 | - IpProtocol: tcp
529 | FromPort: '6379'
530 | ToPort: '6379'
531 | CidrIp: !Ref VpcIpRangeCidr
532 | ElkSecurityGroup:
533 | Type: AWS::EC2::SecurityGroup
534 | Properties:
535 | GroupDescription: Allow Kibana from the public ELB and Logstash from the internal ELB
536 | VpcId: !Ref VpcId
537 | SecurityGroupIngress:
538 | - IpProtocol: tcp
539 | FromPort: '6379'
540 | ToPort: '6379'
541 | SourceSecurityGroupId: !Ref ElkInternalLoadBalancerSecurityGroup
542 | - IpProtocol: tcp
543 | FromPort: '8080'
544 | ToPort: '8080'
545 | SourceSecurityGroupId: !Ref ElkPublicLoadBalancerSecurityGroup
546 | - IpProtocol: tcp
547 | FromPort: '22'
548 | ToPort: '22'
549 | CidrIp: !Ref AllowedSshCidr
550 | ElkSecurityGroupIngress:
551 | Type: AWS::EC2::SecurityGroupIngress
552 | Properties:
553 | GroupId: !GetAtt ElkSecurityGroup.GroupId
554 | IpProtocol: tcp
555 | FromPort: '9300'
556 | ToPort: '9400'
557 | SourceSecurityGroupId: !GetAtt ElkSecurityGroup.GroupId
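    | # Inter-node (transport) traffic on 9300-9400 is opened in a standalone
    | # SecurityGroupIngress resource because a security group cannot reference
    | # itself in its own inline ingress rules without CloudFormation reporting a
    | # circular dependency.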
558 | KibanaAlias:
559 | Type: AWS::Route53::RecordSetGroup
560 | Condition: HasDNS
561 | Properties:
562 | HostedZoneName: !Sub '${HostedZoneName}.'
563 | Comment: Alias records for the Kibana and Logstash ELBs
564 | RecordSets:
565 | - Name: !Sub 'kibana.${HostedZoneName}'
566 | Type: A
567 | AliasTarget:
568 | HostedZoneId: !GetAtt ElkPublicLoadBalancer.CanonicalHostedZoneNameID
569 | DNSName: !GetAtt ElkPublicLoadBalancer.DNSName
570 | - Name: !Sub 'logstash.${HostedZoneName}'
571 | Type: A
572 | AliasTarget:
573 | HostedZoneId: !GetAtt ElkInternalLoadBalancer.CanonicalHostedZoneNameID
574 | DNSName: !GetAtt ElkInternalLoadBalancer.DNSName
575 | Outputs:
576 | LogstashEndpoint:
577 | Value: !Join
578 | - ''
579 | - - !GetAtt ElkInternalLoadBalancer.DNSName
580 | - ':6379'
581 | Description: Logging endpoint for Logstash TCP input
582 | KibanaURL:
583 | Value: !Join
584 | - ''
585 | - - !If
586 | - HasSSLCertificate
587 | - https://
588 | - http://
589 | - !If
590 | - HasDNS
591 | - !Join
592 | - "."
593 | - - kibana
594 | - !Ref HostedZoneName
595 | - !GetAtt ElkPublicLoadBalancer.DNSName
596 | - "/"
597 | Description: URL for the Kibana 5 dashboard
598 | GoogleOAuthRedirectUrl:
599 | Value: !Join
600 | - ''
601 | - - !If
602 | - HasSSLCertificate
603 | - https://
604 | - http://
605 | - !If
606 | - HasDNS
607 | - !Join
608 | - "."
609 | - - kibana
610 | - !Ref HostedZoneName
611 | - !GetAtt ElkPublicLoadBalancer.DNSName
612 | - "/auth/google/callback"
613 | Description: Redirect URL for the Google Developers Console
614 | KinesisStream:
615 | Value: !Ref ElkKinesisStream
616 | KinesisRegion:
617 | Value: !Ref AWS::Region
618 | KinesisRole:
619 | Value: !GetAtt ElkKinesisPublisherRole.Arn
620 |
--------------------------------------------------------------------------------