├── README.md ├── authentication ├── README.md ├── auth0-deployment.yml ├── auth0-secrets.yml └── auth0-service.yml ├── authorization ├── README.md └── role.yml ├── federation ├── README.md ├── helloworld-deployment.yml └── helloworld-service.yml ├── helm └── README.md ├── jobs ├── README.md ├── cronjob.yml └── job.yml ├── kubernetes-auth-server ├── .dockerignore ├── .gitignore ├── Dockerfile ├── README.md ├── __pycache__ │ ├── constants.cpython-35.pyc │ └── server.cpython-35.pyc ├── cli-auth.py ├── constants.py ├── public │ └── app.css ├── requirements-cli.txt ├── requirements.txt ├── server.py └── templates │ ├── dashboard.html │ └── home.html ├── linkerd ├── README.md ├── hello-world.yml ├── linkerd-viz.yml ├── linkerd.yml └── node-name-test.yml ├── logging ├── README.md ├── es-service.yaml ├── es-statefulset.yaml ├── fluentd-es-configmap.yaml ├── fluentd-es-ds.yaml ├── kibana-deployment.yaml ├── kibana-service.yaml └── storage.yml ├── prometheus ├── README.md ├── example-app.yml ├── kubernetes-monitoring.yml ├── prometheus-resource.yml ├── prometheus.yml └── rbac.yml └── spinnaker ├── README.md └── spinnaker.yml /README.md: -------------------------------------------------------------------------------- 1 | # Advanced Kubernetes course 2 | This repository contains the course files for my Advanced Kubernetes course on Udemy. See https://www.udemy.com/learn-devops-advanced-kubernetes-usage/?couponCode=GITHUB 3 | -------------------------------------------------------------------------------- /authentication/README.md: -------------------------------------------------------------------------------- 1 | # kube authentication resources 2 | 3 | Add oidc setup to kops cluster: 4 | 5 | ``` 6 | spec: 7 | kubeAPIServer: 8 | oidcIssuerURL: https://account.eu.auth0.com/ 9 | oidcClientID: clientid 10 | oidcUsernameClaim: sub 11 | ``` 12 | 13 | Create UI: 14 | 15 | ``` 16 | kubectl create -f https://raw.githubusercontent.com/kubernetes/kops/master/addons/kubernetes-dashboard/v1.6.3.yaml 17 | ``` 18 | 19 | -------------------------------------------------------------------------------- /authentication/auth0-deployment.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: kubernetes-auth-server 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: kubernetes-auth-server 10 | template: 11 | metadata: 12 | labels: 13 | app: kubernetes-auth-server 14 | spec: 15 | containers: 16 | - name: kubernetes-auth-server 17 | image: wardviaene/kubernetes-auth-server:1.0.1 18 | imagePullPolicy: Always 19 | ports: 20 | - name: app-port 21 | containerPort: 3000 22 | env: 23 | - name: AUTH0_CLIENT_ID 24 | value: # change into your client id 25 | - name: AUTH0_DOMAIN 26 | value: newtechacademy.eu.auth0.com # change into your domain 27 | - name: AUTH0_CALLBACK_URL 28 | value: http://authserver.kubernetes.newtech.academy/callback # change into your callback url 29 | - name: AUTH0_API_ID 30 | value: https://newtechacademy.eu.auth0.com/userinfo # change into your identifier 31 | - name: AUTH0_CONNECTION 32 | value: Username-Password-Authentication # auth0 user database connection 33 | - name: KUBERNETES_UI_HOST 34 | value: api.kubernetes.newtech.academy 35 | - name: APP_HOST 36 | value: authserver.kubernetes.newtech.academy 37 | - name: AUTH0_CLIENT_SECRET 38 | valueFrom: 39 | secretKeyRef: 40 | name: auth0-secrets 41 | key: AUTH0_CLIENT_SECRET 42 | 
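The deployment above reads AUTH0_CLIENT_SECRET from the auth0-secrets Secret defined in the next file. Values under `data:` in a Kubernetes Secret must be base64-encoded, so a minimal way to fill in that placeholder (using your own Auth0 client secret — the value below is only an example) is:

```
# encode the client secret and paste the output into auth0-secrets.yml
echo -n 'your-auth0-client-secret' | base64

# or create the Secret directly without editing the file
kubectl create secret generic auth0-secrets \
  --from-literal=AUTH0_CLIENT_SECRET='your-auth0-client-secret'
```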
-------------------------------------------------------------------------------- /authentication/auth0-secrets.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: auth0-secrets 5 | type: Opaque 6 | data: 7 | AUTH0_CLIENT_SECRET: # enter the auth0 secret here 8 | -------------------------------------------------------------------------------- /authentication/auth0-service.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kubernetes-auth-server 5 | spec: 6 | ports: 7 | - port: 80 8 | targetPort: app-port 9 | protocol: TCP 10 | selector: 11 | app: kubernetes-auth-server 12 | type: LoadBalancer 13 | -------------------------------------------------------------------------------- /authorization/README.md: -------------------------------------------------------------------------------- 1 | # kube authorization with auth0 2 | 3 | Add oidc setup to kops cluster: 4 | 5 | ``` 6 | spec: 7 | kubeAPIServer: 8 | oidcIssuerURL: https://account.eu.auth0.com/ 9 | oidcClientID: clientid 10 | oidcUsernameClaim: name 11 | oidcGroupsClaim: http://authserver.kubernetes.newtech.academy/claims/groups 12 | authorization: 13 | rbac: {} 14 | 15 | ``` 16 | 17 | Auth0 rule for groups 18 | 19 | ``` 20 | function (user, context, callback) { 21 | var namespace = 'http://authserver.kubernetes.newtech.academy/claims/'; // You can set your own namespace, but do not use an Auth0 domain 22 | 23 | // Add the namespaced tokens. Remove any which is not necessary for your scenario 24 | context.idToken[namespace + "permissions"] = user.permissions; 25 | context.idToken[namespace + "groups"] = user.groups; 26 | context.idToken[namespace + "roles"] = user.roles; 27 | 28 | callback(null, user, context); 29 | } 30 | ``` 31 | -------------------------------------------------------------------------------- /authorization/role.yml: -------------------------------------------------------------------------------- 1 | kind: Role 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | namespace: default 5 | name: pod-reader 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["pods"] 9 | verbs: ["get", "watch", "list", "create", "update", "patch", "delete"] 10 | - apiGroups: ["extensions", "apps"] 11 | resources: ["deployments"] 12 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] 13 | --- 14 | kind: RoleBinding 15 | apiVersion: rbac.authorization.k8s.io/v1 16 | metadata: 17 | name: read-pods 18 | namespace: default 19 | subjects: 20 | - kind: Group 21 | name: developers 22 | apiGroup: rbac.authorization.k8s.io 23 | roleRef: 24 | kind: Role 25 | name: pod-reader 26 | apiGroup: rbac.authorization.k8s.io 27 | --- 28 | kind: Role 29 | apiVersion: rbac.authorization.k8s.io/v1 30 | metadata: 31 | namespace: kubernetes-dashboard 32 | name: dashboard 33 | rules: 34 | - apiGroups: [""] 35 | resources: ["pods", "services", "services/proxy"] 36 | verbs: ["get", "watch", "list", "create", "update", "patch", "delete"] 37 | - apiGroups: [""] 38 | resources: ["*"] 39 | verbs: ["proxy"] 40 | --- 41 | kind: RoleBinding 42 | apiVersion: rbac.authorization.k8s.io/v1 43 | metadata: 44 | name: access-dashboard 45 | namespace: kubernetes-dashboard 46 | subjects: 47 | - kind: Group 48 | name: developers 49 | apiGroup: rbac.authorization.k8s.io 50 | roleRef: 51 | kind: Role 52 | name: dashboard 53 | apiGroup: rbac.authorization.k8s.io 54 | 
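With the Role and RoleBindings above applied, the `developers` group bindings can be sanity-checked with kubectl's impersonation flags. A quick check, assuming RBAC is enabled on the cluster (`jane` is just a hypothetical user name):

```
kubectl apply -f authorization/role.yml

# impersonate a hypothetical member of the developers group
kubectl auth can-i list pods --namespace=default --as=jane --as-group=developers
kubectl auth can-i create deployments --namespace=default --as=jane --as-group=developers

# outside the bound namespaces this should return "no"
kubectl auth can-i list pods --namespace=kube-system --as=jane --as-group=developers
```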
-------------------------------------------------------------------------------- /federation/README.md: -------------------------------------------------------------------------------- 1 | # install kubefed 2 | 3 | ## Linux 4 | curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/kubernetes-client-linux-amd64.tar.gz 5 | tar -xzvf kubernetes-client-linux-amd64.tar.gz 6 | 7 | ## OS X 8 | curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/kubernetes-client-darwin-amd64.tar.gz 9 | tar -xzvf kubernetes-client-darwin-amd64.tar.gz 10 | 11 | ## Windows 12 | curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/kubernetes-client-windows-amd64.tar.gz 13 | tar -xzvf kubernetes-client-windows-amd64.tar.gz 14 | 15 | # setup the clusters 16 | 17 | Replace newtech.academy with your domain 18 | 19 | ``` 20 | kops create cluster --name=kubernetes.newtech.academy --state=s3://kops-state-b429b --zones=eu-west-1a --node-count=2 --node-size=t2.small --master-size=t2.small --dns-zone=kubernetes.newtech.academy 21 | kops create cluster --name=kubernetes-2.newtech.academy --state=s3://kops-state-b429b --zones=eu-west-1b --node-count=2 --node-size=t2.small --master-size=t2.small --dns-zone=kubernetes-2.newtech.academy 22 | kops update cluster kubernetes.newtech.academy --state=s3://kops-state-b429b --yes 23 | kops update cluster kubernetes-2.newtech.academy --state=s3://kops-state-b429b --yes 24 | ``` 25 | 26 | # initialize federation 27 | kubefed init federated --host-cluster-context=kubernetes.newtech.academy --dns-provider="aws-route53" --dns-zone-name="federated.newtech.academy." 
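Before joining the clusters (see the join commands just below), it is worth confirming that `kubefed init` finished cleanly. A quick check, assuming the default settings were used:

```
# kubefed init should have added a context named "federated" to your kubeconfig
kubectl config get-contexts

# the federation control plane runs on the host cluster, by default in the federation-system namespace
kubectl get pods --namespace=federation-system --context=kubernetes.newtech.academy
```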
# join the clusters
kubectl config use-context federated # important: if you don't switch contexts, the next command may fail with an API not found error
kubefed join kubernetes-2 --host-cluster-context=kubernetes.newtech.academy --cluster-context=kubernetes-2.newtech.academy
kubefed join kubernetes-1 --host-cluster-context=kubernetes.newtech.academy --cluster-context=kubernetes.newtech.academy
kubectl create namespace default --context=federated

--------------------------------------------------------------------------------
/federation/helloworld-deployment.yml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: helloworld-deployment
spec:
  replicas: 4
  selector:
    matchLabels:
      app: helloworld
  template:
    metadata:
      labels:
        app: helloworld
    spec:
      containers:
      - name: k8s-demo
        image: wardviaene/k8s-demo
        ports:
        - name: nodejs-port
          containerPort: 3000
--------------------------------------------------------------------------------
/federation/helloworld-service.yml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  name: helloworld-service
spec:
  ports:
  - port: 80
    targetPort: nodejs-port
    protocol: TCP
  selector:
    app: helloworld
  type: LoadBalancer
--------------------------------------------------------------------------------
/helm/README.md:
--------------------------------------------------------------------------------
# helm mysql package

Download helm first from https://github.com/kubernetes/helm

Initialize helm:

helm init

To install mysql, enter:

helm install --name my-mysql --set mysqlRootPassword=secretpassword,mysqlUser=my-user,mysqlPassword=my-password,mysqlDatabase=my-database stable/mysql

Note: these are Helm 2 commands; Helm 3 drops `helm init` and takes the release name as a positional argument instead of `--name`.
--------------------------------------------------------------------------------
/jobs/README.md:
--------------------------------------------------------------------------------
# Runtime config

To run batch/v2alpha1 API resources you need to pass --runtime-config=batch/v2alpha1 when starting the API server. This is only needed on older clusters where CronJob had not yet graduated; the cronjob.yml in this directory uses the stable batch/v1 API, which requires no extra configuration.

For kops, add this spec when executing kops edit:
```
spec:
  kubeAPIServer:
    runtimeConfig:
      batch/v2alpha1: "true"
```
--------------------------------------------------------------------------------
/jobs/cronjob.yml:
--------------------------------------------------------------------------------
apiVersion: batch/v1
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "*/1 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: my-cronjob
            image: busybox
            args:
            - /bin/sh
            - -c
            - echo This command runs every minute
          restartPolicy: OnFailure
--------------------------------------------------------------------------------
/jobs/job.yml:
--------------------------------------------------------------------------------
apiVersion: batch/v1
kind: Job
metadata:
  name: pi
spec:
  template:
    metadata:
      name: pi
    spec:
      containers:
      - name: pi
        image: perl
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never
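A quick way to try the two manifests above and watch them run, assuming kubectl points at the cluster where you create them (nothing here is specific to this repo beyond the resource names):

```
kubectl create -f jobs/

# the pi job runs to completion; read the result from its pod logs
kubectl get jobs
kubectl logs job/pi

# the cron job spawns a new job every minute
kubectl get cronjobs
kubectl get jobs --watch
```
--------------------------------------------------------------------------------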
/kubernetes-auth-server/.dockerignore: -------------------------------------------------------------------------------- 1 | .env 2 | .env.example 3 | .gitignore 4 | .git 5 | README.md -------------------------------------------------------------------------------- /kubernetes-auth-server/.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | .pyc 3 | *.iml 4 | .directory 5 | .idea -------------------------------------------------------------------------------- /kubernetes-auth-server/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.5 2 | 3 | WORKDIR /home/app 4 | 5 | #If we add the requirements and install dependencies first, docker can use cache if requirements don't change 6 | ADD requirements.txt /home/app 7 | RUN pip install --no-cache-dir -r requirements.txt 8 | 9 | ADD . /home/app 10 | CMD python server.py 11 | 12 | EXPOSE 3000 13 | -------------------------------------------------------------------------------- /kubernetes-auth-server/README.md: -------------------------------------------------------------------------------- 1 | # Auth0 Python Web App Sample 2 | 3 | Original: https://github.com/auth0-samples/auth0-python-web-app/tree/master/01-Login 4 | 5 | Adapted to be used with Kubernetes 6 | 7 | -------------------------------------------------------------------------------- /kubernetes-auth-server/__pycache__/constants.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wardviaene/advanced-kubernetes-course/ed13cb2e7836cb870da31f0e20a029e93d218ea8/kubernetes-auth-server/__pycache__/constants.cpython-35.pyc -------------------------------------------------------------------------------- /kubernetes-auth-server/__pycache__/server.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wardviaene/advanced-kubernetes-course/ed13cb2e7836cb870da31f0e20a029e93d218ea8/kubernetes-auth-server/__pycache__/server.cpython-35.pyc -------------------------------------------------------------------------------- /kubernetes-auth-server/cli-auth.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | from os import environ as env 4 | from dotenv import load_dotenv, find_dotenv 5 | from jose import jwt 6 | from six.moves.urllib.request import urlopen 7 | from os.path import expanduser 8 | 9 | import getpass 10 | import requests 11 | import json 12 | import sys 13 | 14 | ENV_FILE = find_dotenv() 15 | if ENV_FILE: 16 | load_dotenv(ENV_FILE) 17 | 18 | AUTH0_CLIENT_ID = env.get('AUTH0_CLIENT_ID') 19 | AUTH0_DOMAIN = env.get('AUTH0_DOMAIN') 20 | APP_HOST = env.get('APP_HOST') 21 | HOME = expanduser("~") 22 | 23 | def auth(): 24 | sys.stderr.write("Login: ") 25 | login = input() 26 | password = getpass.getpass() 27 | 28 | r = requests.get("http://"+APP_HOST+"/kubectl?username="+login+"&password="+password) 29 | 30 | resp = json.loads(r.text) 31 | 32 | if 'error' in resp: 33 | 34 | print("There was an auth0 error: "+resp['error']+": "+resp['error_description']) 35 | 36 | else: 37 | 38 | id_token = resp['id_token'] 39 | 40 | jwks = urlopen("https://"+AUTH0_DOMAIN+"/.well-known/jwks.json") 41 | 42 | with open(HOME+'/.kube/jwks.json', 'w') as f: f.write (jwks.read().decode('utf-8')) 43 | with open(HOME+'/.kube/id_token', 'w') as f: f.write (id_token) 44 | 45 | print(id_token) 46 | 47 | 
def main(): 48 | try: 49 | with open(HOME+'/.kube/jwks.json', 'r') as content_file: jwks = content_file.read() 50 | with open(HOME+'/.kube/id_token', 'r') as content_file: id_token = content_file.read() 51 | 52 | payload = jwt.decode(id_token, jwks, algorithms=['RS256'], 53 | audience=AUTH0_CLIENT_ID, issuer="https://"+AUTH0_DOMAIN+"/") 54 | 55 | print(id_token) 56 | except OSError as e: 57 | auth() 58 | except jwt.ExpiredSignatureError as e: 59 | auth() 60 | except jwt.JWTClaimsError as e: 61 | auth() 62 | 63 | 64 | if __name__ == '__main__': 65 | main() 66 | -------------------------------------------------------------------------------- /kubernetes-auth-server/constants.py: -------------------------------------------------------------------------------- 1 | """ Constants file for Auth0's seed project 2 | """ 3 | ACCESS_TOKEN_KEY = 'access_token' 4 | API_ID = 'API_ID' 5 | APP_JSON_KEY = 'application/json' 6 | AUTH0_CLIENT_ID = 'AUTH0_CLIENT_ID' 7 | AUTH0_CLIENT_SECRET = 'AUTH0_CLIENT_SECRET' 8 | AUTH0_CALLBACK_URL = 'AUTH0_CALLBACK_URL' 9 | AUTH0_DOMAIN = 'AUTH0_DOMAIN' 10 | AUTH0_AUDIENCE = 'AUTH0_AUDIENCE' 11 | AUTHORIZATION_CODE_KEY = 'authorization_code' 12 | CLIENT_ID_KEY = 'client_id' 13 | CLIENT_SECRET_KEY = 'client_secret' 14 | CODE_KEY = 'code' 15 | CONTENT_TYPE_KEY = 'content-type' 16 | GRANT_TYPE_KEY = 'grant_type' 17 | PROFILE_KEY = 'profile' 18 | REDIRECT_URI_KEY = 'redirect_uri' 19 | SECRET_KEY = 'ThisIsTheSecretKey' 20 | JWT_PAYLOAD = 'jwt_payload' 21 | ID_TOKEN = 'ID_TOKEN' 22 | APP_HOST = 'APP_HOST' 23 | KUBERNETES_UI_HOST = 'KUBERNETES_UI_HOST' 24 | AUTH0_CONNECTION = 'AUTH0_CONNECTION' 25 | -------------------------------------------------------------------------------- /kubernetes-auth-server/public/app.css: -------------------------------------------------------------------------------- 1 | body { 2 | font-family: "proxima-nova", sans-serif; 3 | text-align: center; 4 | font-size: 300%; 5 | font-weight: 100; 6 | } 7 | pre { 8 | text-align: left; 9 | } 10 | input[type=checkbox], 11 | input[type=radio] { 12 | position: absolute; 13 | opacity: 0; 14 | } 15 | input[type=checkbox] + label, 16 | input[type=radio] + label { 17 | display: inline-block; 18 | } 19 | input[type=checkbox] + label:before, 20 | input[type=radio] + label:before { 21 | content: ""; 22 | display: inline-block; 23 | vertical-align: -0.2em; 24 | width: 1em; 25 | height: 1em; 26 | border: 0.15em solid #0074d9; 27 | border-radius: 0.2em; 28 | margin-right: 0.3em; 29 | background-color: white; 30 | } 31 | input[type=radio] + label:before { 32 | border-radius: 50%; 33 | } 34 | input[type=radio]:checked + label:before, 35 | input[type=checkbox]:checked + label:before { 36 | background-color: #0074d9; 37 | box-shadow: inset 0 0 0 0.15em white; 38 | } 39 | input[type=radio]:focus + label:before, 40 | input[type=checkbox]:focus + label:before { 41 | outline: 0; 42 | } 43 | .btn { 44 | font-size: 140%; 45 | text-transform: uppercase; 46 | letter-spacing: 1px; 47 | border: 0; 48 | background-color: #16214D; 49 | color: white; 50 | } 51 | .btn:hover { 52 | background-color: #44C7F4; 53 | } 54 | .btn:focus { 55 | outline: none !important; 56 | } 57 | .btn.btn-lg { 58 | padding: 20px 30px; 59 | } 60 | .btn:disabled { 61 | background-color: #333; 62 | color: #666; 63 | } 64 | h1, 65 | h2, 66 | h3 { 67 | font-weight: 100; 68 | } 69 | #logo img { 70 | width: 300px; 71 | margin-bottom: 60px; 72 | } 73 | .home-description { 74 | font-weight: 100; 75 | margin: 100px 0; 76 | } 77 | h2 { 78 | margin-top: 30px; 79 | 
margin-bottom: 40px; 80 | font-size: 200%; 81 | } 82 | label { 83 | font-size: 100%; 84 | font-weight: 300; 85 | } 86 | .btn-next { 87 | margin-top: 30px; 88 | } 89 | .answer { 90 | width: 70%; 91 | margin: auto; 92 | text-align: left; 93 | padding-left: 10%; 94 | margin-bottom: 20px; 95 | } 96 | .login-page .login-box { 97 | padding: 100px 0; 98 | } 99 | -------------------------------------------------------------------------------- /kubernetes-auth-server/requirements-cli.txt: -------------------------------------------------------------------------------- 1 | python-dotenv 2 | requests 3 | python-jose 4 | -------------------------------------------------------------------------------- /kubernetes-auth-server/requirements.txt: -------------------------------------------------------------------------------- 1 | flask 2 | python-dotenv 3 | requests 4 | flask-oauthlib 5 | python-jose 6 | -------------------------------------------------------------------------------- /kubernetes-auth-server/server.py: -------------------------------------------------------------------------------- 1 | """Python Flask WebApp Auth0 integration example 2 | """ 3 | from functools import wraps 4 | import json 5 | import requests 6 | from os import environ as env 7 | 8 | from dotenv import load_dotenv, find_dotenv 9 | from flask import Flask 10 | from flask import jsonify 11 | from flask import redirect 12 | from flask import render_template 13 | from flask import request 14 | from flask import session 15 | from flask import url_for 16 | from flask_oauthlib.client import OAuth 17 | from jose import jwt 18 | from six.moves.urllib.parse import urlencode 19 | from six.moves.urllib.request import urlopen 20 | 21 | from flask import Response 22 | 23 | import urllib3 24 | urllib3.disable_warnings() 25 | 26 | import constants 27 | 28 | ENV_FILE = find_dotenv() 29 | if ENV_FILE: 30 | load_dotenv(ENV_FILE) 31 | 32 | AUTH0_CALLBACK_URL = env.get(constants.AUTH0_CALLBACK_URL) 33 | AUTH0_CLIENT_ID = env.get(constants.AUTH0_CLIENT_ID) 34 | AUTH0_CLIENT_SECRET = env.get(constants.AUTH0_CLIENT_SECRET) 35 | AUTH0_DOMAIN = env.get(constants.AUTH0_DOMAIN) 36 | AUTH0_AUDIENCE = env.get(constants.API_ID) 37 | AUTH0_CONNECTION = env.get(constants.AUTH0_CONNECTION) 38 | APP_HOST = env.get(constants.APP_HOST) 39 | KUBERNETES_UI_HOST = env.get(constants.KUBERNETES_UI_HOST) 40 | 41 | APP = Flask(__name__, static_url_path='/public', static_folder='./public') 42 | APP.secret_key = constants.SECRET_KEY 43 | APP.debug = True 44 | 45 | 46 | # Format error response and append status code. 
47 | class AuthError(Exception): 48 | def __init__(self, error, status_code): 49 | self.error = error 50 | self.status_code = status_code 51 | 52 | 53 | #@APP.errorhandler(Exception) 54 | #def handle_auth_error(ex): 55 | # response = jsonify(ex.error) 56 | # response.status_code = ex.status_code 57 | # return response 58 | 59 | oauth = OAuth(APP) 60 | 61 | 62 | auth0 = oauth.remote_app( 63 | 'auth0', 64 | consumer_key=AUTH0_CLIENT_ID, 65 | consumer_secret=AUTH0_CLIENT_SECRET, 66 | request_token_params={ 67 | 'scope': 'openid profile', 68 | 'audience': AUTH0_AUDIENCE 69 | }, 70 | base_url='https://%s' % AUTH0_DOMAIN, 71 | access_token_method='POST', 72 | access_token_url='/oauth/token', 73 | authorize_url='/authorize', 74 | ) 75 | 76 | def requires_auth(f): 77 | @wraps(f) 78 | def decorated(*args, **kwargs): 79 | if constants.PROFILE_KEY not in session: 80 | return redirect('/login') 81 | return f(*args, **kwargs) 82 | return decorated 83 | 84 | 85 | # Controllers API 86 | @APP.route('/') 87 | def home(): 88 | return render_template('home.html') 89 | 90 | 91 | @APP.route('/callback') 92 | def callback_handling(): 93 | resp = auth0.authorized_response() 94 | if resp is None: 95 | raise AuthError({'code': request.args['error_reason'], 96 | 'description': request.args['error_description']}, 401) 97 | 98 | # Obtain JWT and the keys to validate the signature 99 | id_token = resp['id_token'] 100 | jwks = urlopen("https://"+AUTH0_DOMAIN+"/.well-known/jwks.json") 101 | 102 | payload = jwt.decode(id_token, jwks.read().decode('utf-8'), algorithms=['RS256'], 103 | audience=AUTH0_CLIENT_ID, issuer="https://"+AUTH0_DOMAIN+"/") 104 | 105 | session[constants.JWT_PAYLOAD] = payload 106 | 107 | session[constants.ID_TOKEN] = id_token 108 | 109 | session[constants.PROFILE_KEY] = { 110 | 'user_id': payload['sub'], 111 | 'name': payload['name'], 112 | 'picture': payload['picture'] 113 | } 114 | 115 | return redirect('/dashboard') 116 | 117 | 118 | @APP.route('/login') 119 | def login(): 120 | return auth0.authorize(callback=AUTH0_CALLBACK_URL) 121 | 122 | @APP.route('/logout') 123 | def logout(): 124 | session.clear() 125 | params = {'returnTo': url_for('home', _external=True), 'client_id': AUTH0_CLIENT_ID} 126 | return redirect(auth0.base_url + '/v2/logout?' 
+ urlencode(params))


@APP.route('/dashboard')
@requires_auth
def dashboard():
    return render_template('dashboard.html',
                           userinfo=session[constants.PROFILE_KEY],
                           id_token=session[constants.ID_TOKEN],
                           userinfo_pretty=json.dumps(session[constants.JWT_PAYLOAD], indent=4))


# Proxy /ui and /api requests to the Kubernetes API server, injecting the
# logged-in user's id_token as a bearer token.
@APP.route('/ui', defaults={'path': ''})
@APP.route('/api', defaults={'path': ''})
@APP.route('/api/<path:path>')
@requires_auth
def proxy_ui(path):
    # add bearer token
    new_headers = {key: value for (key, value) in request.headers if key != 'Host'}
    new_headers['Authorization'] = 'Bearer ' + session[constants.ID_TOKEN]

    url = request.url.replace(APP_HOST, KUBERNETES_UI_HOST).replace('http://', 'https://')
    try:
        resp = requests.request(
            method=request.method,
            url=url,
            headers=new_headers,
            data=request.get_data(),
            cookies=request.cookies,
            allow_redirects=False,
            verify=False  # remove this line when using real SSL certs
        )

        print("proxied: " + url + " - with status: " + str(resp.status_code))
        excluded_headers = ['content-encoding', 'content-length', 'transfer-encoding', 'connection']
        headers = [(name, value) for (name, value) in resp.raw.headers.items()
                   if name.lower() not in excluded_headers]

        response = Response(resp.content, resp.status_code, headers)
        return response

    except Exception as inst:
        print(inst)
        raise inst
        #return 'error: ' + str(inst)


# Exchange a username/password for tokens so the id_token can be used with kubectl
@APP.route('/kubectl')
def kubectl():
    payload = {"grant_type": "http://auth0.com/oauth/grant-type/password-realm",
               "username": request.args.get('username'),
               "password": request.args.get('password'),
               "client_id": AUTH0_CLIENT_ID,
               "client_secret": AUTH0_CLIENT_SECRET,
               "realm": AUTH0_CONNECTION,
               "scope": "openid",
               "audience": AUTH0_AUDIENCE}
    r = requests.post("https://"+AUTH0_DOMAIN+"/oauth/token", json=payload)
    return r.text


if __name__ == "__main__":
    APP.run(host='0.0.0.0', port=env.get('PORT', 3000))
--------------------------------------------------------------------------------
/kubernetes-auth-server/templates/dashboard.html:
--------------------------------------------------------------------------------
(template markup not preserved in this dump)
--------------------------------------------------------------------------------
/kubernetes-auth-server/templates/home.html:
--------------------------------------------------------------------------------
(template markup not preserved in this dump)
23 | 24 | 25 | -------------------------------------------------------------------------------- /linkerd/README.md: -------------------------------------------------------------------------------- 1 | # install linkerd 2 | linkerd: 3 | ``` 4 | kubectl apply -f linkerd.yml 5 | INGRESS_LB=$(kubectl get svc l5d -o jsonpath="{.status.loadBalancer.ingress[0].*}") 6 | echo http://$INGRESS_LB:9990 7 | ``` 8 | 9 | visualization: 10 | ``` 11 | kubectl apply -f linkerd-viz.yml 12 | VIZ_INGRESS_LB=$(kubectl get svc linkerd-viz -o jsonpath="{.status.loadBalancer.ingress[0].*}") 13 | echo http://$VIZ_INGRESS_LB 14 | ``` 15 | 16 | # Example app 17 | ``` 18 | kubectl create -f hello-world.yml 19 | http_proxy=$INGRESS_LB:4140 curl -s http://hello 20 | ``` 21 | Examples are from https://github.com/linkerd/linkerd-examples/tree/master/k8s-daemonset/k8s/ 22 | 23 | # minikube 24 | The example doesn't work on minikube, but there is a hello-world-legacy.yml at https://github.com/linkerd/linkerd-examples/tree/master/k8s-daemonset/k8s/ which can be used instead 25 | -------------------------------------------------------------------------------- /linkerd/hello-world.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ReplicationController 4 | metadata: 5 | name: hello 6 | spec: 7 | replicas: 3 8 | selector: 9 | app: hello 10 | template: 11 | metadata: 12 | labels: 13 | app: hello 14 | spec: 15 | dnsPolicy: ClusterFirst 16 | containers: 17 | - name: service 18 | image: buoyantio/helloworld:0.1.4 19 | env: 20 | - name: NODE_NAME 21 | valueFrom: 22 | fieldRef: 23 | fieldPath: spec.nodeName 24 | - name: POD_IP 25 | valueFrom: 26 | fieldRef: 27 | fieldPath: status.podIP 28 | - name: http_proxy 29 | value: $(NODE_NAME):4140 30 | args: 31 | - "-addr=:7777" 32 | - "-text=Hello" 33 | - "-target=world" 34 | ports: 35 | - name: service 36 | containerPort: 7777 37 | --- 38 | apiVersion: v1 39 | kind: Service 40 | metadata: 41 | name: hello 42 | spec: 43 | selector: 44 | app: hello 45 | clusterIP: None 46 | ports: 47 | - name: http 48 | port: 7777 49 | --- 50 | apiVersion: v1 51 | kind: ReplicationController 52 | metadata: 53 | name: world-v1 54 | spec: 55 | replicas: 3 56 | selector: 57 | app: world-v1 58 | template: 59 | metadata: 60 | labels: 61 | app: world-v1 62 | spec: 63 | dnsPolicy: ClusterFirst 64 | containers: 65 | - name: service 66 | image: buoyantio/helloworld:0.1.4 67 | env: 68 | - name: POD_IP 69 | valueFrom: 70 | fieldRef: 71 | fieldPath: status.podIP 72 | - name: TARGET_WORLD 73 | value: world 74 | args: 75 | - "-addr=:7778" 76 | ports: 77 | - name: service 78 | containerPort: 7778 79 | --- 80 | apiVersion: v1 81 | kind: Service 82 | metadata: 83 | name: world-v1 84 | spec: 85 | selector: 86 | app: world-v1 87 | clusterIP: None 88 | ports: 89 | - name: http 90 | port: 7778 91 | -------------------------------------------------------------------------------- /linkerd/linkerd-viz.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ReplicationController 4 | metadata: 5 | name: linkerd-viz 6 | labels: 7 | name: linkerd-viz 8 | spec: 9 | replicas: 1 10 | selector: 11 | name: linkerd-viz 12 | template: 13 | metadata: 14 | labels: 15 | name: linkerd-viz 16 | spec: 17 | serviceAccount: linkerd 18 | containers: 19 | - name: linkerd-viz 20 | image: buoyantio/linkerd-viz:0.1.5 21 | args: ["k8s"] 22 | imagePullPolicy: Always 23 | env: 24 | - name: PUBLIC_PORT 25 | value: "3000" 26 
| - name: STATS_PORT 27 | value: "9191" 28 | - name: SCRAPE_INTERVAL 29 | value: "30s" 30 | ports: 31 | - name: grafana 32 | containerPort: 3000 33 | - name: prometheus 34 | containerPort: 9191 35 | 36 | - name: kubectl 37 | image: buoyantio/kubectl:v1.4.0 38 | args: 39 | - "proxy" 40 | - "-p" 41 | - "8001" 42 | --- 43 | apiVersion: v1 44 | kind: Service 45 | metadata: 46 | name: linkerd-viz 47 | labels: 48 | name: linkerd-viz 49 | spec: 50 | type: LoadBalancer 51 | ports: 52 | - name: grafana 53 | port: 80 54 | targetPort: 3000 55 | - name: prometheus 56 | port: 9191 57 | targetPort: 9191 58 | selector: 59 | name: linkerd-viz 60 | -------------------------------------------------------------------------------- /linkerd/linkerd.yml: -------------------------------------------------------------------------------- 1 | # runs linkerd in a daemonset, in linker-to-linker mode 2 | --- 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: l5d-config 7 | data: 8 | config.yaml: |- 9 | admin: 10 | ip: 0.0.0.0 11 | port: 9990 12 | 13 | namers: 14 | - kind: io.l5d.k8s 15 | experimental: true 16 | host: localhost 17 | port: 8001 18 | 19 | telemetry: 20 | - kind: io.l5d.prometheus 21 | - kind: io.l5d.recentRequests 22 | sampleRate: 0.25 23 | 24 | usage: 25 | orgId: linkerd-examples-daemonset 26 | 27 | routers: 28 | - protocol: http 29 | label: outgoing 30 | dtab: | 31 | /srv => /#/io.l5d.k8s/default/http; 32 | /host => /srv; 33 | /svc => /host; 34 | /host/world => /srv/world-v1; 35 | interpreter: 36 | kind: default 37 | transformers: 38 | - kind: io.l5d.k8s.daemonset 39 | namespace: default 40 | port: incoming 41 | service: l5d 42 | servers: 43 | - port: 4140 44 | ip: 0.0.0.0 45 | service: 46 | responseClassifier: 47 | kind: io.l5d.http.retryableRead5XX 48 | 49 | - protocol: http 50 | label: incoming 51 | dtab: | 52 | /srv => /#/io.l5d.k8s/default/http; 53 | /host => /srv; 54 | /svc => /host; 55 | /host/world => /srv/world-v1; 56 | interpreter: 57 | kind: default 58 | transformers: 59 | - kind: io.l5d.k8s.localnode 60 | servers: 61 | - port: 4141 62 | ip: 0.0.0.0 63 | --- 64 | apiVersion: apps/v1 65 | kind: DaemonSet 66 | metadata: 67 | labels: 68 | app: l5d 69 | name: l5d 70 | spec: 71 | selector: 72 | matchLabels: 73 | app: l5d 74 | template: 75 | metadata: 76 | labels: 77 | app: l5d 78 | spec: 79 | serviceAccount: linkerd 80 | volumes: 81 | - name: l5d-config 82 | configMap: 83 | name: "l5d-config" 84 | containers: 85 | - name: l5d 86 | image: buoyantio/linkerd:1.2.0 87 | env: 88 | - name: POD_IP 89 | valueFrom: 90 | fieldRef: 91 | fieldPath: status.podIP 92 | args: 93 | - /io.buoyant/linkerd/config/config.yaml 94 | ports: 95 | - name: outgoing 96 | containerPort: 4140 97 | hostPort: 4140 98 | - name: incoming 99 | containerPort: 4141 100 | - name: admin 101 | containerPort: 9990 102 | volumeMounts: 103 | - name: "l5d-config" 104 | mountPath: "/io.buoyant/linkerd/config" 105 | readOnly: true 106 | 107 | - name: kubectl 108 | image: buoyantio/kubectl:v1.4.0 109 | args: 110 | - "proxy" 111 | - "-p" 112 | - "8001" 113 | --- 114 | apiVersion: v1 115 | kind: Service 116 | metadata: 117 | name: l5d 118 | spec: 119 | selector: 120 | app: l5d 121 | type: LoadBalancer 122 | ports: 123 | - name: outgoing 124 | port: 4140 125 | - name: incoming 126 | port: 4141 127 | - name: admin 128 | port: 9990 129 | --- 130 | apiVersion: v1 131 | kind: ServiceAccount 132 | metadata: 133 | name: linkerd 134 | namespace: default 135 | --- 136 | kind: ClusterRole 137 | apiVersion: rbac.authorization.k8s.io/v1 138 | 
metadata: 139 | name: linkerd-endpoints-reader 140 | rules: 141 | - apiGroups: [""] # "" indicates the core API group 142 | resources: ["endpoints", "services", "pods"] # pod access is required for the *-legacy.yml examples in linkerd-examples 143 | verbs: ["get", "watch", "list"] 144 | --- 145 | kind: ClusterRoleBinding 146 | apiVersion: rbac.authorization.k8s.io/v1 147 | metadata: 148 | name: linkerd-role-binding 149 | subjects: 150 | - kind: ServiceAccount 151 | name: linkerd 152 | namespace: default 153 | roleRef: 154 | kind: ClusterRole 155 | name: linkerd-endpoints-reader 156 | apiGroup: rbac.authorization.k8s.io 157 | -------------------------------------------------------------------------------- /linkerd/node-name-test.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: node-name-test 5 | spec: 6 | restartPolicy: Never 7 | containers: 8 | - image: gcr.io/google_containers/busybox 9 | command: [ "sh", "-c" ] 10 | args: 11 | - while true; do 12 | echo -en '\n'; 13 | nslookup $MY_NODE_NAME; 14 | echo -en '\n'; 15 | printenv MY_NODE_NAME MY_POD_NAME MY_POD_NAMESPACE; 16 | printenv MY_POD_IP MY_POD_SERVICE_ACCOUNT; 17 | sleep 60; 18 | done; 19 | name: node-name 20 | env: 21 | - name: MY_NODE_NAME 22 | valueFrom: 23 | fieldRef: 24 | fieldPath: spec.nodeName 25 | - name: MY_POD_NAME 26 | valueFrom: 27 | fieldRef: 28 | fieldPath: metadata.name 29 | - name: MY_POD_NAMESPACE 30 | valueFrom: 31 | fieldRef: 32 | fieldPath: metadata.namespace 33 | - name: MY_POD_IP 34 | valueFrom: 35 | fieldRef: 36 | fieldPath: status.podIP 37 | - name: MY_POD_SERVICE_ACCOUNT 38 | valueFrom: 39 | fieldRef: 40 | fieldPath: spec.serviceAccountName 41 | -------------------------------------------------------------------------------- /logging/README.md: -------------------------------------------------------------------------------- 1 | # Advanced Kubernetes course 2 | ## logging 3 | * These files are modified from https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/fluentd-elasticsearch to add persistent storage and kibana plugins 4 | 5 | ## Steps 6 | * kubectl create -f logging/ 7 | * Run a cluster with sufficient resources (3x t2.medium at least on AWS) 8 | * Make sure to label nodes with beta.kubernetes.io/fluentd-ds-ready=true 9 | -------------------------------------------------------------------------------- /logging/es-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: elasticsearch-logging 5 | namespace: kube-system 6 | labels: 7 | k8s-app: elasticsearch-logging 8 | kubernetes.io/cluster-service: "true" 9 | addonmanager.kubernetes.io/mode: Reconcile 10 | kubernetes.io/name: "Elasticsearch" 11 | spec: 12 | ports: 13 | - port: 9200 14 | protocol: TCP 15 | targetPort: db 16 | selector: 17 | k8s-app: elasticsearch-logging 18 | -------------------------------------------------------------------------------- /logging/es-statefulset.yaml: -------------------------------------------------------------------------------- 1 | # RBAC authn and authz 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: elasticsearch-logging 6 | namespace: kube-system 7 | labels: 8 | k8s-app: elasticsearch-logging 9 | kubernetes.io/cluster-service: "true" 10 | addonmanager.kubernetes.io/mode: Reconcile 11 | --- 12 | kind: ClusterRole 13 | apiVersion: rbac.authorization.k8s.io/v1 14 | metadata: 15 | name: 
elasticsearch-logging 16 | labels: 17 | k8s-app: elasticsearch-logging 18 | kubernetes.io/cluster-service: "true" 19 | addonmanager.kubernetes.io/mode: Reconcile 20 | rules: 21 | - apiGroups: 22 | - "" 23 | resources: 24 | - "services" 25 | - "namespaces" 26 | - "endpoints" 27 | verbs: 28 | - "get" 29 | --- 30 | kind: ClusterRoleBinding 31 | apiVersion: rbac.authorization.k8s.io/v1 32 | metadata: 33 | namespace: kube-system 34 | name: elasticsearch-logging 35 | labels: 36 | k8s-app: elasticsearch-logging 37 | kubernetes.io/cluster-service: "true" 38 | addonmanager.kubernetes.io/mode: Reconcile 39 | subjects: 40 | - kind: ServiceAccount 41 | name: elasticsearch-logging 42 | namespace: kube-system 43 | apiGroup: "" 44 | roleRef: 45 | kind: ClusterRole 46 | name: elasticsearch-logging 47 | apiGroup: "" 48 | --- 49 | # Elasticsearch deployment itself 50 | apiVersion: apps/v1 51 | kind: StatefulSet 52 | metadata: 53 | name: elasticsearch-logging 54 | namespace: kube-system 55 | labels: 56 | k8s-app: elasticsearch-logging 57 | version: v5.5.1 58 | kubernetes.io/cluster-service: "true" 59 | addonmanager.kubernetes.io/mode: Reconcile 60 | spec: 61 | serviceName: elasticsearch-logging 62 | replicas: 2 63 | selector: 64 | matchLabels: 65 | k8s-app: elasticsearch-logging 66 | version: v5.5.1 67 | template: 68 | metadata: 69 | labels: 70 | k8s-app: elasticsearch-logging 71 | version: v5.5.1 72 | kubernetes.io/cluster-service: "true" 73 | spec: 74 | serviceAccountName: elasticsearch-logging 75 | containers: 76 | - image: gcr.io/google-containers/elasticsearch:v5.5.1-1 77 | name: elasticsearch-logging 78 | resources: 79 | # need more cpu upon initialization, therefore burstable class 80 | limits: 81 | cpu: 1000m 82 | memory: 2.5Gi 83 | requests: 84 | memory: 2.5Gi 85 | cpu: 100m 86 | ports: 87 | - containerPort: 9200 88 | name: db 89 | protocol: TCP 90 | - containerPort: 9300 91 | name: transport 92 | protocol: TCP 93 | volumeMounts: 94 | - name: es-storage 95 | mountPath: /data 96 | env: 97 | - name: "NAMESPACE" 98 | valueFrom: 99 | fieldRef: 100 | fieldPath: metadata.namespace 101 | - name: "ES_JAVA_OPTS" 102 | value: "-XX:-AssumeMP" 103 | # Elasticsearch requires vm.max_map_count to be at least 262144. 104 | # If your OS already sets up this number to a higher value, feel free 105 | # to remove this init container. 106 | initContainers: 107 | - image: alpine:3.6 108 | command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"] 109 | name: elasticsearch-logging-init 110 | securityContext: 111 | privileged: true 112 | volumeClaimTemplates: 113 | - metadata: 114 | name: es-storage 115 | spec: 116 | accessModes: [ "ReadWriteOnce" ] 117 | storageClassName: standard 118 | resources: 119 | requests: 120 | storage: 8Gi 121 | -------------------------------------------------------------------------------- /logging/fluentd-es-configmap.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | data: 4 | containers.input.conf: |- 5 | # This configuration file for Fluentd / td-agent is used 6 | # to watch changes to Docker log files. The kubelet creates symlinks that 7 | # capture the pod name, namespace, container name & Docker container ID 8 | # to the docker logs for pods in the /var/log/containers directory on the host. 9 | # If running this fluentd configuration in a Docker container, the /var/log 10 | # directory should be mounted in the container. 
11 | # 12 | # These logs are then submitted to Elasticsearch which assumes the 13 | # installation of the fluent-plugin-elasticsearch & the 14 | # fluent-plugin-kubernetes_metadata_filter plugins. 15 | # See https://github.com/uken/fluent-plugin-elasticsearch & 16 | # https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for 17 | # more information about the plugins. 18 | # 19 | # Example 20 | # ======= 21 | # A line in the Docker log file might look like this JSON: 22 | # 23 | # {"log":"2014/09/25 21:15:03 Got request with path wombat\n", 24 | # "stream":"stderr", 25 | # "time":"2014-09-25T21:15:03.499185026Z"} 26 | # 27 | # The time_format specification below makes sure we properly 28 | # parse the time format produced by Docker. This will be 29 | # submitted to Elasticsearch and should appear like: 30 | # $ curl 'http://elasticsearch-logging:9200/_search?pretty' 31 | # ... 32 | # { 33 | # "_index" : "logstash-2014.09.25", 34 | # "_type" : "fluentd", 35 | # "_id" : "VBrbor2QTuGpsQyTCdfzqA", 36 | # "_score" : 1.0, 37 | # "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n", 38 | # "stream":"stderr","tag":"docker.container.all", 39 | # "@timestamp":"2014-09-25T22:45:50+00:00"} 40 | # }, 41 | # ... 42 | # 43 | # The Kubernetes fluentd plugin is used to write the Kubernetes metadata to the log 44 | # record & add labels to the log record if properly configured. This enables users 45 | # to filter & search logs on any metadata. 46 | # For example a Docker container's logs might be in the directory: 47 | # 48 | # /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b 49 | # 50 | # and in the file: 51 | # 52 | # 997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log 53 | # 54 | # where 997599971ee6... is the Docker ID of the running container. 55 | # The Kubernetes kubelet makes a symbolic link to this file on the host machine 56 | # in the /var/log/containers directory which includes the pod name and the Kubernetes 57 | # container name: 58 | # 59 | # synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log 60 | # -> 61 | # /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log 62 | # 63 | # The /var/log directory on the host is mapped to the /var/log directory in the container 64 | # running this instance of Fluentd and we end up collecting the file: 65 | # 66 | # /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log 67 | # 68 | # This results in the tag: 69 | # 70 | # var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log 71 | # 72 | # The Kubernetes fluentd plugin is used to extract the namespace, pod name & container name 73 | # which are added to the log message as a kubernetes field object & the Docker container ID 74 | # is also added under the docker field object. 
75 | # The final tag is: 76 | # 77 | # kubernetes.var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log 78 | # 79 | # And the final log record look like: 80 | # 81 | # { 82 | # "log":"2014/09/25 21:15:03 Got request with path wombat\n", 83 | # "stream":"stderr", 84 | # "time":"2014-09-25T21:15:03.499185026Z", 85 | # "kubernetes": { 86 | # "namespace": "default", 87 | # "pod_name": "synthetic-logger-0.25lps-pod", 88 | # "container_name": "synth-lgr" 89 | # }, 90 | # "docker": { 91 | # "container_id": "997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b" 92 | # } 93 | # } 94 | # 95 | # This makes it easier for users to search for logs by pod name or by 96 | # the name of the Kubernetes container regardless of how many times the 97 | # Kubernetes pod has been restarted (resulting in a several Docker container IDs). 98 | 99 | # Example: 100 | # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"} 101 | 102 | type tail 103 | path /var/log/containers/*.log 104 | pos_file /var/log/es-containers.log.pos 105 | time_format %Y-%m-%dT%H:%M:%S.%NZ 106 | tag kubernetes.* 107 | format json 108 | read_from_head true 109 | 110 | system.input.conf: |- 111 | # Example: 112 | # 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081 113 | 114 | type tail 115 | format /^(?