├── README.md
├── dashboard-adminuser.yaml
├── FluentD_Demo
├── create_ES.txt
├── hello-k8s-forlog.yml
└── fluentd.yml
├── eks_devops_flow_1_demo
├── kubectl_role_policy.json
├── Dockerfile
├── hello-k8s.yml
├── main.go
├── buildspec.yml
└── IAM-roles-cloudformation.yaml
├── eksctl-create-cluster.yaml
├── ingress_demo_2
├── frontend-service.yaml
├── nginx-deployment.yaml
└── webserver-ingress.yaml
├── dashboard-role-binding.yaml
├── network-policy-1.yaml
├── nginx-deployment.yaml
├── eksctl-create-ng.yaml
├── nginx-deployment-withrolling.yaml
├── cluster-autoscaler-deployment-1.yaml
├── clusterip-service.yaml
├── nodeport-service.yaml
├── loadbalancer-service.yaml
├── security
├── user-rolebinding.yaml
├── configmap-aws-auth.txt
└── configmap-aws-auth-yaml.yaml
├── fargate-hpa.yml
├── hpa-php-apache.yaml
├── 2048_full.yaml
└── Install_grafana.txt
/README.md:
--------------------------------------------------------------------------------
1 | # eks-demos
2 | Readme file to be updated
3 |
--------------------------------------------------------------------------------
/dashboard-adminuser.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: admin-user
5 | namespace: kubernetes-dashboard
--------------------------------------------------------------------------------
/FluentD_Demo/create_ES.txt:
--------------------------------------------------------------------------------
1 | aws es create-elasticsearch-domain --domain-name eks-logs --elasticsearch-version 7.9 --elasticsearch-cluster-config InstanceType=t3.small.elasticsearch,InstanceCount=1 --ebs-options EBSEnabled=true,VolumeType=gp2,VolumeSize=10
2 |
--------------------------------------------------------------------------------
/eks_devops_flow_1_demo/kubectl_role_policy.json:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2012-10-17",
3 | "Statement": [
4 | {
5 | "Effect": "Allow",
6 | "Action": "eks:Describe*",
7 | "Resource": "*"
8 | }
9 | ]
10 | }
--------------------------------------------------------------------------------
/eksctl-create-cluster.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: eksctl.io/v1alpha5
3 | kind: ClusterConfig
4 |
5 | metadata:
6 | name: config-file
7 | region: us-west-2
8 |
9 | nodeGroups:
10 | - name: ng-default
11 | instanceType: t3.micro
12 | desiredCapacity: 2
13 |
--------------------------------------------------------------------------------
/ingress_demo_2/frontend-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: "service-frontend"
5 | namespace: "2048-game"
6 | spec:
7 | ports:
8 | - port: 80
9 | targetPort: 80
10 | protocol: TCP
11 | type: NodePort
12 | selector:
13 | app: "frontend"
--------------------------------------------------------------------------------
/dashboard-role-binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: admin-user
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: cluster-admin
9 | subjects:
10 | - kind: ServiceAccount
11 | name: admin-user
12 | namespace: kubernetes-dashboard
--------------------------------------------------------------------------------
/network-policy-1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | name: test-network-policy
5 | namespace: namespace-b
6 | spec:
7 | podSelector:
8 | matchLabels:
9 | environment: test
10 | policyTypes:
11 | - Ingress
12 | ingress:
13 | - from:
14 | - namespaceSelector:
15 | matchLabels:
16 | myspace: namespacea
17 |
18 |
19 |
--------------------------------------------------------------------------------
/nginx-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | environment: test
6 | name: test
7 | spec:
8 | replicas: 3
9 | selector:
10 | matchLabels:
11 | environment: test
12 | template:
13 | metadata:
14 | labels:
15 | environment: test
16 | spec:
17 | containers:
18 | - image: nginx:1.16
19 | name: nginx
--------------------------------------------------------------------------------
/eksctl-create-ng.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: eksctl.io/v1alpha5
3 | kind: ClusterConfig
4 |
5 | metadata:
6 | name: eksctl-test
7 | region: us-west-2
8 |
9 | nodeGroups:
10 | - name: ng1-public
11 | instanceType: t3.micro
12 | desiredCapacity: 2
13 |
14 | managedNodeGroups:
15 | - name: ng2-managed
16 | instanceType: t3.micro
17 | minSize: 1
18 | maxSize: 3
19 | desiredCapacity: 2
--------------------------------------------------------------------------------
/ingress_demo_2/nginx-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: "frontend-deployment"
5 | namespace: "2048-game"
6 | spec:
7 | selector:
8 | matchLabels:
9 | app: "frontend"
10 | replicas: 1
11 | template:
12 | metadata:
13 | labels:
14 | app: "frontend"
15 | spec:
16 | containers:
17 | - image: nginx
18 | imagePullPolicy: Always
19 | name: "frontend"
20 | ports:
21 | - containerPort: 80
--------------------------------------------------------------------------------
/nginx-deployment-withrolling.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | environment: test
6 | name: testdeploy
7 | spec:
8 | replicas: 3
9 | selector:
10 | matchLabels:
11 | environment: test
12 | minReadySeconds: 10
13 | strategy:
14 | rollingUpdate:
15 | maxSurge: 1
16 | maxUnavailable: 0
17 | type: RollingUpdate
18 | template:
19 | metadata:
20 | labels:
21 | environment: test
22 | spec:
23 | containers:
24 | - image: nginx:1.16
25 | name: nginx
--------------------------------------------------------------------------------
/cluster-autoscaler-deployment-1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: php-apache
5 | spec:
6 | selector:
7 | matchLabels:
8 | run: php-apache
9 | replicas: 20
10 | template:
11 | metadata:
12 | labels:
13 | run: php-apache
14 | spec:
15 | containers:
16 | - name: php-apache
17 | image: k8s.gcr.io/hpa-example
18 | ports:
19 | - containerPort: 80
20 | resources:
21 | requests:
22 | cpu: 500m
23 | memory: 256Mi
24 | limits:
25 | cpu: 1000m
26 | memory: 512Mi
--------------------------------------------------------------------------------
/eks_devops_flow_1_demo/Dockerfile:
--------------------------------------------------------------------------------
1 | # This is a multi-stage build. First we are going to compile and then
2 | # create a small image for runtime.
3 | FROM golang:1.11.1 as builder
4 |
5 | RUN mkdir -p /go/src/github.com/eks-workshop-sample-api-service-go
6 | WORKDIR /go/src/github.com/eks-workshop-sample-api-service-go
7 | RUN useradd -u 10001 app
8 | COPY . .
9 | RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o main .
10 |
11 | FROM scratch
12 |
13 | COPY --from=builder /go/src/github.com/eks-workshop-sample-api-service-go/main /main
14 | COPY --from=builder /etc/passwd /etc/passwd
15 | USER app
16 |
17 | EXPOSE 8080
18 | CMD ["/main"]
--------------------------------------------------------------------------------
/clusterip-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: app-service
5 | spec:
6 | ports:
7 | - port: 80
8 | protocol: TCP
9 | selector:
10 | app: app-server
11 |
12 | ---
13 |
14 | apiVersion: apps/v1
15 | kind: Deployment
16 | metadata:
17 | name: app-server
18 | labels:
19 | app: app-server
20 | spec:
21 | selector:
22 | matchLabels:
23 | app: app-server
24 | template:
25 | metadata:
26 | labels:
27 | app: app-server
28 | spec:
29 | containers:
30 | - name: web-server
31 | image: nginx
32 | ports:
33 | - containerPort: 80
34 |
35 |
--------------------------------------------------------------------------------
/ingress_demo_2/webserver-ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: webserver-ingress
5 | namespace: 2048-game
6 | annotations:
7 | alb.ingress.kubernetes.io/scheme: internet-facing
8 | spec:
9 | ingressClassName: alb
10 | rules:
11 | - http:
12 | paths:
13 | - path: /frontend
14 | pathType: Prefix
15 | backend:
16 | service:
17 | name: service-frontend
18 | port:
19 | number: 80
20 | - path: /
21 | pathType: Prefix
22 | backend:
23 | service:
24 | name: service-2048
25 | port:
26 | number: 80
27 |
28 |
--------------------------------------------------------------------------------
/nodeport-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: app-service
5 | spec:
6 | type: NodePort
7 | ports:
8 | - nodePort: 32000
9 | port: 80
10 | targetPort: 80
11 | selector:
12 | app: app-server
13 |
14 | ---
15 |
16 | apiVersion: apps/v1
17 | kind: Deployment
18 | metadata:
19 | name: app-server
20 | labels:
21 | app: app-server
22 | spec:
23 | selector:
24 | matchLabels:
25 | app: app-server
26 | template:
27 | metadata:
28 | labels:
29 | app: app-server
30 | spec:
31 | containers:
32 | - name: web-server
33 | image: nginx
34 | ports:
35 | - containerPort: 80
36 |
37 |
--------------------------------------------------------------------------------
/loadbalancer-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: lb-service
5 | labels:
6 | app: lb-service
7 | spec:
8 | type: LoadBalancer
9 | ports:
10 | - port: 80
11 | selector:
12 | app: frontend
13 | ---
14 | apiVersion: apps/v1
15 | kind: Deployment
16 | metadata:
17 | name: frontend-deployment
18 | spec:
19 | replicas: 2
20 | selector:
21 | matchLabels:
22 | app: frontend
23 | minReadySeconds: 30
24 | strategy:
25 | type: RollingUpdate
26 | rollingUpdate:
27 | maxSurge: 1
28 | maxUnavailable: 0
29 | template:
30 | metadata:
31 | labels:
32 | app: frontend
33 | spec:
34 | containers:
35 | - name: frontend-container
36 | image: nginx
37 |
--------------------------------------------------------------------------------
/security/user-rolebinding.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: Role
4 | metadata:
5 | name: deployment-role
6 | namespace: frontend
7 | rules:
8 | - apiGroups:
9 | - ""
10 | - extensions
11 | - apps
12 | resources:
13 | - deployments
14 | - replicasets
15 | - pods
16 | verbs:
17 | - create
18 | - get
19 | - list
20 | - update
21 | - delete
22 | - watch
23 | - patch
24 | ---
25 | apiVersion: rbac.authorization.k8s.io/v1
26 | kind: RoleBinding
27 | metadata:
28 | name: deployment-rolebinding
29 | namespace: frontend
30 | roleRef:
31 | apiGroup: rbac.authorization.k8s.io
32 | kind: Role
33 | name: deployment-role
34 | subjects:
35 | - kind: User
36 | name: developerbob
37 | apiGroup: rbac.authorization.k8s.io
38 |
39 |
--------------------------------------------------------------------------------
/fargate-hpa.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | environment: test
6 | name: test
7 | namespace: frontendapp
8 | spec:
9 | replicas: 3
10 | selector:
11 | matchLabels:
12 | environment: test
13 | template:
14 | metadata:
15 | labels:
16 | environment: test
17 | spec:
18 | containers:
19 | - image: nginx:1.16
20 | name: nginx
21 |
22 | ---
23 |
24 | apiVersion: autoscaling/v2
25 | kind: HorizontalPodAutoscaler
26 | metadata:
27 | name: test
28 | namespace: frontendapp
29 | spec:
30 | scaleTargetRef:
31 | apiVersion: apps/v1
32 | kind: Deployment
33 | name: test
34 | minReplicas: 3
35 | maxReplicas: 10
36 | metrics:
37 | - type: Resource
38 | resource:
39 | name: cpu
40 | target: {type: Utilization, averageUtilization: 50}
41 |
--------------------------------------------------------------------------------
/eks_devops_flow_1_demo/hello-k8s.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: hello-k8s
5 | spec:
6 | type: LoadBalancer
7 | ports:
8 | - port: 80
9 | targetPort: 8080
10 | selector:
11 | app: hello-k8s
12 | ---
13 | apiVersion: apps/v1
14 | kind: Deployment
15 | metadata:
16 | name: hello-k8s
17 | spec:
18 | replicas: 3
19 | strategy:
20 | type: RollingUpdate
21 | rollingUpdate:
22 | maxUnavailable: 2
23 | maxSurge: 2
24 | selector:
25 | matchLabels:
26 | app: hello-k8s
27 | template:
28 | metadata:
29 | labels:
30 | app: hello-k8s
31 | spec:
32 | containers:
33 | - name: hello-k8s
34 | image: CONTAINER_IMAGE
35 | securityContext:
36 | privileged: false
37 | readOnlyRootFilesystem: true
38 | allowPrivilegeEscalation: false
39 | ports:
40 | - containerPort: 8080
--------------------------------------------------------------------------------
/FluentD_Demo/hello-k8s-forlog.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: hello-k8s
5 | spec:
6 | type: LoadBalancer
7 | ports:
8 | - port: 80
9 | targetPort: 8080
10 | selector:
11 | app: hello-k8s
12 | ---
13 | apiVersion: apps/v1
14 | kind: Deployment
15 | metadata:
16 | name: hello-k8s
17 | spec:
18 | replicas: 3
19 | strategy:
20 | type: RollingUpdate
21 | rollingUpdate:
22 | maxUnavailable: 2
23 | maxSurge: 2
24 | selector:
25 | matchLabels:
26 | app: hello-k8s
27 | template:
28 | metadata:
29 | labels:
30 | app: hello-k8s
31 | spec:
32 | containers:
33 | - name: hello-k8s
34 | image: raj80dockerid/udemydemo:latest
35 | securityContext:
36 | privileged: false
37 | readOnlyRootFilesystem: true
38 | allowPrivilegeEscalation: false
39 | ports:
40 | - containerPort: 8080
41 |
--------------------------------------------------------------------------------
/hpa-php-apache.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: php-apache
5 | spec:
6 | selector:
7 | matchLabels:
8 | run: php-apache
9 | replicas: 1
10 | template:
11 | metadata:
12 | labels:
13 | run: php-apache
14 | spec:
15 | containers:
16 | - name: php-apache
17 | image: k8s.gcr.io/hpa-example
18 | ports:
19 | - containerPort: 80
20 | resources:
21 | requests:
22 | cpu: 500m
23 | limits:
24 | cpu: 1000m
25 |
26 | ---
27 |
28 | apiVersion: v1
29 | kind: Service
30 | metadata:
31 | name: php-apache
32 | labels:
33 | run: php-apache
34 | spec:
35 | ports:
36 | - port: 80
37 | selector:
38 | run: php-apache
39 |
40 | ---
41 |
42 | apiVersion: autoscaling/v1
43 | kind: HorizontalPodAutoscaler
44 | metadata:
45 | name: php-apache
46 | namespace: default
47 | spec:
48 | scaleTargetRef:
49 | apiVersion: apps/v1
50 | kind: Deployment
51 | name: php-apache
52 | minReplicas: 1
53 | maxReplicas: 10
54 | targetCPUUtilizationPercentage: 50
55 |
--------------------------------------------------------------------------------
/security/configmap-aws-auth.txt:
--------------------------------------------------------------------------------
1 | # Please edit the object below. Lines beginning with a '#' will be ignored,
2 | # and an empty file will abort the edit. If an error occurs while saving this file will be
3 | # reopened with the relevant failures.
4 | #
5 | apiVersion: v1
6 | data:
7 | mapRoles: |
8 | - groups:
9 | - system:bootstrappers
10 | - system:nodes
11 | rolearn: arn:aws:iam::719217631821:role/eksctl-eks-access-test-nodegroup-NodeInstanceRole-1U4X1KCH9BEMJ
12 | username: system:node:{{EC2PrivateDNSName}}
13 | mapUsers: |
14 | - userarn: arn:aws:iam::719217631821:user/developertina
15 | username: developertina
16 | groups:
17 | - system:masters
18 | - userarn: arn:aws:iam::719217631821:user/developerbob
19 | username: developerbob
20 | groups:
21 | - system:masters
22 | kind: ConfigMap
23 | metadata:
24 | creationTimestamp: "2020-06-01T18:56:49Z"
25 | name: aws-auth
26 | namespace: kube-system
27 | resourceVersion: "23737"
28 | selfLink: /api/v1/namespaces/kube-system/configmaps/aws-auth
29 | uid: 9b0d3aef-efe6-4325-bb59-5c64ea5bff24
30 |
--------------------------------------------------------------------------------
/security/configmap-aws-auth-yaml.yaml:
--------------------------------------------------------------------------------
1 | # Please edit the object below. Lines beginning with a '#' will be ignored,
2 | # and an empty file will abort the edit. If an error occurs while saving this file will be
3 | # reopened with the relevant failures.
4 | #
5 | apiVersion: v1
6 | data:
7 | mapRoles: |
8 | - groups:
9 | - system:bootstrappers
10 | - system:nodes
11 | rolearn: arn:aws:iam::719217631821:role/eksctl-eks-access-test-nodegroup-NodeInstanceRole-1U4X1KCH9BEMJ
12 | username: system:node:{{EC2PrivateDNSName}}
13 | mapUsers: |
14 | - userarn: arn:aws:iam::719217631821:user/developertina
15 | username: developertina
16 | groups:
17 | - system:masters
18 | - userarn: arn:aws:iam::719217631821:user/developerbob
19 | username: developerbob
20 | groups:
21 | - deployment-role
22 | kind: ConfigMap
23 | metadata:
24 | creationTimestamp: "2020-06-01T18:56:49Z"
25 | name: aws-auth
26 | namespace: kube-system
27 | resourceVersion: "23737"
28 | selfLink: /api/v1/namespaces/kube-system/configmaps/aws-auth
29 | uid: 9b0d3aef-efe6-4325-bb59-5c64ea5bff24
30 |
--------------------------------------------------------------------------------
/eks_devops_flow_1_demo/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "io"
7 | "net/http"
8 | "os"
9 | "sort"
10 | "strings"
11 | )
12 |
13 | func main() {
14 | http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
15 |
16 | f := fib()
17 |
18 | res := &response{Message: "Hello World"}
19 |
20 | for _, e := range os.Environ() {
21 | pair := strings.Split(e, "=")
22 | res.EnvVars = append(res.EnvVars, pair[0]+"="+pair[1])
23 | }
24 | sort.Strings(res.EnvVars)
25 |
26 | for i := 1; i <= 90; i++ {
27 | res.Fib = append(res.Fib, f())
28 | }
29 |
30 | // Beautify the JSON output
31 | out, _ := json.MarshalIndent(res, "", " ")
32 |
33 | // Normally this would be application/json, but we don't want to prompt downloads
34 | w.Header().Set("Content-Type", "text/plain")
35 |
36 | io.WriteString(w, string(out))
37 |
38 | fmt.Println("Hello world - the log message")
39 | })
40 | http.ListenAndServe(":8080", nil)
41 | }
42 |
43 | type response struct {
44 | Message string `json:"message"`
45 | EnvVars []string `json:"env"`
46 | Fib []int `json:"fib"`
47 | }
48 |
49 | func fib() func() int {
50 | a, b := 0, 1
51 | return func() int {
52 | a, b = b, a+b
53 | return a
54 | }
55 | }
--------------------------------------------------------------------------------
/2048_full.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: 2048-game
6 | ---
7 | apiVersion: apps/v1
8 | kind: Deployment
9 | metadata:
10 | namespace: 2048-game
11 | name: deployment-2048
12 | spec:
13 | selector:
14 | matchLabels:
15 | app.kubernetes.io/name: app-2048
16 | replicas: 5
17 | template:
18 | metadata:
19 | labels:
20 | app.kubernetes.io/name: app-2048
21 | spec:
22 | containers:
23 | - image: public.ecr.aws/l6m2t8p7/docker-2048:latest
24 | imagePullPolicy: Always
25 | name: app-2048
26 | ports:
27 | - containerPort: 80
28 | ---
29 | apiVersion: v1
30 | kind: Service
31 | metadata:
32 | namespace: 2048-game
33 | name: service-2048
34 | spec:
35 | ports:
36 | - port: 80
37 | targetPort: 80
38 | protocol: TCP
39 | type: NodePort
40 | selector:
41 | app.kubernetes.io/name: app-2048
42 | ---
43 | apiVersion: networking.k8s.io/v1
44 | kind: Ingress
45 | metadata:
46 | namespace: 2048-game
47 | name: ingress-2048
48 | annotations:
49 | alb.ingress.kubernetes.io/scheme: internet-facing
50 | alb.ingress.kubernetes.io/target-type: ip
51 | spec:
52 | ingressClassName: alb
53 | rules:
54 | - http:
55 | paths:
56 | - path: /
57 | pathType: Prefix
58 | backend:
59 | service:
60 | name: service-2048
61 | port:
62 | number: 80
63 |
--------------------------------------------------------------------------------
/Install_grafana.txt:
--------------------------------------------------------------------------------
1 | 1. Copy paste the following in a YAML file, name it grafana.yaml
2 | datasources:
3 | datasources.yaml:
4 | apiVersion: 1
5 | datasources:
6 | - name: Prometheus
7 | type: prometheus
8 | url: http://prometheus-server.prometheus.svc.cluster.local
9 | access: proxy
10 | isDefault: true
11 |
12 | 2. Grab Grafana Helm charts
13 | helm repo add grafana https://grafana.github.io/helm-charts
14 |
15 | 3. Install Grafana
16 | kubectl create namespace grafana
17 |
18 | helm install grafana grafana/grafana \
19 | --namespace grafana \
20 | --set persistence.storageClassName="gp2" \
21 | --set persistence.enabled=true \
22 | --set adminPassword='EKS!sAWSome' \
23 | --values grafana.yaml \
24 | --set service.type=LoadBalancer
25 |
26 | 4. Check if Grafana is deployed properly
27 | kubectl get all -n grafana
28 |
29 | 5. Get Grafana ELB url
30 | export ELB=$(kubectl get svc -n grafana grafana -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
31 |
32 | echo "http://$ELB"
33 |
34 | 6. When logging in, use username "admin" and get password by running the following:
35 | kubectl get secret --namespace grafana grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
36 |
37 |
38 | 7. Grafana Dashboards for K8s:
39 | https://grafana.com/grafana/dashboards?dataSource=prometheus&direction=desc&orderBy=reviewsCount
40 |
41 | 8. Uninstall Prometheus and Grafana
42 | helm uninstall prometheus --namespace prometheus
43 | helm uninstall grafana --namespace grafana
44 |
--------------------------------------------------------------------------------
/eks_devops_flow_1_demo/buildspec.yml:
--------------------------------------------------------------------------------
1 | ---
2 | version: 0.2
3 | phases:
4 | install:
5 | commands:
6 | - curl -sS -o aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-07-26/bin/linux/amd64/aws-iam-authenticator
7 | - curl -sS -o kubectl https://amazon-eks.s3-us-west-2.amazonaws.com/1.14.6/2019-08-22/bin/linux/amd64/kubectl
8 | - chmod +x ./kubectl ./aws-iam-authenticator
9 | - export PATH=$PWD/:$PATH
10 | - apt-get update && apt-get -y install jq python3-pip python3-dev && pip3 install --upgrade awscli
11 | pre_build:
12 | commands:
13 | - TAG="$REPOSITORY_NAME.$REPOSITORY_BRANCH.$ENVIRONMENT_NAME.$(date +%Y-%m-%d.%H.%M.%S).$(echo $CODEBUILD_RESOLVED_SOURCE_VERSION | head -c 8)"
14 | - echo $TAG
15 | - echo $REPOSITORY_URI
16 | - echo $REPOSITORY_URI:$TAG
17 | - sed -i 's@CONTAINER_IMAGE@'"$REPOSITORY_URI:$TAG"'@' hello-k8s.yml
18 | - $(aws ecr get-login --no-include-email)
19 | - export KUBECONFIG=$HOME/.kube/config
20 | build:
21 | commands:
22 | - docker build -t $REPOSITORY_URI:$TAG .
23 |
24 | post_build:
25 | commands:
26 | - docker push $REPOSITORY_URI:$TAG
27 | - CREDENTIALS=$(aws sts assume-role --role-arn $EKS_KUBECTL_ROLE_ARN --role-session-name codebuild-kubectl --duration-seconds 900)
28 | - export AWS_ACCESS_KEY_ID="$(echo ${CREDENTIALS} | jq -r '.Credentials.AccessKeyId')"
29 | - export AWS_SECRET_ACCESS_KEY="$(echo ${CREDENTIALS} | jq -r '.Credentials.SecretAccessKey')"
30 | - export AWS_SESSION_TOKEN="$(echo ${CREDENTIALS} | jq -r '.Credentials.SessionToken')"
31 | - export AWS_EXPIRATION=$(echo ${CREDENTIALS} | jq -r '.Credentials.Expiration')
32 | - aws eks update-kubeconfig --name $EKS_CLUSTER_NAME
33 | - kubectl apply -f hello-k8s.yml
34 | - printf '[{"name":"hello-k8s","imageUri":"%s"}]' $REPOSITORY_URI:$TAG > build.json
35 | artifacts:
36 | files: build.json
--------------------------------------------------------------------------------
/eks_devops_flow_1_demo/IAM-roles-cloudformation.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | AWSTemplateFormatVersion: 2010-09-09
3 |
4 |
5 | Parameters:
6 |
7 | KubectlRoleName:
8 | Type: String
9 | Default: EksWorkshopCodeBuildKubectlRole
10 | Description: IAM role used by kubectl to interact with EKS cluster
11 | MinLength: 3
12 | MaxLength: 100
13 | ConstraintDescription: You must enter a kubectl IAM role
14 |
15 |
16 | Resources:
17 |
18 |
19 | CodePipelineServiceRole:
20 | Type: AWS::IAM::Role
21 | Properties:
22 | Path: /
23 | AssumeRolePolicyDocument:
24 | Version: 2012-10-17
25 | Statement:
26 | - Effect: Allow
27 | Principal:
28 | Service: codepipeline.amazonaws.com
29 | Action: sts:AssumeRole
30 | Policies:
31 | - PolicyName: codepipeline-access
32 | PolicyDocument:
33 | Version: 2012-10-17
34 | Statement:
35 | - Resource: "*"
36 | Effect: Allow
37 | Action:
38 | - codebuild:StartBuild
39 | - codebuild:BatchGetBuilds
40 | - codecommit:GetBranch
41 | - codecommit:GetCommit
42 | - codecommit:UploadArchive
43 | - codecommit:GetUploadArchiveStatus
44 | - codecommit:CancelUploadArchive
45 | - iam:PassRole
46 | - Resource: "*"
47 | Effect: Allow
48 | Action:
49 | - s3:PutObject
50 | - s3:GetObject
51 | - s3:GetObjectVersion
52 | - s3:GetBucketVersioning
53 |
54 |
55 | CodeBuildServiceRole:
56 | Type: AWS::IAM::Role
57 | Properties:
58 | Path: /
59 | AssumeRolePolicyDocument:
60 | Version: 2012-10-17
61 | Statement:
62 | - Effect: Allow
63 | Principal:
64 | Service: codebuild.amazonaws.com
65 | Action: sts:AssumeRole
66 | Policies:
67 | - PolicyName: root
68 | PolicyDocument:
69 | Version: 2012-10-17
70 | Statement:
71 | - Resource: !Sub arn:aws:iam::${AWS::AccountId}:role/${KubectlRoleName}
72 | Effect: Allow
73 | Action:
74 | - sts:AssumeRole
75 | - Resource: '*'
76 | Effect: Allow
77 | Action:
78 | - eks:Describe*
79 | - Resource: '*'
80 | Effect: Allow
81 | Action:
82 | - logs:CreateLogGroup
83 | - logs:CreateLogStream
84 | - logs:PutLogEvents
85 | - Resource: '*'
86 | Effect: Allow
87 | Action:
88 | - ecr:GetAuthorizationToken
89 | - Resource: '*'
90 | Effect: Allow
91 | Action:
92 | - ec2:CreateNetworkInterface
93 | - ec2:DescribeDhcpOptions
94 | - ec2:DescribeNetworkInterfaces
95 | - ec2:DeleteNetworkInterface
96 | - ec2:DescribeSubnets
97 | - ec2:DescribeSecurityGroups
98 | - ec2:DescribeVpcs
99 | - ec2:CreateNetworkInterfacePermission
100 | - Resource: "*"
101 | Effect: Allow
102 | Action:
103 | - s3:GetObject
104 | - s3:PutObject
105 | - s3:GetObjectVersion
106 | - Resource: '*'
107 | Effect: Allow
108 | Action:
109 | - ecr:GetDownloadUrlForLayer
110 | - ecr:BatchGetImage
111 | - ecr:BatchCheckLayerAvailability
112 | - ecr:PutImage
113 | - ecr:InitiateLayerUpload
114 | - ecr:UploadLayerPart
115 | - ecr:CompleteLayerUpload
116 |
--------------------------------------------------------------------------------
/FluentD_Demo/fluentd.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: fluentd
5 | namespace: kube-system
6 | ---
7 | apiVersion: rbac.authorization.k8s.io/v1
8 | kind: ClusterRole
9 | metadata:
10 | name: fluentd
11 | namespace: kube-system
12 | rules:
13 | - apiGroups: [""]
14 | resources:
15 | - namespaces
16 | - pods
17 | verbs: ["get", "list", "watch"]
18 | ---
19 | apiVersion: rbac.authorization.k8s.io/v1
20 | kind: ClusterRoleBinding
21 | metadata:
22 | name: fluentd
23 | namespace: kube-system
24 | roleRef:
25 | apiGroup: rbac.authorization.k8s.io
26 | kind: ClusterRole
27 | name: fluentd
28 | subjects:
29 | - kind: ServiceAccount
30 | name: fluentd
31 | namespace: kube-system
32 | ---
33 | apiVersion: v1
34 | kind: ConfigMap
35 | metadata:
36 | name: fluentd-config
37 | namespace: kube-system
38 | labels:
39 | k8s-app: fluentd-cloudwatch
40 | data:
41 | fluent.conf: |
42 | @include containers.conf
43 | @include systemd.conf
44 |
45 | <match fluent.**>
46 | @type null
47 | </match>
48 | containers.conf: |
49 | <source>
50 | @type tail
51 | @id in_tail_container_logs
52 | @label @containers
53 | path /var/log/containers/*.log
54 | pos_file /var/log/fluentd-containers.log.pos
55 | tag *
56 | read_from_head true
57 | <parse>
58 | @type json
59 | time_format %Y-%m-%dT%H:%M:%S.%NZ
60 | </parse>
61 | </source>
62 |
63 |
93 | systemd.conf: |
94 | <source>
95 | @type systemd
96 | @id in_systemd_kubelet
97 | @label @systemd
98 | filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
99 | <entry>
100 | field_map {"MESSAGE": "message", "_HOSTNAME": "hostname", "_SYSTEMD_UNIT": "systemd_unit"}
101 | field_map_strict true
102 | </entry>
103 | path /run/log/journal
104 | pos_file /var/log/fluentd-journald-kubelet.pos
105 | read_from_head true
106 | tag kubelet.service
107 | </source>
108 |
109 | <source>
110 | @type systemd
111 | @id in_systemd_kubeproxy
112 | @label @systemd
113 | filters [{ "_SYSTEMD_UNIT": "kubeproxy.service" }]
114 | <entry>
115 | field_map {"MESSAGE": "message", "_HOSTNAME": "hostname", "_SYSTEMD_UNIT": "systemd_unit"}
116 | field_map_strict true
117 | </entry>
118 | path /run/log/journal
119 | pos_file /var/log/fluentd-journald-kubeproxy.pos
120 | read_from_head true
121 | tag kubeproxy.service
122 | </source>
123 |
124 | <source>
125 | @type systemd
126 | @id in_systemd_docker
127 | @label @systemd
128 | filters [{ "_SYSTEMD_UNIT": "docker.service" }]
129 | <entry>
130 | field_map {"MESSAGE": "message", "_HOSTNAME": "hostname", "_SYSTEMD_UNIT": "systemd_unit"}
131 | field_map_strict true
132 | </entry>
133 | path /run/log/journal
134 | pos_file /var/log/fluentd-journald-docker.pos
135 | read_from_head true
136 | tag docker.service
137 | </source>
138 |
139 |
164 | ---
165 | apiVersion: apps/v1
166 | kind: DaemonSet
167 | metadata:
168 | name: fluentd-cloudwatch
169 | namespace: kube-system
170 | labels:
171 | k8s-app: fluentd-cloudwatch
172 | spec:
173 | selector:
174 | matchLabels:
175 | k8s-app: fluentd-cloudwatch
176 | template:
177 | metadata:
178 | labels:
179 | k8s-app: fluentd-cloudwatch
180 | spec:
181 | serviceAccountName: fluentd
182 | terminationGracePeriodSeconds: 30
183 | # Because the image's entrypoint requires to write on /fluentd/etc but we mount configmap there which is read-only,
184 | # this initContainers workaround or other is needed.
185 | # See https://github.com/fluent/fluentd-kubernetes-daemonset/issues/90
186 | initContainers:
187 | - name: copy-fluentd-config
188 | image: busybox
189 | command: ['sh', '-c', 'cp /config-volume/..data/* /fluentd/etc']
190 | volumeMounts:
191 | - name: config-volume
192 | mountPath: /config-volume
193 | - name: fluentdconf
194 | mountPath: /fluentd/etc
195 | containers:
196 | - name: fluentd-cloudwatch
197 | image: fluent/fluentd-kubernetes-daemonset:v1.1-debian-cloudwatch
198 | env:
199 | - name: REGION
200 | value: us-west-2
201 | - name: CLUSTER_NAME
202 | value: eks-loggingtest
203 | resources:
204 | limits:
205 | memory: 200Mi
206 | requests:
207 | cpu: 100m
208 | memory: 200Mi
209 | volumeMounts:
210 | - name: config-volume
211 | mountPath: /config-volume
212 | - name: fluentdconf
213 | mountPath: /fluentd/etc
214 | - name: varlog
215 | mountPath: /var/log
216 | - name: varlibdockercontainers
217 | mountPath: /var/lib/docker/containers
218 | readOnly: true
219 | - name: runlogjournal
220 | mountPath: /run/log/journal
221 | readOnly: true
222 | volumes:
223 | - name: config-volume
224 | configMap:
225 | name: fluentd-config
226 | - name: fluentdconf
227 | emptyDir: {}
228 | - name: varlog
229 | hostPath:
230 | path: /var/log
231 | - name: varlibdockercontainers
232 | hostPath:
233 | path: /var/lib/docker/containers
234 | - name: runlogjournal
235 | hostPath:
236 | path: /run/log/journal
--------------------------------------------------------------------------------