├── OpenVPN
│   ├── README.md
│   ├── playbook.yml
│   └── roles
│       ├── openVPNClient
│       │   ├── tasks
│       │   │   └── main.yml
│       │   └── vars
│       │       └── main.yml
│       └── openVPNServer
│           └── tasks
│               └── main.yml
├── Readme.md
├── charts
│   ├── aws-alb-ingress-controller
│   │   ├── Chart.yaml
│   │   ├── templates
│   │   │   ├── _helpers.tpl
│   │   │   ├── clusterrole.yaml
│   │   │   ├── clusterrolebinding.yaml
│   │   │   ├── deployment.yaml
│   │   │   └── serviceaccount.yaml
│   │   └── values.yaml
│   └── deployment
│       ├── Chart.yaml
│       ├── readme.md
│       ├── templates
│       │   ├── deployment.yaml
│       │   ├── ingress.yaml
│       │   └── service.yaml
│       └── values.yaml
├── deployment.tf
├── ec2.tf
├── eks
├── eks.tf
├── iam-policy.json
├── ingress.tf
├── namespace.tf
├── output.tf
├── provider.tf
├── rbac.tf
├── security_grp.tf
├── terraform.tf
├── terraform.tfvars
├── variable.tf
└── vpc.tf
/OpenVPN/README.md:
--------------------------------------------------------------------------------
1 | # OpenVPN server and client setup using Ansible
2 |
3 | This is an Ansible project used to set up an OpenVPN server on an Ubuntu instance.
4 | It contains two Ansible roles:
5 |
6 | 1. openVPNServer role: sets up the OpenVPN server
7 | 2. openVPNClient role: generates the client .ovpn file
8 |
9 |
10 | You can change the variables of the openVPNClient role to create .ovpn files for different users.
11 |
12 | playbook.yml is the main playbook and is run with the ansible-playbook command.
13 |
14 | Run it with: **ansible-playbook playbook.yml**
15 |
16 |
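For example, a minimal sketch assuming an inventory file called `openvpn.ini` with an `[openVPN]` group (the same layout the Terraform `local-exec` provisioner in this repository writes); the IP address and key path below are placeholders:

```bash
# openvpn.ini -- hypothetical inventory; IP and key path are placeholders
# [openVPN]
# 203.0.113.10 ansible_user=ubuntu ansible_ssh_private_key_file=./vpn.pem

# run both roles against that inventory
ansible-playbook -i openvpn.ini playbook.yml

# extra-vars override the openVPNClient role defaults, e.g. to generate alice.ovpn
ansible-playbook -i openvpn.ini playbook.yml -e "client_name=alice"
```

With the default `destination_key`, the generated .ovpn file is fetched back next to playbook.yml.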
--------------------------------------------------------------------------------
/OpenVPN/playbook.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install python2 on all instances
3 | hosts: "*"
4 | gather_facts: false
5 | tasks:
6 | - name: run apt-get update and install python
7 | raw: "{{ item }}"
8 | loop:
9 | - sudo apt-get update
10 | - sudo apt-get -y install python
11 | become: true
12 | ignore_errors: true
13 |
14 | - hosts: openVPN
15 | become: yes
16 | become_user: root
17 | become_method: sudo
18 | roles:
19 | - openVPNServer
20 | - openVPNClient
--------------------------------------------------------------------------------
/OpenVPN/roles/openVPNClient/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: "Generate client certificate key"
2 | become: yes
3 | shell: source vars; ./build-key --batch {{client_name}}
4 | args:
5 | chdir: "{{ ansible_env.HOME }}/openvpn-ca/"
6 | executable: /bin/bash
7 |
8 | - name: "Create client certificate configs dir"
9 | become: yes
10 | file:
11 | owner: "{{ ansible_env.USER }}"
12 | group: "{{ ansible_env.USER }}"
13 | path: "{{ ansible_env.HOME }}/openvpn-ca/{{client_name}}"
14 | state: directory
15 | mode: 0700
16 |
17 | - name: "Copy client sample configs from remote host itself"
18 | become: yes
19 | copy:
20 | remote_src: yes
21 | src: /usr/share/doc/openvpn/examples/sample-config-files/client.conf
22 | dest: "{{ ansible_env.HOME }}/openvpn-ca/{{client_name}}/{{client_name}}.ovpn"
23 |
24 | - name: Set the server ip and port
25 | lineinfile:
26 | dest: "{{ ansible_env.HOME }}/openvpn-ca/{{client_name}}/{{client_name}}.ovpn"
27 | regexp: "^{{ item.regex | regex_escape() }}"
28 | line: "{{ item.value }}"
29 | with_items:
30 | - { regex: 'remote my-server-1 1194', value: 'remote {{ groups["openVPN"][0] }} 1194' }
31 | - { regex: ';user nobody', value: 'user nobody' }
32 | - { regex: ';group nogroup', value: 'group nogroup' }
33 | - { regex: 'ca ca.crt', value: '#ca ca.crt' }
34 | - { regex: 'cert client.crt', value: '#cert client.crt' }
35 | - { regex: 'key client.key', value: '#key client.key' }
36 | - { regex: 'tls-auth ta.key 1', value: '#tls-auth ta.key 1' }
37 |
38 | - name: "Create client ovpn file"
39 | become: yes
40 | shell: "{{ item }}"
41 | with_items:
42 | - echo -e '' >> {{ ansible_env.HOME }}/openvpn-ca/{{client_name}}/{{client_name}}.ovpn
43 | - cat {{ ansible_env.HOME }}/openvpn-ca/keys/ca.crt >> {{ ansible_env.HOME }}/openvpn-ca/{{client_name}}/{{client_name}}.ovpn
44 | - echo -e '\n' >> {{ ansible_env.HOME }}/openvpn-ca/{{client_name}}/{{client_name}}.ovpn
45 | - cat {{ ansible_env.HOME }}/openvpn-ca/keys/{{client_name}}.crt >> {{ ansible_env.HOME }}/openvpn-ca/{{client_name}}/{{client_name}}.ovpn
46 | - echo -e '\n' >> {{ ansible_env.HOME }}/openvpn-ca/{{client_name}}/{{client_name}}.ovpn
47 | - cat {{ ansible_env.HOME }}/openvpn-ca/keys/{{client_name}}.key >> {{ ansible_env.HOME }}/openvpn-ca/{{client_name}}/{{client_name}}.ovpn
48 | - echo -e '\n' >> {{ ansible_env.HOME }}/openvpn-ca/{{client_name}}/{{client_name}}.ovpn
49 | - cat {{ ansible_env.HOME }}/openvpn-ca/keys/ta.key >> {{ ansible_env.HOME }}/openvpn-ca/{{client_name}}/{{client_name}}.ovpn
50 | - echo -e '' >> {{ ansible_env.HOME }}/openvpn-ca/{{client_name}}/{{client_name}}.ovpn
51 | - echo -e 'key-direction 1' >> {{ ansible_env.HOME }}/openvpn-ca/{{client_name}}/{{client_name}}.ovpn
52 | args:
53 | chdir: "{{ ansible_env.HOME }}/openvpn-ca/"
54 | executable: /bin/bash
55 |
56 | - name: Fetch client configurations
57 | fetch:
58 | src: "{{ ansible_env.HOME }}/openvpn-ca/{{client_name}}/{{ item|basename }}"
59 | dest: "{{ destination_key }}/"
60 | flat: yes
61 | with_items:
62 | - "{{client_name}}.ovpn"
--------------------------------------------------------------------------------
/OpenVPN/roles/openVPNClient/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | client_name: "openvpn"
3 | destination_key: "{{ playbook_dir }}"
--------------------------------------------------------------------------------
/OpenVPN/roles/openVPNServer/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Update apt packages
2 | become: true
3 | apt:
4 | upgrade: yes
5 |
6 | - name: Install openvpn
7 | package:
8 | name: "{{ item }}"
9 | state: present
10 | with_items:
11 | - openvpn
12 | - easy-rsa
13 |
14 | - name: "Remove CA directory"
15 | become: yes
16 | file:
17 | state: absent
18 | path: "{{ ansible_env.HOME }}/openvpn-ca/"
19 |
20 | - name: "Create CA dir"
21 | become: yes
22 | command: make-cadir {{ ansible_env.HOME }}/openvpn-ca
23 |
24 | - name: Customize CA variable configuration
25 | lineinfile:
26 | dest: "{{ ansible_env.HOME }}/openvpn-ca/vars"
27 | regexp: "^{{ item.property | regex_escape() }}="
28 | line: "{{ item.property }}={{ item.value }}"
29 | with_items:
30 | - { property: 'export KEY_NAME', value: '"server"' }
31 | - { property: 'export KEY_COUNTRY', value: '"UK"' }
32 | - { property: 'export KEY_PROVINCE', value: '"WS"' }
33 | - { property: 'export KEY_CITY', value: '"SW"' }
34 | - { property: 'export KEY_ORG', value: '"TK"' }
35 | - { property: 'export KEY_EMAIL', value: '"ak@tk.com"' }
36 | - { property: 'export KEY_OU', value: '"TS"' }
37 | - { property: 'export KEY_CONFIG', value: '{{ ansible_env.HOME }}/openvpn-ca/openssl-1.0.0.cnf' }
38 | - { property: 'export KEY_DIR', value: '{{ ansible_env.HOME }}/openvpn-ca/keys' }
39 |
40 | - name: "Build the certificate authority"
41 | become: yes
42 | shell: >
43 | source vars;
44 | ./clean-all;
45 | yes "" | ./build-ca;
46 | args:
47 | chdir: "{{ ansible_env.HOME }}/openvpn-ca/"
48 | executable: /bin/bash
49 |
50 | - name: "Build server certificate"
51 | become: yes
52 | shell: >
53 | source vars;
54 | ./build-key-server --batch server;
55 | args:
56 | chdir: "{{ ansible_env.HOME }}/openvpn-ca/"
57 | executable: /bin/bash
58 |
59 | - name: "Build Diffie-Hellman parameters and key generation"
60 | become: yes
61 | shell: >
62 | source vars;
63 | yes "" | ./build-dh;
64 | openvpn --genkey --secret keys/ta.key;
65 | args:
66 | chdir: "{{ ansible_env.HOME }}/openvpn-ca/"
67 | executable: /bin/bash
68 |
69 | - name: "Copy key and certificates to /etc/openvpn"
70 | become: yes
71 | copy:
72 | remote_src: yes
73 | src: "{{ ansible_env.HOME }}/openvpn-ca/keys/{{ item }}"
74 | dest: "/etc/openvpn/"
75 | with_items:
76 | - "ca.crt"
77 | - "server.crt"
78 | - "server.key"
79 | - "ta.key"
80 | - "dh2048.pem"
81 |
82 | - name: "Generate server.conf from sample config"
83 | become: yes
84 | shell: >
85 | gzip -d -c /usr/share/doc/openvpn/examples/sample-config-files/server.conf.gz | sudo tee /etc/openvpn/server.conf > /dev/null
86 |
87 | - name: Adjust OpenVPN server configuration
88 | lineinfile:
89 | dest: "/etc/openvpn/server.conf"
90 | regexp: "^{{ item.regex | regex_escape() }}"
91 | line: "{{ item.value }}"
92 | with_items:
93 | - { regex: ';user nobody', value: 'user nobody' }
94 | - { regex: ';group nogroup', value: 'group nogroup' }
95 | - { regex: ';push "redirect-gateway def1 bypass-dhcp"', value: 'push "redirect-gateway def1 bypass-dhcp"' }
96 | - { regex: ';push "route 192.168.10.0 255.255.255.0"', value: 'push " route 10.0.0.0 255.255.255.0"' }
97 | - { regex: ';push "dhcp-option DNS 208.67.222.222"', value: 'push "dhcp-option DNS 10.0.0.2"' }
98 | - { regex: 'cert server.crt', value: 'cert server.crt' }
99 | - { regex: 'key server.key', value: 'key server.key' }
100 |
101 | - name: Configure IP forwarding
102 | become: true
103 | sysctl:
104 | name: net.ipv4.ip_forward
105 | value: 1
106 | state: present
107 |
108 | - name: Add ufw before content
109 | become: true
110 | blockinfile:
111 | path: /etc/ufw/before.rules
112 | insertbefore: BOF
113 | content: |
114 | # NAT table rules
115 | *nat
116 | :POSTROUTING ACCEPT [0:0]
117 | -A POSTROUTING -s 10.8.0.0/8 -o eth0 -j MASQUERADE
118 | COMMIT
119 |
120 | - name: Customize ufw forwarding policy
121 | become: true
122 | lineinfile:
123 | line: "DEFAULT_FORWARD_POLICY=\"ACCEPT\""
124 | path: "/etc/default/ufw"
125 | regexp: "^DEFAULT_FORWARD_POLICY=\"DROP\""
126 |
127 | - name: Open ufw ports for openvpn and ssh
128 | become: true
129 | shell: ufw allow openvpn && ufw allow OpenSSH
130 |
131 | - name: Enable ufw
132 | become: true
133 | shell: ufw --force enable
134 |
135 | - name: Start openvpn systemd service
136 | become: true
137 | systemd:
138 | name: openvpn@server
139 | state: started
140 | daemon_reload: yes
141 | enabled: yes
142 |
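To spot-check the result of this role by hand, the usual systemd/iproute2/ufw commands can be run on the server (these are not part of the role; the 10.8.0.0/24 subnet is the default from the sample server.conf):

```bash
systemctl status openvpn@server   # service started by the last task
ip addr show tun0                 # tun0 should be up with a 10.8.0.x address
sudo ss -lunp | grep 1194         # OpenVPN listening on UDP 1194
sudo ufw status                   # ufw enabled, with OpenVPN and OpenSSH allowed
```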
--------------------------------------------------------------------------------
/Readme.md:
--------------------------------------------------------------------------------
1 | # Deploy AWS EKS with OpenVPN
2 |
3 | ## Getting Started
4 |
5 | **Clone the Repository:**
6 |
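For example (repository URL as referenced elsewhere in this project):

```bash
git clone https://github.com/akshaykalra92/aws-eks-openvpn-project.git
cd aws-eks-openvpn-project
```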
7 | **You can provision a basic EKS cluster with VPC using Terraform with the following commands:**
8 |
9 | terraform init
10 | terraform plan
11 | terraform apply
12 |
13 | It might take a while for the cluster to be created (up to 15-20 minutes).
14 |
15 | 1) It sets up the IAM policies for the ALB Ingress Controller in EKS with Terraform, so the cluster is provisioned with the right policies for the controller.
16 | 2) It integrates the Helm provider with Terraform and EKS, so you can provision an EKS cluster and install Helm packages at the same time. The current code automatically installs the ALB Ingress Controller with Helm.
17 | As soon as the cluster is ready, you should find a kubeconfig_teraki-eks-cluster kubeconfig file in the current directory.
18 |
19 | **Once everything is deployed, RUN:**
20 |
21 | KUBECONFIG=./kubeconfig_teraki-eks-cluster kubectl get nodes --all-namespaces
22 | KUBECONFIG=./kubeconfig_teraki-eks-cluster kubectl describe ingress hello-kubernetes -n web-app
23 |
24 | If you see any error on the ingress, it is because the load balancer is still starting while Terraform is deploying the ingress. Follow the steps below if you don't see the URL on the ingress.
25 | 1) List the Helm charts (KUBECONFIG=./kubeconfig_teraki-eks-cluster helm ls -n web-app)
26 | 2) Delete the Helm chart (KUBECONFIG=./kubeconfig_teraki-eks-cluster helm delete deployment -n web-app)
27 | 3) Go inside the Helm chart folder and redeploy the chart (KUBECONFIG=../.././kubeconfig_teraki-eks-cluster helm install . --generate-name -n web-app)
28 | 4) Now describe the ingress to get the URL: (KUBECONFIG=../.././kubeconfig_teraki-eks-cluster kubectl describe ingress -n web-app).
29 |
30 | Once Terraform finishes, the openvpn.ovpn client file will be present in the OpenVPN folder. Load that file into your OpenVPN client and connect.
31 |
32 | The ingress description shows the application URL (Address:). Connect to the VPN first, then open that URL in your browser to confirm the application is running.
33 |
34 |
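If you prefer a command-line client on Linux, one way to connect (assuming the openvpn package is installed locally) is:

```bash
sudo openvpn --config OpenVPN/openvpn.ovpn
```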
--------------------------------------------------------------------------------
/charts/aws-alb-ingress-controller/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | name: aws-alb-ingress-controller
3 | description: DEPRECATED A Helm chart for AWS ALB Ingress Controller
4 | version: 1.0.4
5 | appVersion: "v1.1.8"
6 | engine: gotpl
7 | home: https://github.com/kubernetes-sigs/aws-alb-ingress-controller
8 | sources:
9 | - https://github.com/kubernetes-sigs/aws-alb-ingress-controller
10 | keywords:
11 | - aws
12 | - ingress
13 | deprecated: true
--------------------------------------------------------------------------------
/charts/aws-alb-ingress-controller/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/* vim: set filetype=mustache: */}}
2 | {{/*
3 | Expand the name of the chart.
4 | */}}
5 | {{- define "aws-alb-ingress-controller.name" -}}
6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
7 | {{- end -}}
8 |
9 | {{/*
10 | Create a default fully qualified app name.
11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
12 | If release name contains chart name it will be used as a full name.
13 | */}}
14 | {{- define "aws-alb-ingress-controller.fullname" -}}
15 | {{- if .Values.fullnameOverride -}}
16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
17 | {{- else -}}
18 | {{- $name := default .Chart.Name .Values.nameOverride -}}
19 | {{- if contains $name .Release.Name -}}
20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}}
21 | {{- else -}}
22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
23 | {{- end -}}
24 | {{- end -}}
25 | {{- end -}}
26 |
27 | {{/*
28 | Create chart name and version as used by the chart label.
29 | */}}
30 | {{- define "aws-alb-ingress-controller.chart" -}}
31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
32 | {{- end -}}
33 |
34 | {{/*
35 | Return the service account name used by the pod.
36 | */}}
37 | {{- define "serviceaccount.name" -}}
38 | {{- if and .Values.rbac.create .Values.rbac.serviceAccount.create -}}
39 | {{ include "aws-alb-ingress-controller.fullname" . }}
40 | {{- else -}}
41 | {{ .Values.rbac.serviceAccount.name }}
42 | {{- end -}}
43 | {{- end -}}
--------------------------------------------------------------------------------
/charts/aws-alb-ingress-controller/templates/clusterrole.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.rbac.create }}
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: {{ include "aws-alb-ingress-controller.fullname" . }}
6 | labels:
7 | app.kubernetes.io/name: {{ include "aws-alb-ingress-controller.name" . }}
8 | helm.sh/chart: {{ include "aws-alb-ingress-controller.chart" . }}
9 | app.kubernetes.io/instance: {{ .Release.Name }}
10 | app.kubernetes.io/managed-by: {{ .Release.Service }}
11 | rules:
12 | - apiGroups:
13 | - ""
14 | - extensions
15 | resources:
16 | - configmaps
17 | - endpoints
18 | - events
19 | - ingresses
20 | - ingresses/status
21 | - services
22 | - pods/status
23 | verbs:
24 | - create
25 | - get
26 | - list
27 | - update
28 | - watch
29 | - patch
30 | - apiGroups:
31 | - ""
32 | - extensions
33 | resources:
34 | - nodes
35 | - pods
36 | - secrets
37 | - services
38 | - namespaces
39 | verbs:
40 | - get
41 | - list
42 | - watch
43 | {{- end }}
--------------------------------------------------------------------------------
/charts/aws-alb-ingress-controller/templates/clusterrolebinding.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.rbac.create }}
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRoleBinding
4 | metadata:
5 | name: {{ include "aws-alb-ingress-controller.fullname" . }}
6 | labels:
7 | app.kubernetes.io/name: {{ include "aws-alb-ingress-controller.name" . }}
8 | helm.sh/chart: {{ include "aws-alb-ingress-controller.chart" . }}
9 | app.kubernetes.io/instance: {{ .Release.Name }}
10 | app.kubernetes.io/managed-by: {{ .Release.Service }}
11 | roleRef:
12 | apiGroup: rbac.authorization.k8s.io
13 | kind: ClusterRole
14 | name: {{ include "aws-alb-ingress-controller.fullname" . }}
15 | subjects:
16 | - kind: ServiceAccount
17 | name: {{ include "serviceaccount.name" . }}
18 | namespace: {{ .Release.Namespace }}
19 | {{- end }}
--------------------------------------------------------------------------------
/charts/aws-alb-ingress-controller/templates/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: {{ include "aws-alb-ingress-controller.fullname" . }}
5 | labels:
6 | app.kubernetes.io/name: {{ include "aws-alb-ingress-controller.name" . }}
7 | helm.sh/chart: {{ include "aws-alb-ingress-controller.chart" . }}
8 | app.kubernetes.io/instance: {{ .Release.Name }}
9 | app.kubernetes.io/managed-by: {{ .Release.Service }}
10 | spec:
11 | replicas: {{ .Values.replicaCount }}
12 | selector:
13 | matchLabels:
14 | app.kubernetes.io/name: {{ include "aws-alb-ingress-controller.name" . }}
15 | app.kubernetes.io/instance: {{ .Release.Name }}
16 | template:
17 | metadata:
18 | labels:
19 | app.kubernetes.io/name: {{ include "aws-alb-ingress-controller.name" . }}
20 | app.kubernetes.io/instance: {{ .Release.Name }}
21 | {{- with .Values.podLabels }}{{ toYaml . | nindent 8 }}{{- end }}
22 | {{- with .Values.podAnnotations }}
23 | annotations: {{ toYaml . | nindent 8 }}
24 | {{- end }}
25 | spec:
26 | {{- with .Values.priorityClassName }}
27 | priorityClassName: "{{ . }}"
28 | {{- end }}
29 | containers:
30 | - name: {{ .Chart.Name }}
31 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
32 | imagePullPolicy: {{ .Values.image.pullPolicy }}
33 | args:
34 | - --cluster-name={{ required "specify clusterName via --set clusterName=YourClusterName" .Values.clusterName }}
35 | {{- with .Values.scope.ingressClass }}
36 | - --ingress-class={{ . }}
37 | {{- end }}
38 | {{- if .Values.scope.singleNamespace }}
39 | - --watch-namespace={{ default .Release.Namespace .Values.scope.watchNamespace }}
40 | {{- end }}
41 | {{- if not .Values.autoDiscoverAwsRegion }}
42 | - --aws-region={{ .Values.awsRegion }}
43 | {{- end }}
44 | {{- if not .Values.autoDiscoverAwsVpcID }}
45 | - --aws-vpc-id={{ .Values.awsVpcID }}
46 | {{- end }}
47 | {{- range $key, $value := .Values.extraArgs }}
48 | - --{{ $key }}={{ $value }}
49 | {{- end }}
50 | env:
51 | {{- range $key, $value := .Values.extraEnv }}
52 | - name: {{ $key }}
53 | value: "{{ $value }}"
54 | {{- end }}
55 | ports:
56 | - name: health
57 | containerPort: 10254
58 | protocol: TCP
59 | {{- if .Values.enableReadinessProbe }}
60 | readinessProbe:
61 | httpGet:
62 | path: /healthz
63 | port: health
64 | scheme: HTTP
65 | initialDelaySeconds: {{ .Values.readinessProbeInitialDelay }}
66 | periodSeconds: {{ .Values.readinessProbeInterval }}
67 | timeoutSeconds: {{ .Values.readinessProbeTimeout }}
68 | {{- end }}
69 | {{- if .Values.enableLivenessProbe }}
70 | livenessProbe:
71 | httpGet:
72 | path: /healthz
73 | port: health
74 | scheme: HTTP
75 | initialDelaySeconds: {{ add .Values.livenessProbeInitialDelay .Values.readinessProbeInitialDelay }}
76 | periodSeconds: 60
77 | timeoutSeconds: {{ .Values.livenessProbeTimeout }}
78 | {{- end }}
79 | resources: {{ toYaml .Values.resources | nindent 12 }}
80 | {{- with .Values.volumeMounts }}
81 | volumeMounts: {{ toYaml . | nindent 12 }}
82 | {{- end }}
83 | {{- with .Values.containerSecurityContext }}
84 | securityContext: {{ toYaml . | nindent 12 }}
85 | {{- end }}
86 | {{- with .Values.nodeSelector }}
87 | nodeSelector: {{ toYaml . | nindent 8 }}
88 | {{- end }}
89 | {{- with .Values.affinity }}
90 | affinity: {{ toYaml . | nindent 8 }}
91 | {{- end }}
92 | {{- with .Values.tolerations }}
93 | tolerations: {{ toYaml . | nindent 8 }}
94 | {{- end }}
95 | {{- with .Values.volumes }}
96 | volumes: {{ toYaml . | nindent 8 }}
97 | {{- end }}
98 | serviceAccountName: {{ template "serviceaccount.name" . }}
99 | {{- with .Values.securityContext }}
100 | securityContext: {{ toYaml . | nindent 8 }}
101 | {{- end }}
102 | terminationGracePeriodSeconds: 60
--------------------------------------------------------------------------------
/charts/aws-alb-ingress-controller/templates/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | {{- if and .Values.rbac.create .Values.rbac.serviceAccount.create -}}
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: {{ template "serviceaccount.name" . }}
6 | labels:
7 | app.kubernetes.io/name: {{ include "aws-alb-ingress-controller.name" . }}
8 | helm.sh/chart: {{ include "aws-alb-ingress-controller.chart" . }}
9 | app.kubernetes.io/instance: {{ .Release.Name }}
10 | app.kubernetes.io/managed-by: {{ .Release.Service }}
11 | {{- with .Values.rbac.serviceAccount.annotations }}
12 | annotations: {{ toYaml . | nindent 4 }}
13 | {{- end }}
14 | {{- end }}
--------------------------------------------------------------------------------
/charts/aws-alb-ingress-controller/values.yaml:
--------------------------------------------------------------------------------
1 | # Default values for aws-alb-ingress-controller.
2 | # This is a YAML-formatted file.
3 | # Declare variables to be passed into your templates.
4 |
5 | ## Resources created by the ALB Ingress controller will be prefixed with this string
6 | ## Required
7 | clusterName: k8s
8 |
9 | ## AWS region of k8s cluster, required if ec2metadata is unavailable from controller pod
10 | ## Required if autoDiscoverAwsRegion != true
11 | awsRegion: "us-east-2"
12 |
13 | ## Auto Discover awsRegion from ec2metadata, set this to true and omit awsRegion when ec2metadata is available.
14 | autoDiscoverAwsRegion: false
15 |
16 | ## VPC ID of k8s cluster, required if ec2metadata is unavailable from controller pod
17 | ## Required if autoDiscoverAwsVpcID != true
18 | awsVpcID: "vpc-xxx"
19 |
20 | ## Auto Discover awsVpcID from ec2metadata, set this to true and omit awsVpcID when ec2metadata is available.
21 | autoDiscoverAwsVpcID: false
22 |
23 | scope:
24 | ## If provided, the ALB ingress controller will only act on Ingress resources annotated with this class
25 | ## Ref: https://github.com/kubernetes-sigs/aws-alb-ingress-controller/blob/master/docs/guide/controller/config.md#limiting-ingress-class
26 | ingressClass: alb
27 |
28 | ## If true, the ALB ingress controller will only act on Ingress resources in a single namespace
29 | ## Default: false; watch all namespaces
30 | singleNamespace: false
31 |
32 | ## If scope.singleNamespace=true, the ALB ingress controller will only act on Ingress resources in this namespace
33 | ## Ref: https://github.com/kubernetes-sigs/aws-alb-ingress-controller/blob/master/docs/guide/controller/config.md#limiting-namespaces
34 | ## Default: namespace of the ALB ingress controller
35 | watchNamespace: ""
36 |
37 | extraArgs: {}
38 |
39 | extraEnv: {}
40 | # AWS_ACCESS_KEY_ID: ""
41 | # AWS_SECRET_ACCESS_KEY: ""
42 |
43 | podAnnotations: {}
44 | # iam.amazonaws.com/role: alb-ingress-controller
45 |
46 | podLabels: {}
47 |
48 | # whether configure readinessProbe on controller pod
49 | enableReadinessProbe: false
50 |
51 | # How often (in seconds) to check controller readiness
52 | readinessProbeInterval: 60
53 |
54 | # How long to wait before timeout (in seconds) when checking controller readiness
55 | readinessProbeTimeout: 3
56 |
57 | # How long to wait (in seconds) before checking the readiness probe
58 | readinessProbeInitialDelay: 30
59 |
60 | # whether configure livenessProbe on controller pod
61 | enableLivenessProbe: false
62 |
63 | # How long to wait (in seconds) before checking the liveness probe
64 | livenessProbeInitialDelay: 30
65 |
66 | # How long to wait before timeout (in seconds) when checking controller liveness
67 | livenessProbeTimeout: 1
68 |
69 | rbac:
70 | ## If true, create & use RBAC resources
71 | ##
72 | create: true
73 | serviceAccount:
74 | create: true
75 | name: default
76 |
77 | ## Annotations for the Service Account
78 | annotations: {}
79 |
80 | image:
81 | repository: docker.io/amazon/aws-alb-ingress-controller
82 | tag: "v1.1.8"
83 | pullPolicy: IfNotPresent
84 |
85 | replicaCount: 1
86 | nameOverride: ""
87 | fullnameOverride: ""
88 |
89 | resources: {}
90 | # We usually recommend not to specify default resources and to leave this as a conscious
91 | # choice for the user. This also increases chances charts run on environments with little
92 | # resources, such as Minikube. If you do want to specify resources, uncomment the following
93 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
94 | # limits:
95 | # cpu: 100m
96 | # memory: 128Mi
97 | # requests:
98 | # cpu: 100m
99 | # memory: 128Mi
100 |
101 | nodeSelector: {}
102 | # node-role.kubernetes.io/node: "true"
103 | # tier: cs
104 |
105 | tolerations: []
106 | # - key: "node-role.kubernetes.io/master"
107 | # effect: NoSchedule
108 |
109 | affinity: {}
110 |
111 | volumeMounts: []
112 | # - name: aws-iam-credentials
113 | # mountPath: /meta/aws-iam
114 | # readOnly: true
115 |
116 | volumes: []
117 | # - name: aws-iam-credentials
118 | # secret:
119 | # secretName: alb-ingress-controller-role
120 |
121 | # Leverage a PriorityClass to ensure your pods survive resource shortages
122 | # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
123 | # PriorityClass: system-cluster-critical
124 | priorityClassName: ""
125 |
126 | # Security Context
127 | # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
128 | # for Pod
129 | securityContext: {}
130 |
131 | # for Container
132 | containerSecurityContext: {}
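In this project the chart is installed through Terraform (ingress.tf), but as a rough sketch it can also be installed directly with Helm. `clusterName` is the only value the templates strictly require; everything else falls back to the defaults above. The release name and namespace below are only examples:

```bash
helm install alb-ingress ./charts/aws-alb-ingress-controller \
  --namespace kube-system \
  --set clusterName=teraki-eks-cluster \
  --set autoDiscoverAwsRegion=true \
  --set autoDiscoverAwsVpcID=true
```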
--------------------------------------------------------------------------------
/charts/deployment/Chart.yaml:
--------------------------------------------------------------------------------
1 | name: deployment
2 | version: 1.0.0
3 | apiVersion: v2
4 | appVersion: 0.0.1
5 | description: Kube-hello-world
6 | maintainers:
7 | - name: Akshay kalra
--------------------------------------------------------------------------------
/charts/deployment/readme.md:
--------------------------------------------------------------------------------
1 | Kubernetes resources
2 | In this folder you will find the basic resources to deploy an application that displays a message, the name of the pod and details of the node it's deployed to.
--------------------------------------------------------------------------------
/charts/deployment/templates/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: {{ .Values.name }}
5 | spec:
6 | selector:
7 | matchLabels:
8 | name: {{ .Values.name }}
9 | template:
10 | metadata:
11 | labels:
12 | name: {{ .Values.name }}
13 | spec:
14 | containers:
15 | - name: {{ .Values.name }}
16 | image: {{ .Values.image.name }}:{{ .Values.image.tag }}
17 | ports:
18 | - containerPort: {{ .Values.ports.nodeport }}
--------------------------------------------------------------------------------
/charts/deployment/templates/ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Ingress
3 | metadata:
4 | name: {{ .Values.name }}
5 | annotations:
6 | alb.ingress.kubernetes.io/scheme: internal
7 | kubernetes.io/ingress.class: alb
8 | kubernetes.io/cluster: teraki-eks-cluster
9 | spec:
10 | rules:
11 | - http:
12 | paths:
13 | - path: /
14 | backend:
15 | serviceName: {{ .Values.name }}
16 | servicePort: {{ .Values.ports.port }}
--------------------------------------------------------------------------------
/charts/deployment/templates/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: {{ .Values.name }}
5 | spec:
6 | type: NodePort
7 | ports:
8 | - port: {{ .Values.ports.port }}
9 | targetPort: {{ .Values.ports.nodeport }}
10 | selector:
11 | name: {{ .Values.name }}
12 |
--------------------------------------------------------------------------------
/charts/deployment/values.yaml:
--------------------------------------------------------------------------------
1 | name: hello-kubernetes
2 |
3 | image:
4 | name: paulbouwer/hello-kubernetes
5 | tag: 1.8
6 |
7 | ports:
8 | nodeport: 8080
9 | port: 80
10 |
--------------------------------------------------------------------------------
/deployment.tf:
--------------------------------------------------------------------------------
1 | resource "helm_release" "app" {
2 | chart = "charts/deployment"
3 | name = "deployment"
4 | version = "1.0.0"
5 | namespace = var.namespaces
6 | }
--------------------------------------------------------------------------------
/ec2.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | private_key_filename = "${var.path}/${var.name}.pem"
3 | }
4 |
5 | # Create a new instance of the Ubuntu 18.04 on an
6 |
7 | resource "tls_private_key" "vpn" {
8 | algorithm = "RSA"
9 | rsa_bits = 4096
10 | }
11 |
12 | resource "aws_key_pair" "vpn" {
13 | key_name = "VPN"
14 | public_key = tls_private_key.vpn.public_key_openssh
15 | }
16 |
17 |
18 | resource "local_file" "private_key_pem" {
19 | count = var.path != "" ? 1 : 0
20 | content = tls_private_key.vpn.private_key_pem
21 | filename = local.private_key_filename
22 | }
23 |
24 | resource "null_resource" "chmod" {
25 | count = var.path != "" ? 1 : 0
26 | depends_on = [local_file.private_key_pem]
27 |
28 | triggers = {
29 | key = tls_private_key.vpn.private_key_pem
30 | }
31 |
32 | provisioner "local-exec" {
33 | command = "chmod 600 ${local.private_key_filename}"
34 | }
35 | }
36 |
37 | data "aws_ami" "ubuntu" {
38 | most_recent = true
39 | owners = ["099720109477"]
40 |
41 | filter {
42 | name = "name"
43 | values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"]
44 | }
45 |
46 | filter {
47 | name = "virtualization-type"
48 | values = ["hvm"]
49 | }
50 | }
51 |
52 | resource "aws_eip" "vpn" {
53 | instance = module.ec2-instance.id[0]
54 | vpc = true
55 | }
56 |
57 |
58 | resource "null_resource" "wait_10_seconds_ansible" {
59 | depends_on = [aws_eip.vpn]
60 |
61 | provisioner "local-exec" {
62 | command = "sleep 10"
63 | }
64 | }
65 |
66 | module "ec2-instance" {
67 | source = "terraform-aws-modules/ec2-instance/aws"
68 | version = "2.15.0"
69 | name = "vpn"
70 | ami = data.aws_ami.ubuntu.id
71 | instance_type = var.instance_type_ec2
72 | vpc_security_group_ids = [aws_security_group.allow_tls.id]
73 | key_name = aws_key_pair.vpn.key_name
74 | subnet_ids = module.vpc.public_subnets
75 | }
76 |
77 | resource "null_resource" "ansible" {
78 | connection {
79 | host = aws_eip.vpn.public_ip
80 | }
81 |
82 | provisioner "local-exec" {
83 | command = <<EOT
84 |
85 | echo "" > openvpn.ini;
86 | echo "[openVPN]" | tee -a openvpn.ini;
87 | echo "${aws_eip.vpn.public_ip} ansible_user=${var.ansible_user} ansible_ssh_private_key_file=${local.private_key_filename}" | tee -a openvpn.ini;
88 | export ANSIBLE_HOST_KEY_CHECKING=False;
89 | ansible-playbook --user ${var.ansible_user} --private-key ${local.private_key_filename} -i openvpn.ini OpenVPN/playbook.yml
90 | EOT
91 | }
92 | depends_on = [null_resource.wait_10_seconds_ansible]
93 | }
94 |
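For manual access to the VPN instance, the key written by `local_file.private_key_pem` can be reused; a sketch assuming the default `path = "."` and `name = "vpn"` variables (so the key lands at ./vpn.pem) and the Ubuntu AMI's default user:

```bash
ssh -i ./vpn.pem ubuntu@<elastic-ip-of-the-vpn-instance>
```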
--------------------------------------------------------------------------------
/eks:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/akshaykalra92/aws-eks-openvpn-project/d03d34e9b08e18e717bc77b9acb810aee960b233/eks
--------------------------------------------------------------------------------
/eks.tf:
--------------------------------------------------------------------------------
1 | # render Admin & Developer users list with the structure required by EKS module
2 | locals {
3 | admin_user_map_users = [
4 | for admin_user in var.admin_users :
5 | {
6 | userarn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:user/${admin_user}"
7 | username = admin_user
8 | groups = ["system:masters"]
9 | }
10 | ]
11 | developer_user_map_users = [
12 | for developer_user in var.developer_users :
13 | {
14 | userarn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:user/${developer_user}"
15 | username = developer_user
16 | groups = ["${var.name_prefix}-developers"]
17 | }
18 | ]
19 | }
20 |
21 | # create EKS cluster
22 | module "eks-cluster" {
23 | source = "terraform-aws-modules/eks/aws"
24 | cluster_name = var.cluster_name
25 | cluster_version = "1.17"
26 | write_kubeconfig = true
27 | version = "12.2.0"
28 | config_output_path = "./"
29 | workers_additional_policies = [aws_iam_policy.worker_policy.arn]
30 | subnets = module.vpc.private_subnets
31 | vpc_id = module.vpc.vpc_id
32 | wait_for_cluster_cmd = "until curl -k -s $ENDPOINT/healthz >/dev/null; do sleep 4; done"
33 | # wait_for_cluster_interpreter = ["C:/Program Files/Git/bin/sh.exe", "-c"] # Remove this line if you are running terraform from linux or mac.
34 |
35 | node_groups = {
36 | first = {
37 | desired_capacity = var.autoscaling_minimum_size_by_az * length(data.aws_availability_zones.available.zone_ids)
38 | max_capacity = var.autoscaling_maximum_size_by_az * length(data.aws_availability_zones.available.zone_ids)
39 | min_capacity = var.autoscaling_minimum_size_by_az * length(data.aws_availability_zones.available.zone_ids)
40 |
41 | instance_type = var.asg_instance_types
42 | }
43 |
44 | second = {
45 | desired_capacity = var.autoscaling_minimum_size_by_az * length(data.aws_availability_zones.available.zone_ids)
46 | max_capacity = var.autoscaling_maximum_size_by_az * length(data.aws_availability_zones.available.zone_ids)
47 | min_capacity = var.autoscaling_minimum_size_by_az * length(data.aws_availability_zones.available.zone_ids)
48 |
49 | instance_type = var.asg_instance_types
50 | }
51 | }
52 | # map developer & admin ARNs as kubernetes Users
53 | map_users = concat(local.admin_user_map_users, local.developer_user_map_users)
54 | }
55 |
56 | # get EKS cluster info to configure Kubernetes and Helm providers
57 | data "aws_eks_cluster" "cluster" {
58 | name = module.eks-cluster.cluster_id
59 | }
60 | data "aws_eks_cluster_auth" "cluster" {
61 | name = module.eks-cluster.cluster_id
62 | }
63 |
64 | resource "aws_iam_policy" "worker_policy" {
65 | name = "worker-policy"
66 | description = "Worker policy for the ALB Ingress"
67 |
68 | policy = file("iam-policy.json")
69 | }
70 |
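The node group sizes are the per-AZ variables multiplied by the number of availability zones reported by `data.aws_availability_zones.available`. For example, with the values in terraform.tfvars (`autoscaling_minimum_size_by_az = 1`, `autoscaling_maximum_size_by_az = 10`) and a region exposing three AZs, each of the two node groups would get:

```
min_capacity     = 1  * 3 = 3
desired_capacity = 1  * 3 = 3
max_capacity     = 10 * 3 = 30
```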
--------------------------------------------------------------------------------
/iam-policy.json:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2012-10-17",
3 | "Statement": [
4 | {
5 | "Effect": "Allow",
6 | "Action": [
7 | "acm:DescribeCertificate",
8 | "acm:ListCertificates",
9 | "acm:GetCertificate"
10 | ],
11 | "Resource": "*"
12 | },
13 | {
14 | "Effect": "Allow",
15 | "Action": [
16 | "ec2:AuthorizeSecurityGroupIngress",
17 | "ec2:CreateSecurityGroup",
18 | "ec2:CreateTags",
19 | "ec2:DeleteTags",
20 | "ec2:DeleteSecurityGroup",
21 | "ec2:DescribeAccountAttributes",
22 | "ec2:DescribeAddresses",
23 | "ec2:DescribeInstances",
24 | "ec2:DescribeInstanceStatus",
25 | "ec2:DescribeInternetGateways",
26 | "ec2:DescribeNetworkInterfaces",
27 | "ec2:DescribeSecurityGroups",
28 | "ec2:DescribeSubnets",
29 | "ec2:DescribeTags",
30 | "ec2:DescribeVpcs",
31 | "ec2:ModifyInstanceAttribute",
32 | "ec2:ModifyNetworkInterfaceAttribute",
33 | "ec2:RevokeSecurityGroupIngress"
34 | ],
35 | "Resource": "*"
36 | },
37 | {
38 | "Effect": "Allow",
39 | "Action": [
40 | "elasticloadbalancing:AddListenerCertificates",
41 | "elasticloadbalancing:AddTags",
42 | "elasticloadbalancing:CreateListener",
43 | "elasticloadbalancing:CreateLoadBalancer",
44 | "elasticloadbalancing:CreateRule",
45 | "elasticloadbalancing:CreateTargetGroup",
46 | "elasticloadbalancing:DeleteListener",
47 | "elasticloadbalancing:DeleteLoadBalancer",
48 | "elasticloadbalancing:DeleteRule",
49 | "elasticloadbalancing:DeleteTargetGroup",
50 | "elasticloadbalancing:DeregisterTargets",
51 | "elasticloadbalancing:DescribeListenerCertificates",
52 | "elasticloadbalancing:DescribeListeners",
53 | "elasticloadbalancing:DescribeLoadBalancers",
54 | "elasticloadbalancing:DescribeLoadBalancerAttributes",
55 | "elasticloadbalancing:DescribeRules",
56 | "elasticloadbalancing:DescribeSSLPolicies",
57 | "elasticloadbalancing:DescribeTags",
58 | "elasticloadbalancing:DescribeTargetGroups",
59 | "elasticloadbalancing:DescribeTargetGroupAttributes",
60 | "elasticloadbalancing:DescribeTargetHealth",
61 | "elasticloadbalancing:ModifyListener",
62 | "elasticloadbalancing:ModifyLoadBalancerAttributes",
63 | "elasticloadbalancing:ModifyRule",
64 | "elasticloadbalancing:ModifyTargetGroup",
65 | "elasticloadbalancing:ModifyTargetGroupAttributes",
66 | "elasticloadbalancing:RegisterTargets",
67 | "elasticloadbalancing:RemoveListenerCertificates",
68 | "elasticloadbalancing:RemoveTags",
69 | "elasticloadbalancing:SetIpAddressType",
70 | "elasticloadbalancing:SetSecurityGroups",
71 | "elasticloadbalancing:SetSubnets",
72 | "elasticloadbalancing:SetWebAcl"
73 | ],
74 | "Resource": "*"
75 | },
76 | {
77 | "Effect": "Allow",
78 | "Action": [
79 | "iam:CreateServiceLinkedRole",
80 | "iam:GetServerCertificate",
81 | "iam:ListServerCertificates"
82 | ],
83 | "Resource": "*"
84 | },
85 | {
86 | "Effect": "Allow",
87 | "Action": [
88 | "cognito-idp:DescribeUserPoolClient"
89 | ],
90 | "Resource": "*"
91 | },
92 | {
93 | "Effect": "Allow",
94 | "Action": [
95 | "waf-regional:GetWebACLForResource",
96 | "waf-regional:GetWebACL",
97 | "waf-regional:AssociateWebACL",
98 | "waf-regional:DisassociateWebACL"
99 | ],
100 | "Resource": "*"
101 | },
102 | {
103 | "Effect": "Allow",
104 | "Action": [
105 | "tag:GetResources",
106 | "tag:TagResources"
107 | ],
108 | "Resource": "*"
109 | },
110 | {
111 | "Effect": "Allow",
112 | "Action": [
113 | "waf:GetWebACL"
114 | ],
115 | "Resource": "*"
116 | },
117 | {
118 | "Effect": "Allow",
119 | "Action": [
120 | "wafv2:GetWebACL",
121 | "wafv2:GetWebACLForResource",
122 | "wafv2:AssociateWebACL",
123 | "wafv2:DisassociateWebACL"
124 | ],
125 | "Resource": "*"
126 | }
127 | ]
128 | }
--------------------------------------------------------------------------------
/ingress.tf:
--------------------------------------------------------------------------------
1 | resource "helm_release" "ingress" {
2 | name = "aws-alb-ingress-controller"
3 | chart = "charts/aws-alb-ingress-controller"
4 | repository = "aws-alb-ingress-controller"
5 | version = "1.0.4"
6 | namespace = var.namespaces
7 |
8 | set {
9 | name = "autoDiscoverAwsRegion"
10 | value = "true"
11 | }
12 | set {
13 | name = "autoDiscoverAwsVpcID"
14 | value = "true"
15 | }
16 | set {
17 | name = "clusterName"
18 | value = var.cluster_name
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/namespace.tf:
--------------------------------------------------------------------------------
1 | resource "kubernetes_namespace" "eks_namespaces" {
2 | metadata {
3 | annotations = {
4 | name = var.namespaces
5 | }
6 | name = var.namespaces
7 | }
8 | }
--------------------------------------------------------------------------------
/output.tf:
--------------------------------------------------------------------------------
1 | output "public_key_openssh" {
2 | value = tls_private_key.vpn.public_key_openssh
3 | }
4 |
5 | output "private_key_pem" {
6 | value = tls_private_key.vpn.private_key_pem
7 | }
8 |
--------------------------------------------------------------------------------
/provider.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = "us-west-1"
3 | version = "~> 2.57.0"
4 | }
5 |
6 | provider "random" {
7 | version = "~> 2.2.1"
8 | }
9 |
10 | provider "kubernetes" {
11 | host = data.aws_eks_cluster.cluster.endpoint
12 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
13 | token = data.aws_eks_cluster_auth.cluster.token
14 | load_config_file = false
15 | version = "~> 1.9"
16 | }
17 |
18 | provider "helm" {
19 | kubernetes {
20 | host = data.aws_eks_cluster.cluster.endpoint
21 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
22 | token = data.aws_eks_cluster_auth.cluster.token
23 | load_config_file = false
24 | }
25 | version = "1.3.1"
26 | }
27 |
28 | data "aws_caller_identity" "current" {}
29 |
--------------------------------------------------------------------------------
/rbac.tf:
--------------------------------------------------------------------------------
1 | resource "kubernetes_cluster_role" "iam_roles_developers" {
2 | metadata {
3 | name = "${var.name_prefix}-developers"
4 | }
5 |
6 | rule {
7 | api_groups = ["*"]
8 | resources = ["pods", "pods/log", "deployments", "ingresses", "services"]
9 | verbs = ["get", "list"]
10 | }
11 |
12 | rule {
13 | api_groups = ["*"]
14 | resources = ["pods/exec"]
15 | verbs = ["create"]
16 | }
17 |
18 | rule {
19 | api_groups = ["*"]
20 | resources = ["pods/portforward"]
21 | verbs = ["*"]
22 | }
23 | }
24 |
25 | resource "kubernetes_cluster_role_binding" "iam_roles_developers" {
26 | metadata {
27 | name = "${var.name_prefix}-developers"
28 | }
29 |
30 | role_ref {
31 | api_group = "rbac.authorization.k8s.io"
32 | kind = "ClusterRole"
33 | name = "${var.name_prefix}-developers"
34 | }
35 |
36 | dynamic "subject" {
37 | for_each = toset(var.developer_users)
38 |
39 | content {
40 | name = subject.key
41 | kind = "User"
42 | api_group = "rbac.authorization.k8s.io"
43 | }
44 | }
45 | }
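After apply, the developer permissions can be spot-checked with kubectl impersonation; for instance, for the `agnes` user listed in terraform.tfvars:

```bash
KUBECONFIG=./kubeconfig_teraki-eks-cluster kubectl auth can-i list pods --as=agnes -n web-app    # yes
KUBECONFIG=./kubeconfig_teraki-eks-cluster kubectl auth can-i delete pods --as=agnes -n web-app  # no (not granted by the rules above)
```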
--------------------------------------------------------------------------------
/security_grp.tf:
--------------------------------------------------------------------------------
1 | resource "aws_security_group" "allow_tls" {
2 | name = "VPN_security_grp"
3 | description = "Allow TLS"
4 | vpc_id = module.vpc.vpc_id
5 |
6 | ingress {
7 | description = "VPN port"
8 | from_port = 1194
9 | to_port = 1194
10 | protocol = "udp"
11 | cidr_blocks = ["0.0.0.0/0"]
12 | }
13 |
14 | ingress {
15 | description = "ssh"
16 | from_port = 22
17 | to_port = 22
18 | protocol = "tcp"
19 | cidr_blocks = ["0.0.0.0/0"]
20 | }
21 |
22 | egress {
23 | from_port = 0
24 | to_port = 0
25 | protocol = "-1"
26 | cidr_blocks = ["0.0.0.0/0"]
27 | }
28 |
29 | tags = {
30 | Name = "allow_tls"
31 | iac_environment = var.iac_environment_tag
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/terraform.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = "~> 0.12.24"
3 | }
--------------------------------------------------------------------------------
/terraform.tfvars:
--------------------------------------------------------------------------------
1 | cluster_name = "teraki-eks-cluster"
2 | iac_environment_tag = "development"
3 | name_prefix = "teraki"
4 | main_network_block = "10.0.0.0/16"
5 | subnet_prefix_extension = 4
6 | zone_offset = 8
7 | admin_users = ["akshay-kalra", "Navera"]
8 | developer_users = ["agnes", "test"]
9 | asg_instance_types = "t3.small"
10 | instance_type = "t2.micro"
11 | autoscaling_minimum_size_by_az = 1
12 | autoscaling_maximum_size_by_az = 10
13 | autoscaling_average_cpu = 30
14 | namespaces = "web-app"
15 | instance_type_ec2 = "t2.small"
16 | vpc_name = "teraki-vpc"
17 | cidr = "10.0.0.0/16"
18 |
--------------------------------------------------------------------------------
/variable.tf:
--------------------------------------------------------------------------------
1 | #VPC
2 | variable "cluster_name" {
3 | type = string
4 | description = "EKS cluster name."
5 | }
6 | variable "iac_environment_tag" {
7 | type = string
8 | description = "AWS tag to indicate environment name of each infrastructure object."
9 | }
10 | variable "name_prefix" {
11 | type = string
12 | description = "Prefix to be used on each infrastructure object Name created in AWS."
13 | }
14 | variable "main_network_block" {
15 | type = string
16 | description = "Base CIDR block to be used in our VPC."
17 | }
18 | variable "subnet_prefix_extension" {
19 | type = number
20 | description = "CIDR block bits extension to calculate CIDR blocks of each subnetwork."
21 | }
22 | variable "zone_offset" {
23 | type = number
24 | description = "CIDR block bits extension offset to calculate Public subnets, avoiding collisions with Private subnets."
25 | }
26 |
27 | #EKS
28 |
29 | variable "admin_users" {
30 | type = list(string)
31 | description = "List of Kubernetes admins."
32 | }
33 | variable "developer_users" {
34 | type = list(string)
35 | description = "List of Kubernetes developers."
36 | }
37 | variable "asg_instance_types" {
38 | type = string
39 | description = "EC2 instance machine types to be used in EKS."
40 | }
41 | variable "instance_type" {
42 | type = string
43 | description = "EC2 instance machine types to be used in EKS."
44 | }
45 | variable "autoscaling_minimum_size_by_az" {
46 | type = number
47 | description = "Minimum number of EC2 instances to autoscale our EKS cluster on each AZ."
48 | }
49 | variable "autoscaling_maximum_size_by_az" {
50 | type = number
51 | description = "Maximum number of EC2 instances to autoscale our EKS cluster on each AZ."
52 | }
53 | variable "autoscaling_average_cpu" {
54 | type = number
55 | description = "Average CPU threshold to autoscale EKS EC2 instances."
56 | }
57 | variable "namespaces" {
58 | type = string
59 | description = "namespaces to be created in our EKS Cluster."
60 | }
61 |
62 | variable "ansible_user" {
63 | type = string
64 | default = "ubuntu"
65 | }
66 |
67 | variable "name" {
68 | description = "Unique name for the key, should also be a valid filename. This will prefix the public/private key."
69 | default = "vpn"
70 | type = string
71 | }
72 |
73 | variable "path" {
74 | description = "Path to a directory where the public and private key will be stored."
75 | default = "."
76 | type = string
77 | }
78 |
79 | variable "instance_type_ec2" {
80 | description = "Instance type of ec2."
81 | default = "t2.small"
82 | type = string
83 | }
84 |
85 | variable "vpc_name" {
86 | description = "vpc name"
87 | default = ""
88 | type = string
89 | }
90 |
91 | variable "cidr" {
92 | description = "VPC cidr"
93 | default = ""
94 | type = string
95 | }
96 |
--------------------------------------------------------------------------------
/vpc.tf:
--------------------------------------------------------------------------------
1 |
2 | # reserve Elastic IP to be used in our NAT gateway
3 | resource "aws_eip" "nat_gw_elastic_ip" {
4 | vpc = true
5 |
6 | tags = {
7 | Name = "${var.cluster_name}-nat-eip"
8 | iac_environment = var.iac_environment_tag
9 | }
10 | }
11 |
12 | data "aws_availability_zones" "available" {}
13 |
14 | module "vpc" {
15 | source = "terraform-aws-modules/vpc/aws"
16 | version = "2.6.0"
17 | name = var.vpc_name
18 | cidr = var.cidr
19 | azs = data.aws_availability_zones.available.names
20 | private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
21 | public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
22 | enable_nat_gateway = true
23 | single_nat_gateway = true
24 | enable_dns_hostnames = true
25 | reuse_nat_ips = true
26 | external_nat_ip_ids = [aws_eip.nat_gw_elastic_ip.id]
27 |
28 |
29 | tags = {
30 | "kubernetes.io/cluster/${var.cluster_name}" = "shared"
31 | iac_environment = var.iac_environment_tag
32 | }
33 |
34 | public_subnet_tags = {
35 | "kubernetes.io/cluster/${var.cluster_name}" = "shared"
36 | "kubernetes.io/role/elb" = "1"
37 | iac_environment = var.iac_environment_tag
38 | type = "public"
39 | }
40 |
41 | private_subnet_tags = {
42 | "kubernetes.io/cluster/${var.cluster_name}" = "shared"
43 | "kubernetes.io/role/internal-elb" = "1"
44 | type = "private"
45 | iac_environment = var.iac_environment_tag
46 | }
47 | }
48 |
--------------------------------------------------------------------------------