├── 2-Tier-Architecture
├── Readme.md
├── ec2.tf
├── providers.tf
├── rds.tf
└── vpc.tf
├── 3-Tier-Architecture
├── Readme.md
├── alb.tf
├── ec2.tf
├── eip.tf
├── internet-gw.tf
├── natgw.tf
├── provider.tf
├── route-tb.tf
├── sg.tf
├── subnet.tf
├── variable.tf
└── vpc.tf
├── 3-Tier-EKS-Architecture
├── Manifests
│ ├── backend-deployment.yaml
│ ├── backend-service.yaml
│ ├── cron.yaml
│ ├── daemonset.yaml
│ ├── frontend-deployment.yaml
│ ├── frontend-service.yaml
│ ├── full_stack_lb.yaml
│ ├── hpa.yaml
│ ├── job.yaml
│ ├── mongo
│ │ ├── deploy.yaml
│ │ ├── secrets.yaml
│ │ └── service.yaml
│ ├── monitoring-lb.yaml
│ ├── stateful.yaml
│ └── values.yaml
├── Readme.md
├── app
│ ├── backend
│ │ ├── Dockerfile
│ │ ├── db.js
│ │ ├── index.js
│ │ ├── models
│ │ │ └── task.js
│ │ ├── package-lock.json
│ │ ├── package.json
│ │ └── routes
│ │ │ └── tasks.js
│ └── frontend
│ │ ├── Dockerfile
│ │ ├── package-lock.json
│ │ ├── package.json
│ │ ├── public
│ │ ├── favicon.ico
│ │ ├── index.html
│ │ ├── logo192.png
│ │ ├── logo512.png
│ │ ├── manifest.json
│ │ └── robots.txt
│ │ └── src
│ │ ├── App.css
│ │ ├── App.js
│ │ ├── Tasks.js
│ │ ├── index.css
│ │ ├── index.js
│ │ └── services
│ │ └── taskServices.js
└── terraform
│ ├── autoscaler-iam.tf
│ ├── autoscaler-manifests.tf
│ ├── backend.tf
│ ├── eks.tf
│ ├── helm-lb.tf
│ ├── helm-provider.tf
│ ├── iam.tf
│ ├── igw.tf
│ ├── monitorng.tf
│ ├── provider.tf
│ ├── variable.tf
│ └── vpc.tf
├── EKS-Cluster
├── 0-locals.tf
├── 1-providers.tf
├── 10-add-manager-role.tf
├── 11-helm-provider.tf
├── 12-metrics-server.tf
├── 13-pod-Identity-addon.tf
├── 14-cluster-autoscaler.tf
├── 15-aws-lbc.tf
├── 16-nginx-ingress.tf
├── 17-cert-manager.tf
├── 18-ebs-csi-driver.tf
├── 19-openid-connect-provider.tf
├── 2-vpc.tf
├── 20-efs.tf
├── 21-secrets-store-csi-driver.tf
├── 3-igw.tf
├── 4-subnets.tf
├── 5-nat.tf
├── 6-routes.tf
├── 7-eks.tf
├── 8-nodes.tf
├── 9-add-developer-user.tf
├── iam
│ └── AWSLoadBalancerController.json
└── values
│ ├── metrics-server.yaml
│ └── nginx-ingress.yaml
├── LICENSE
└── README.md
/2-Tier-Architecture/Readme.md:
--------------------------------------------------------------------------------
1 | ## 2-Tier-Architecture Project with Terraform and AWS
2 |
3 | This project showcases implementing a scalable and resilient 2-tier architecture using Terraform and AWS. Leveraging the power of infrastructure as code, this setup provides a solid foundation for deploying web applications with high availability and fault tolerance.
4 |
5 | ## Workflow
6 |
7 |
8 |
9 |
10 | ### Features
11 | - **Tier 1: Web Tier**
12 |
13 | - EC2 instances provisioned in public subnets
14 | - Auto Scaling Group for handling the dynamic workload
15 | - Load Balancer for distributing traffic and ensuring high availability
16 | - Security Groups to control inbound and outbound traffic
17 |
18 | - **Tier 2: Database Tier**
19 |
20 | - RDS MySQL instance in a private subnet
21 | - Secure network access using security groups
22 |
23 | ## Prerequisites
24 | To use this project, you need to have the following prerequisites:
25 |
26 | - AWS account with necessary permissions
27 | - Terraform installed on your local machine
28 |
29 | ## Getting Started
30 |
31 | 1. Clone this repository to your local machine.
32 | 2. Navigate to the project directory.
33 |
34 | ```bash
35 | $ cd 2-Tier-Architecture
36 | ```
37 |
38 | 3. Configure your AWS credentials by setting the environment variables or using the AWS CLI.
39 | 4. Initialize the Terraform project.
40 | ```bash
41 | $ terraform init
42 | ```
43 | 5. Review the execution plan.
44 | ```bash
45 | $ terraform plan
46 | ```
47 | 6. Deploy the architecture.
48 | ```bash
49 | $ terraform apply
50 | ```
51 |
52 | 7. Confirm the deployment by typing `yes` when prompted.
53 |
54 | ## Cleanup
55 | - To clean up and destroy the infrastructure created by this project, run the following command:
56 |
57 | ```bash
58 | $ terraform destroy
59 | ```
60 | Note: Be cautious as this action cannot be undone.
61 |
62 | Contributions are welcome! If you find any issues or have suggestions for improvements, feel free to open an issue or submit a pull request.
63 |
64 | ## License
65 | This project is licensed under the MIT License.
66 |
67 |
--------------------------------------------------------------------------------
/2-Tier-Architecture/ec2.tf:
--------------------------------------------------------------------------------
1 | resource "aws_instance" "My-web-instance1" {
2 | ami = "ami-0d52744d6551d851e" #Amazon linux 2 AMI
3 | key_name = "mykeypair"
4 | associate_public_ip_address = true
5 | subnet_id = aws_subnet.Public_subnet-1.id
6 | instance_type = "t2.micro"
7 | vpc_security_group_ids = [aws_security_group.custom_security_SG_DB.id]
8 | user_data = <<-EOF
9 | #!/bin/bash
10 | yum update -y
11 | yum install httpd -y
12 | systemctl start httpd
13 | systemctl enable httpd
14 | echo "This is My Custom Project Tier 1 " > /var/www/html/index.html
15 | EOF
16 | }
17 |
18 | resource "aws_instance" "My-web-instance2" {
19 | ami = "ami-0d52744d6551d851e" #Amazon linux 2 AMI
20 | key_name = "mykeypair"
21 | associate_public_ip_address = true
22 | subnet_id = aws_subnet.Public_subnet-2.id
23 | instance_type = "t2.micro"
24 | vpc_security_group_ids = [aws_security_group.custom_security_SG_DB.id]
25 | user_data = <<-EOF
26 | #!/bin/bash
27 | yum update -y
28 | yum install httpd -y
29 | systemctl start httpd
30 | systemctl enable httpd
31 | echo "This is My Custom Project Tier 2 " > /var/www/html/index.html
32 | EOF
33 | }
34 |
--------------------------------------------------------------------------------
/2-Tier-Architecture/providers.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | aws = {
4 | source = "hashicorp/aws"
5 | version = "~> 4.57.0"
6 | }
7 | }
8 |
9 | required_version = "~> 1.4.6"
10 | }
11 |
12 | provider "aws" {
13 | region = "ap-northeast-1"
14 | }
15 |
--------------------------------------------------------------------------------
/2-Tier-Architecture/rds.tf:
--------------------------------------------------------------------------------
1 | #creating RDS Database (MySQL, private subnets via the custom subnet group)
2 | resource "aws_db_instance" "My_database" {
3 |   allocated_storage    = 10
4 |   engine               = "mysql"
5 |   engine_version       = "5.7"
6 |   instance_class       = "db.t2.micro"
7 |   db_subnet_group_name = aws_db_subnet_group.My-custom-subgroup.id
8 |   # Fixed: the original reference "aws_security_group.aws_security_group.custom_security_SG_DB.id"
9 |   # was doubly qualified and fails to resolve. Attach the dedicated private DB-tier
10 |   # security group defined below instead of the public web SG.
11 |   vpc_security_group_ids = [aws_security_group.My_database_tier_lu.id]
12 |   # NOTE(review): hard-coded credentials — move to variables or AWS Secrets Manager.
13 |   username             = "username"
14 |   password             = "password"
15 |   parameter_group_name = "default.mysql5.7"
16 |   skip_final_snapshot  = true
17 | }
15 |
16 | #creating private security group for Database tier
17 | resource "aws_security_group" "My_database_tier_lu" {
18 | name = "My_database_tier_lu"
19 | description = "allow traffic from SSH & HTTP"
20 | vpc_id = aws_vpc.Custom-vpc.id
21 | ingress {
22 | from_port = 8279 #default port is 3306. You can also use 3307 & 8279 like myself
23 | to_port = 8279
24 | protocol = "tcp"
25 | cidr_blocks = ["10.0.0.0/16"]
26 | security_groups = [aws_security_group.My-SG.id]
27 | }
28 | ingress {
29 | from_port = 22
30 | to_port = 22
31 | protocol = "tcp"
32 | cidr_blocks = ["0.0.0.0/0"]
33 | }
34 | egress {
35 | from_port = 0
36 | to_port = 0
37 | protocol = "-1"
38 | cidr_blocks = ["0.0.0.0/0"]
39 |
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/2-Tier-Architecture/vpc.tf:
--------------------------------------------------------------------------------
1 | resource "aws_vpc" "Custom-vpc" {
2 | cidr_block = "10.0.0.0/16"
3 | instance_tenancy = "default"
4 |
5 | tags = {
6 | Name = "Customvpc"
7 | }
8 | }
9 |
10 | // creating a Public subnets
11 | resource "aws_subnet" "Public_subnet-1" {
12 | vpc_id = aws_vpc.Custom-vpc.id
13 | cidr_block = "10.0.1.0/24"
14 | map_public_ip_on_launch = true
15 | availability_zone = "ap-northeast-1a"
16 |
17 | tags = {
18 | Name = "Public-subnet-1"
19 | }
20 | }
21 |
22 | resource "aws_subnet" "Public_subnet-2" {
23 | vpc_id = aws_vpc.Custom-vpc.id
24 | cidr_block = "10.0.2.0/24"
25 | availability_zone = "ap-northeast-1c"
26 | map_public_ip_on_launch = true
27 |
28 | tags = {
29 | Name = "Public-subnet-2"
30 | }
31 | }
32 |
33 | resource "aws_internet_gateway" "gw" {
34 | vpc_id = aws_vpc.Custom-vpc.id
35 |
36 | tags = {
37 | Name = "My_Internet_Gateaway"
38 | }
39 | }
40 |
41 | resource "aws_eip" "custom_eip" {
42 | vpc = true
43 | }
44 |
45 | #Create a NAT gateway and associate it with an Elastic IP and a public subnet
46 | resource "aws_nat_gateway" "custom_nat_gateway" {
47 | allocation_id = aws_eip.custom_eip.id
48 | subnet_id = aws_subnet.Public_subnet-2.id
49 | }
50 |
51 | #creating NAT route
52 | resource "aws_route_table" "private_route_table" {
53 | vpc_id = aws_vpc.Custom-vpc.id
54 |
55 | route {
56 | cidr_block = "0.0.0.0/0"
57 | nat_gateway_id = aws_nat_gateway.custom_nat_gateway.id
58 | }
59 |
60 | tags = {
61 | Name = "custom-nat-gateaway"
62 | }
63 | }
64 |
65 | resource "aws_subnet" "Private_subnet-1" {
66 | vpc_id = aws_vpc.Custom-vpc.id
67 | cidr_block = "10.0.3.0/24"
68 | availability_zone = "ap-northeast-1a"
69 | map_public_ip_on_launch = false
70 |
71 | tags = {
72 | Name = "Private_subnet-1"
73 | }
74 | }
75 |
76 | resource "aws_subnet" "Private_subnet-2" {
77 | vpc_id = aws_vpc.Custom-vpc.id
78 | cidr_block = "10.0.4.0/24"
79 | availability_zone = "ap-northeast-1c"
80 | map_public_ip_on_launch = false
81 |
82 | tags = {
83 | Name = "Private_subnet-2"
84 | }
85 | }
86 |
87 | #creating database RDS
88 | resource "aws_db_subnet_group" "My-custom-subgroup" {
89 | name = "my-custom-subgroup"
90 | subnet_ids = [aws_subnet.Private_subnet-1.id, aws_subnet.Private_subnet-2.id]
91 | tags = {
92 | Name = "My database subnet group"
93 | }
94 | }
95 |
96 | resource "aws_route_table_association" "private_route_table_ass_1" {
97 | subnet_id = aws_subnet.Private_subnet-1.id
98 | route_table_id = aws_route_table.private_route_table.id
99 | }
100 |
101 | resource "aws_route_table_association" "private_route_table_ass_2" {
102 | subnet_id = aws_subnet.Private_subnet-2.id
103 | route_table_id = aws_route_table.private_route_table.id
104 | }
105 |
106 | // creating a security groups
107 | resource "aws_security_group" "My-SG" {
108 | name = "My-SG"
109 | description = "security group for load balancer"
110 | vpc_id = aws_vpc.Custom-vpc.id
111 |
112 | ingress {
113 | from_port = "0"
114 | to_port = "0"
115 | protocol = "-1"
116 | cidr_blocks = ["0.0.0.0/0"]
117 | }
118 |
119 | egress {
120 | from_port = "0"
121 | to_port = "0"
122 | protocol = "-1"
123 | cidr_blocks = ["0.0.0.0/0"]
124 | }
125 | }
126 |
127 | resource "aws_route_table" "public_route_table" {
128 |   vpc_id = aws_vpc.Custom-vpc.id
129 |
130 |   route {
131 |     cidr_block = "0.0.0.0/0"
132 |     # Fixed: the default route of a PUBLIC route table must target the Internet
133 |     # Gateway. The original pointed gateway_id at the NAT gateway, which is not a
134 |     # valid value for gateway_id (NAT gateways use nat_gateway_id) and would leave
135 |     # the public subnets without internet ingress.
136 |     gateway_id = aws_internet_gateway.gw.id
137 |   }
138 |
139 |   tags = {
140 |     Name = "public-route-table"
141 |   }
142 | }
139 |
140 | resource "aws_route_table_association" "public_route_table_ass_1" {
141 | subnet_id = aws_subnet.Public_subnet-1.id
142 | route_table_id = aws_route_table.public_route_table.id
143 | }
144 |
145 | resource "aws_route_table_association" "public_route_table_ass_2" {
146 | subnet_id = aws_subnet.Public_subnet-2.id
147 | route_table_id = aws_route_table.public_route_table.id
148 | }
149 |
150 | // creating a public security group
151 | resource "aws_security_group" "custom_security_SG_DB" {
152 | name = "Custom-Public-SG-DB"
153 | description = "web and SSH allowed"
154 | vpc_id = aws_vpc.Custom-vpc.id
155 |
156 | ingress {
157 | from_port = 22
158 | to_port = 22
159 | protocol = "tcp"
160 | cidr_blocks = ["0.0.0.0/0"]
161 | }
162 |
163 | ingress {
164 | from_port = 80
165 | to_port = 80
166 | protocol = "tcp"
167 | cidr_blocks = ["0.0.0.0/0"]
168 | }
169 |
170 | egress {
171 | from_port = 0
172 | to_port = 0
173 | protocol = "-1"
174 | cidr_blocks = ["0.0.0.0/0"]
175 | }
176 | }
177 |
178 | #creating a loadbalancer
179 | resource "aws_lb" "My-lb" {
180 | name = "My-lb"
181 | internal = false
182 | load_balancer_type = "application"
183 | subnets = [aws_subnet.Public_subnet-1.id, aws_subnet.Public_subnet-2.id]
184 | security_groups = [aws_security_group.My-SG.id]
185 | }
186 |
187 | #creating load balancer target group
188 | resource "aws_lb_target_group" "My-lb-tg" {
189 | name = "Customtargetgroup"
190 | port = 80
191 | protocol = "HTTP"
192 | vpc_id = aws_vpc.Custom-vpc.id
193 |
194 | depends_on = [aws_vpc.Custom-vpc]
195 | }
196 |
197 | #creating load balancer target group
198 | resource "aws_lb_target_group_attachment" "My-target-group1" {
199 | target_group_arn = aws_lb_target_group.My-lb-tg.arn
200 | target_id = aws_instance.My-web-instance1.id
201 | port = 80
202 |
203 | depends_on = [aws_instance.My-web-instance1]
204 | }
205 | #creating load balancer target group
206 | resource "aws_lb_target_group_attachment" "My-target-group2" {
207 | target_group_arn = aws_lb_target_group.My-lb-tg.arn
208 | target_id = aws_instance.My-web-instance2.id
209 | port = 80
210 |
211 | depends_on = [aws_instance.My-web-instance2]
212 | }
213 | #creating load balancer listener
214 | resource "aws_lb_listener" "My-listener" {
215 | load_balancer_arn = aws_lb.My-lb.arn
216 | port = "80"
217 | protocol = "HTTP"
218 |
219 | default_action {
220 | type = "forward"
221 | target_group_arn = aws_lb_target_group.My-lb-tg.arn
222 | }
223 | }
224 |
--------------------------------------------------------------------------------
/3-Tier-Architecture/Readme.md:
--------------------------------------------------------------------------------
1 | ## 3-Tier-Architecture Project with Terraform and AWS
2 |
3 | This repository showcases the implementation of a scalable and resilient 3-tier architecture using Terraform and AWS. The project aims to provide hands-on experience in deploying web applications with high availability and fault tolerance by leveraging the power of infrastructure as code.
4 |
5 | ## Workflow of the Project
6 |
7 |
8 |
9 |
10 | ### Features
11 | - **Tier 1: Presentation Tier**
12 | - The presentation tier serves as the user interface layer, responsible for handling user interactions and displaying information to the users.
13 | - In this architecture, I have implemented an **Apache Webserver** hosted on EC2 instances.
14 | - These instances are placed in **public subnets**, allowing them to be accessible from the internet.
15 | - An **Application Load Balancer** can be set up to distribute incoming traffic across these instances, ensuring scalability and high availability.
16 |
17 | - **Tier 2: Logic Tier**
18 | - The logic tier, also known as the application layer, is responsible for processing user requests, executing business logic, and coordinating data flow between the presentation and data tiers.
19 | - In this architecture, EC2 instances have been used to host the logic tier. These instances are placed in private subnets, ensuring that they are not directly accessible from the internet.
20 | - The private subnet setup allows secure communication between the logic tier, presentation tier, and data tier.
21 | - Security groups can be configured to control inbound and outbound traffic to the logic tier instances, ensuring secure network access.
22 | - The logic tier interacts with the RDS MySQL instance, which is provisioned in a private subnet, to perform data operations required by the application.
23 |
24 | - **Tier 3: Database Tier**
25 | - The data tier is responsible for storing and managing data required by the application.
26 |    - In this architecture, **MySQL** is used as the database management system (DBMS).
27 | - An RDS instance can be provisioned in a **private subnet** to ensure secure access to the database.
28 | - The logic tier can interact with the MySQL database to perform data operations, such as retrieving, storing, and updating information.
29 | - The database can be secured using appropriate authentication mechanisms and access controls.
30 |
31 |
32 | ## Prerequisites
33 | To use this project, you need to have the following prerequisites:
34 |
35 | - AWS account with necessary permissions
36 | - Terraform installed on your local machine
37 |
38 | ## Getting Started
39 |
40 | 1. Clone this repository to your local machine.
41 | 2. Navigate to the project directory.
42 |
43 | ```bash
44 | $ cd 3-Tier-Architecture
45 | ```
46 |
47 | 3. Configure your AWS credentials by setting the environment variables or using the AWS CLI.
48 | 4. Update the `variable.tf` file with your desired configuration.
49 | 5. Initialize the Terraform project.
50 | ```bash
51 | $ terraform init
52 | ```
53 | 6. Review the execution plan.
54 | ```bash
55 | $ terraform plan
56 | ```
57 | 7. Deploy the architecture.
58 | ```bash
59 | $ terraform apply
60 | ```
61 |
62 | 8. Confirm the deployment by typing `yes` when prompted.
63 |
64 | ## Cleanup
65 | - To clean up and destroy the infrastructure created by this project, run the following command:
66 |
67 | ```bash
68 | $ terraform destroy
69 | ```
70 | Note: Be cautious as this action cannot be undone.
71 |
72 | Contributions are welcome! If you find any issues or have suggestions for improvements, feel free to open an issue or submit a pull request.
73 |
74 | ## License
75 | This project is licensed under the MIT License.
76 |
--------------------------------------------------------------------------------
/3-Tier-Architecture/alb.tf:
--------------------------------------------------------------------------------
1 | resource "aws_lb" "alb" {
2 | name = "test-lb-tf"
3 | internal = false
4 | load_balancer_type = "application"
5 | security_groups = [aws_security_group.allow_tls.id]
6 | subnets = [for subnet in aws_subnet.public : subnet.id]
7 |
8 | enable_deletion_protection = false
9 |
10 | tags = {
11 | Environment = "test"
12 | }
13 | }
14 |
15 | //Target Group
16 | resource "aws_lb_target_group" "albtg" {
17 | name = "tf-example-lb-tg"
18 | port = 80
19 | protocol = "HTTP"
20 | target_type = "instance"
21 | vpc_id = aws_vpc.main.id
22 |
23 | health_check {
24 | healthy_threshold = 3
25 | unhealthy_threshold = 10
26 | timeout = 5
27 | interval = 10
28 | path = "/"
29 | port = 80
30 | }
31 | }
32 |
33 | resource "aws_lb_target_group_attachment" "front_end" {
34 | target_group_arn = aws_lb_target_group.albtg.arn
35 | target_id = aws_instance.web[count.index].id
36 | port = 80
37 | count = 2
38 | }
39 |
40 | //Listener
41 | resource "aws_lb_listener" "albl" {
42 | load_balancer_arn = aws_lb.alb.arn
43 | port = "80"
44 | protocol = "HTTP"
45 |
46 | default_action {
47 | type = "forward"
48 | target_group_arn = aws_lb_target_group.albtg.arn
49 | }
50 | }
--------------------------------------------------------------------------------
/3-Tier-Architecture/ec2.tf:
--------------------------------------------------------------------------------
1 | resource "aws_instance" "web" {
2 | ami = "ami-0d52744d6551d851e"
3 | instance_type = "t2.micro"
4 | key_name = "mykeypair"
5 | subnet_id = aws_subnet.public[count.index].id
6 | vpc_security_group_ids = [aws_security_group.allow_tls.id]
7 | associate_public_ip_address = true
8 | count = 2
9 |
10 | tags = {
11 | Name = "WebServer"
12 | }
13 | }
14 |
15 | resource "aws_instance" "db" {
16 | ami = "ami-0d52744d6551d851e"
17 | instance_type = "t2.micro"
18 | key_name = "mykeypair"
19 | subnet_id = aws_subnet.private.id
20 | vpc_security_group_ids = [aws_security_group.allow_tls_db.id]
21 |
22 | tags = {
23 | Name = "DB Server"
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/3-Tier-Architecture/eip.tf:
--------------------------------------------------------------------------------
1 | resource "aws_eip" "myeip" {
2 | //instance = aws_instance.web.id
3 | vpc = true
4 | }
--------------------------------------------------------------------------------
/3-Tier-Architecture/internet-gw.tf:
--------------------------------------------------------------------------------
1 | resource "aws_internet_gateway" "gw" {
2 | vpc_id = aws_vpc.main.id
3 |
4 | tags = {
5 | Name = "main"
6 | }
7 | }
--------------------------------------------------------------------------------
/3-Tier-Architecture/natgw.tf:
--------------------------------------------------------------------------------
1 | resource "aws_nat_gateway" "natgw" {
2 | allocation_id = aws_eip.myeip.id
3 | subnet_id = aws_subnet.public[0].id
4 |
5 | tags = {
6 | Name = "gw NAT"
7 | }
8 |
9 | # To ensure proper ordering, it is recommended to add an explicit dependency
10 | # on the Internet Gateway for the VPC.
11 | depends_on = [aws_internet_gateway.gw]
12 | }
--------------------------------------------------------------------------------
/3-Tier-Architecture/provider.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | aws = {
4 | source = "hashicorp/aws"
5 | version = "4.20.1"
6 | }
7 | }
8 | }
9 |
10 | provider "aws" {
11 | region = "ap-south-1"
12 | }
--------------------------------------------------------------------------------
/3-Tier-Architecture/route-tb.tf:
--------------------------------------------------------------------------------
1 | resource "aws_route_table" "rtb" {
2 | vpc_id = aws_vpc.main.id
3 |
4 | route {
5 | cidr_block = "0.0.0.0/0"
6 | gateway_id = aws_internet_gateway.gw.id
7 | }
8 |
9 | tags = {
10 | Name = "MyRoute"
11 | }
12 | }
13 |
14 | resource "aws_route_table_association" "a" {
15 | subnet_id = aws_subnet.public[count.index].id
16 | route_table_id = aws_route_table.rtb.id
17 | count = 2
18 | }
19 |
20 | //Adding NAT Gateway into the default main route table
21 | resource "aws_default_route_table" "dftb" {
22 | default_route_table_id = aws_vpc.main.default_route_table_id
23 |
24 | route {
25 | cidr_block = "0.0.0.0/0"
26 | gateway_id = aws_nat_gateway.natgw.id
27 | }
28 |
29 | tags = {
30 | Name = "dftb"
31 | }
32 | }
--------------------------------------------------------------------------------
/3-Tier-Architecture/sg.tf:
--------------------------------------------------------------------------------
1 | resource "aws_security_group" "allow_tls" {
2 | name = "allow_tls"
3 | description = "Allow TLS inbound traffic"
4 | vpc_id = aws_vpc.main.id
5 |
6 | ingress {
7 | description = "TLS from VPC"
8 | from_port = 22
9 | to_port = 22
10 | protocol = "tcp"
11 | cidr_blocks = ["0.0.0.0/0"]
12 | }
13 |
14 | ingress {
15 | description = "TLS from VPC"
16 | from_port = 80
17 | to_port = 80
18 | protocol = "tcp"
19 | cidr_blocks = ["0.0.0.0/0"]
20 | }
21 |
22 | egress {
23 | from_port = 0
24 | to_port = 0
25 | protocol = "-1"
26 | cidr_blocks = ["0.0.0.0/0"]
27 | }
28 |
29 | tags = {
30 | Name = "allow_tls"
31 | }
32 | }
33 |
34 | resource "aws_security_group" "allow_tls_db" {
35 | name = "allow_tls_db"
36 | description = "Allow TLS inbound traffic"
37 | vpc_id = aws_vpc.main.id
38 |
39 | ingress {
40 | description = "TLS from VPC"
41 | from_port = 22
42 | to_port = 22
43 | protocol = "tcp"
44 | cidr_blocks = ["0.0.0.0/0"]
45 | }
46 |
47 | ingress {
48 | description = "TLS from VPC"
49 | from_port = 3306
50 | to_port = 3306
51 | protocol = "tcp"
52 | cidr_blocks = ["0.0.0.0/0"]
53 | }
54 |
55 | egress {
56 | from_port = 0
57 | to_port = 0
58 | protocol = "-1"
59 | cidr_blocks = ["0.0.0.0/0"]
60 | }
61 |
62 | tags = {
63 | Name = "allow_tls_db"
64 | }
65 | }
--------------------------------------------------------------------------------
/3-Tier-Architecture/subnet.tf:
--------------------------------------------------------------------------------
1 | resource "aws_subnet" "public" {
2 | vpc_id = aws_vpc.main.id
3 | cidr_block = var.cidr[count.index]
4 | availability_zone = var.az[count.index]
5 | count = 2
6 |
7 | tags = {
8 | Name = "public-sub"
9 | }
10 | }
11 |
12 | resource "aws_subnet" "private" {
13 | vpc_id = aws_vpc.main.id
14 | cidr_block = "10.0.3.0/24"
15 | availability_zone = "ap-northeast-1b"
16 |
17 | tags = {
18 | Name = "private-sub3"
19 | }
20 | }
21 |
22 | data "aws_subnets" "sid" {
23 | filter {
24 | name = "vpc-id"
25 | values = [aws_vpc.main.id]
26 | }
27 |
28 | tags = {
29 | Tier = "Public"
30 | }
31 | }
--------------------------------------------------------------------------------
/3-Tier-Architecture/variable.tf:
--------------------------------------------------------------------------------
1 | variable "cidr" {
2 | type = list
3 | default = ["10.0.1.0/24","10.0.2.0/24"]
4 | }
5 |
6 | variable "az" {
7 | type = list
8 | default = ["ap-northeast-1a","ap-northeast-1c"]
9 | }
10 |
--------------------------------------------------------------------------------
/3-Tier-Architecture/vpc.tf:
--------------------------------------------------------------------------------
1 | resource "aws_vpc" "main" {
2 | cidr_block = "10.0.0.0/16"
3 |
4 | tags = {
5 | Name = "CustomVPC"
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/Manifests/backend-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: api
5 | namespace: workshop
6 | labels:
7 | role: api
8 | env: demo
9 | spec:
10 | replicas: 1
11 | strategy:
12 | type: RollingUpdate
13 | rollingUpdate:
14 | maxSurge: 1
15 | maxUnavailable: 25%
16 | selector:
17 | matchLabels:
18 | role: api
19 | template:
20 | metadata:
21 | labels:
22 | role: api
23 | spec:
24 | containers:
25 | - name: api
26 | image: public.ecr.aws/w8u5e4v2/workshop-backend:v1
27 | imagePullPolicy: Always
28 | env:
29 | - name: MONGO_CONN_STR
30 | value: mongodb://mongodb-svc:27017/todo?directConnection=true
31 | - name: MONGO_USERNAME
32 | valueFrom:
33 | secretKeyRef:
34 | name: mongo-sec
35 | key: username
36 | - name: MONGO_PASSWORD
37 | valueFrom:
38 | secretKeyRef:
39 | name: mongo-sec
40 | key: password
41 | ports:
42 | - containerPort: 8080
43 | livenessProbe:
44 | httpGet:
45 | path: /ok
46 | port: 8080
47 | initialDelaySeconds: 2
48 | periodSeconds: 5
49 | readinessProbe:
50 | httpGet:
51 | path: /ok
52 | port: 8080
53 | initialDelaySeconds: 5
54 | periodSeconds: 5
55 | successThreshold: 1
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/Manifests/backend-service.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: api
6 | namespace: workshop
7 | spec:
8 | ports:
9 | - port: 8080
10 | protocol: TCP
11 | type: ClusterIP
12 | selector:
13 | role: api
14 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/Manifests/cron.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1beta1
2 | kind: CronJob
3 | metadata:
4 | name: example-cronjob
5 | spec:
6 | schedule: "*/5 * * * *"
7 | jobTemplate:
8 | spec:
9 | template:
10 | spec:
11 | containers:
12 | - name: job-container
13 | image: your-image:tag
14 | command: ["echo", "Hello, Kubernetes CronJob!"]
15 | restartPolicy: OnFailure
16 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/Manifests/daemonset.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: example-daemonset
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: my-app
9 | template:
10 | metadata:
11 | labels:
12 | app: my-app
13 | spec:
14 | containers:
15 | - name: my-app-container
16 | image: your-image:tag
17 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/Manifests/frontend-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: frontend
5 | namespace: workshop
6 | labels:
7 | role: frontend
8 | env: demo
9 | spec:
10 | replicas: 1
11 | strategy:
12 | type: RollingUpdate
13 | rollingUpdate:
14 | maxSurge: 1
15 | maxUnavailable: 25%
16 | selector:
17 | matchLabels:
18 | role: frontend
19 | template:
20 | metadata:
21 | labels:
22 | role: frontend
23 | spec:
24 | containers:
25 | - name: frontend
26 | image: public.ecr.aws/w8u5e4v2/workshop-frontend:v1
27 | imagePullPolicy: Always
28 | env:
29 | - name: REACT_APP_BACKEND_URL
30 | value: "http://app.sandipdas.in/api/tasks" #$API_ELB_PUBLIC_FQDN #add your API_Load_Balancer DNS manually here if app does not run
31 | ports:
32 | - containerPort: 3000
33 | # livenessProbe:
34 | # httpGet:
35 | # path: /
36 | # port: 3000
37 | # initialDelaySeconds: 2
38 | # periodSeconds: 5
39 | # readinessProbe:
40 | # httpGet:
41 | # path: /
42 | # port: 3000
43 | # initialDelaySeconds: 5
44 | # periodSeconds: 5
45 | # successThreshold: 1
46 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/Manifests/frontend-service.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: frontend
6 | namespace: workshop
7 | spec:
8 | ports:
9 | - port: 3000
10 | protocol: TCP
11 | type: ClusterIP
12 | selector:
13 | role: frontend
14 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/Manifests/full_stack_lb.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.k8s.io/v1
3 | kind: Ingress
4 | metadata:
5 | name: mainlb
6 | namespace: workshop
7 | annotations:
8 | alb.ingress.kubernetes.io/group.name: demo-lb
9 | alb.ingress.kubernetes.io/scheme: internet-facing
10 | alb.ingress.kubernetes.io/target-type: ip
11 |     #Uncomment below to enable HTTPS; generate a certificate from AWS ACM
12 | alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]'
13 | alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-west-2:120717539064:certificate/befc6ed9-44ae-41a1-ba96-0d562a369f64
14 | spec:
15 | ingressClassName: alb
16 | rules:
17 | - host: app.sandipdas.in
18 | http:
19 | paths:
20 | - path: /api
21 | pathType: Prefix
22 | backend:
23 | service:
24 | name: api
25 | port:
26 | number: 8080
27 | - path: /v2/api
28 | pathType: Prefix
29 | backend:
30 | service:
31 | name: apiv2
32 | port:
33 | number: 8080
34 | - path: /
35 | pathType: Prefix
36 | backend:
37 | service:
38 | name: frontend
39 | port:
40 | number: 3000
41 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/Manifests/hpa.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: autoscaling/v1
3 | kind: HorizontalPodAutoscaler
4 | metadata:
5 |   name: api
6 |   # Fixed: was "shepherd", but the target Deployment "api" (and every sibling
7 |   # manifest in this directory) lives in the "workshop" namespace, so the HPA
8 |   # would never bind to its target.
9 |   namespace: workshop
10 | spec:
11 |   minReplicas: 2
12 |   maxReplicas: 10
13 |   scaleTargetRef:
14 |     apiVersion: apps/v1
15 |     kind: Deployment
16 |     name: api
17 |   targetCPUUtilizationPercentage: 50
18 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/Manifests/job.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: example-job
5 | spec:
6 | template:
7 | metadata:
8 | name: example-pod
9 | spec:
10 | containers:
11 | - name: job-container
12 | image: your-image:tag
13 | command: ["echo", "Hello, Kubernetes Job!"]
14 | restartPolicy: Never
15 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/Manifests/mongo/deploy.yaml:
--------------------------------------------------------------------------------
1 | # MongoDB Deployment for the 3-tier demo app (namespace "workshop").
2 | # Root credentials are injected from the "mongo-sec" Secret.
3 | apiVersion: apps/v1
4 | kind: Deployment
5 | metadata:
6 |   namespace: workshop
7 |   name: mongodb
8 | spec:
9 |   # Single replica, standalone mongod (no replica set). Storage is
10 |   # ephemeral while the volume section below stays commented out.
11 |   replicas: 1
12 |   selector:
13 |     matchLabels:
14 |       app: mongodb
15 |   template:
16 |     metadata:
17 |       labels:
18 |         app: mongodb
19 |     spec:
20 |       containers:
21 |       - name: mongodb
22 |         image: mongo:4.4.6
23 |         # Interleave memory across NUMA nodes and cap the WiredTiger
24 |         # cache at 0.1 GiB so mongod fits the 1Gi memory limit below;
25 |         # bind to 0.0.0.0 so the mongodb-svc Service can reach it.
26 |         command:
27 |         - "numactl"
28 |         - "--interleave=all"
29 |         - "mongod"
30 |         - "--wiredTigerCacheSizeGB"
31 |         - "0.1"
32 |         - "--bind_ip"
33 |         - "0.0.0.0"
34 |         ports:
35 |         - containerPort: 27017
36 |         resources:
37 |           requests:
38 |             memory: "512Mi"
39 |             cpu: "250m"
40 |           limits:
41 |             memory: "1Gi"
42 |             cpu: "500m"
43 |         env:
44 |         - name: MONGO_INITDB_ROOT_USERNAME
45 |           valueFrom:
46 |             secretKeyRef:
47 |               name: mongo-sec
48 |               key: username
49 |         - name: MONGO_INITDB_ROOT_PASSWORD
50 |           valueFrom:
51 |             secretKeyRef:
52 |               name: mongo-sec
53 |               key: password
54 |         # Optional persistence — uncomment to keep /data/db on a PVC.
55 |         # NOTE(review): when enabling, "volumes:" must sit at the
56 |         # pod-spec level (same indent as "containers:"), not inside the
57 |         # container, and the "mongo-volume-claim" PVC must exist first.
58 |         # volumeMounts:
59 |         #   - name: mongo-volume
60 |         #     mountPath: /data/db
61 |         # volumes:
62 |         #   - name: mongo-volume
63 |         #     persistentVolumeClaim:
64 |         #       claimName: mongo-volume-claim
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/Manifests/mongo/secrets.yaml:
--------------------------------------------------------------------------------
1 | # Credentials for the MongoDB Deployment (consumed via secretKeyRef).
2 | # NOTE(review): base64 is encoding, not encryption — these demo
3 | # credentials are readable by anyone with access to this file or the
4 | # cluster; do not commit real secrets this way.
5 | apiVersion: v1
6 | kind: Secret
7 | metadata:
8 |   namespace: workshop
9 |   name: mongo-sec
10 | type: Opaque
11 | data:
12 |   password: cGFzc3dvcmQxMjM= #password123
13 |   username: YWRtaW4= #admin
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/Manifests/mongo/service.yaml:
--------------------------------------------------------------------------------
1 | # Service exposing the mongodb Deployment on the standard MongoDB
2 | # port (no explicit type, so it defaults to ClusterIP —
3 | # cluster-internal only).
4 | apiVersion: v1
5 | kind: Service
6 | metadata:
7 |   namespace: workshop
8 |   name: mongodb-svc
9 | spec:
10 |   selector:
11 |     app: mongodb
12 |   ports:
13 |   - name: mongodb-svc
14 |     protocol: TCP
15 |     port: 27017
16 |     targetPort: 27017
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/Manifests/monitoring-lb.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # ALB Ingress exposing the Grafana UI (prometheus-grafana Service in
3 | # the "prometheus" namespace) at monitor.sandipdas.in.
4 | apiVersion: networking.k8s.io/v1
5 | kind: Ingress
6 | metadata:
7 |   name: monitoringlb
8 |   namespace: prometheus
9 |   annotations:
10 |     # Share a single ALB with other Ingresses in the same group.
11 |     alb.ingress.kubernetes.io/group.name: demo-lb
12 |     alb.ingress.kubernetes.io/scheme: internet-facing
13 |     alb.ingress.kubernetes.io/target-type: ip
14 |     # Uncomment below to enable https, generate certificate from AWS ACM
15 |     # NOTE(review): the three annotations below are already active
16 |     # (not commented out), and the certificate ARN is account/region
17 |     # specific — replace it with your own ACM certificate ARN.
18 |     alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]'
19 |     alb.ingress.kubernetes.io/ssl-redirect: '443'
20 |     alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-west-2:120717539064:certificate/befc6ed9-44ae-41a1-ba96-0d562a369f64
21 | spec:
22 |   ingressClassName: alb
23 |   rules:
24 |   - host: monitor.sandipdas.in
25 |     http:
26 |       paths:
27 |       - path: /
28 |         pathType: Prefix
29 |         backend:
30 |           service:
31 |             name: prometheus-grafana
32 |             port:
33 |               number: 80
28 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/Manifests/stateful.yaml:
--------------------------------------------------------------------------------
1 | # Example StatefulSet with per-replica persistent storage.
2 | apiVersion: apps/v1
3 | kind: StatefulSet
4 | metadata:
5 |   name: example-statefulset
6 | spec:
7 |   replicas: 3
8 |   # Governing (headless) service for stable pod network identity;
9 |   # it must be created separately.
10 |   serviceName: my-stateful-service
11 |   selector:
12 |     matchLabels:
13 |       app: my-app
14 |   template:
15 |     metadata:
16 |       labels:
17 |         app: my-app
18 |     spec:
19 |       containers:
20 |       - name: my-app-container
21 |         # Placeholder — replace with a real image:tag before applying.
22 |         image: your-image:tag
23 |         ports:
24 |         - containerPort: 80
25 |         # Fix: mount the "data" PVC declared in volumeClaimTemplates;
26 |         # previously a claim was provisioned per replica but never
27 |         # mounted, so the storage went unused.
28 |         volumeMounts:
29 |         - name: data
30 |           mountPath: /data
31 |   volumeClaimTemplates:
32 |   - metadata:
33 |       name: data
34 |     spec:
35 |       accessModes: [ "ReadWriteOnce" ]
36 |       resources:
37 |         requests:
38 |           storage: 1Gi
29 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/Manifests/values.yaml:
--------------------------------------------------------------------------------
1 | rbac:
2 | create: true
3 |
4 | podSecurityPolicy:
5 | enabled: false
6 |
7 | imagePullSecrets: []
8 | # - name: "image-pull-secret"
9 |
10 | ## Define serviceAccount names for components. Defaults to component's fully qualified name.
11 | ##
12 | serviceAccounts:
13 | server:
14 | create: true
15 | name: ""
16 | annotations: {}
17 |
18 | ## Monitors ConfigMap changes and POSTs to a URL
19 | ## Ref: https://github.com/jimmidyson/configmap-reload
20 | ##
21 | configmapReload:
22 | prometheus:
23 | ## If false, the configmap-reload container will not be deployed
24 | ##
25 | enabled: true
26 |
27 | ## configmap-reload container name
28 | ##
29 | name: configmap-reload
30 |
31 | ## configmap-reload container image
32 | ##
33 | image:
34 | repository: jimmidyson/configmap-reload
35 | tag: v0.8.0
36 | # When digest is set to a non-empty value, images will be pulled by digest (regardless of tag value).
37 | digest: ""
38 | pullPolicy: IfNotPresent
39 |
40 | # containerPort: 9533
41 |
42 | ## Additional configmap-reload container arguments
43 | ##
44 | extraArgs: {}
45 | ## Additional configmap-reload volume directories
46 | ##
47 | extraVolumeDirs: []
48 |
49 |
50 | ## Additional configmap-reload mounts
51 | ##
52 | extraConfigmapMounts: []
53 | # - name: prometheus-alerts
54 | # mountPath: /etc/alerts.d
55 | # subPath: ""
56 | # configMap: prometheus-alerts
57 | # readOnly: true
58 |
59 | ## Security context to be added to configmap-reload container
60 | containerSecurityContext: {}
61 |
62 | ## configmap-reload resource requests and limits
63 | ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
64 | ##
65 | resources: {}
66 |
67 | server:
68 | ## Prometheus server container name
69 | ##
70 | name: server
71 |
72 | ## Use a ClusterRole (and ClusterRoleBinding)
73 | ## - If set to false - we define a RoleBinding in the defined namespaces ONLY
74 | ##
75 | ## NB: because we need a Role with nonResourceURL's ("/metrics") - you must get someone with Cluster-admin privileges to define this role for you, before running with this setting enabled.
76 | ## This makes prometheus work - for users who do not have ClusterAdmin privs, but wants prometheus to operate on their own namespaces, instead of clusterwide.
77 | ##
78 | ## You MUST also set namespaces to the ones you have access to and want monitored by Prometheus.
79 | ##
80 | # useExistingClusterRoleName: nameofclusterrole
81 |
82 | ## namespaces to monitor (instead of monitoring all - clusterwide). Needed if you want to run without Cluster-admin privileges.
83 | # namespaces:
84 | # - yournamespace
85 |
86 | # sidecarContainers - add more containers to prometheus server
87 | # Key/Value where Key is the sidecar `- name: `
88 | # Example:
89 | # sidecarContainers:
90 | # webserver:
91 | # image: nginx
92 | sidecarContainers: {}
93 |
94 | # sidecarTemplateValues - context to be used in template for sidecarContainers
95 | # Example:
96 | # sidecarTemplateValues: *your-custom-globals
97 | # sidecarContainers:
98 | # webserver: |-
99 | # {{ include "webserver-container-template" . }}
100 | # Template for `webserver-container-template` might looks like this:
101 | # image: "{{ .Values.server.sidecarTemplateValues.repository }}:{{ .Values.server.sidecarTemplateValues.tag }}"
102 | # ...
103 | #
104 | sidecarTemplateValues: {}
105 |
106 | ## Prometheus server container image
107 | ##
108 | image:
109 | repository: quay.io/prometheus/prometheus
110 | # if not set appVersion field from Chart.yaml is used
111 | tag: ""
112 | # When digest is set to a non-empty value, images will be pulled by digest (regardless of tag value).
113 | digest: ""
114 | pullPolicy: IfNotPresent
115 |
116 | ## prometheus server priorityClassName
117 | ##
118 | priorityClassName: ""
119 |
120 | ## EnableServiceLinks indicates whether information about services should be injected
121 | ## into pod's environment variables, matching the syntax of Docker links.
122 | ## WARNING: the field is unsupported and will be skipped in K8s prior to v1.13.0.
123 | ##
124 | enableServiceLinks: true
125 |
126 | ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug
127 | ## so that the various internal URLs are still able to access as they are in the default case.
128 | ## (Optional)
129 | prefixURL: ""
130 |
131 | ## External URL which can access prometheus
132 | ## Maybe same with Ingress host name
133 | baseURL: ""
134 |
135 | ## Additional server container environment variables
136 | ##
137 | ## You specify this manually like you would a raw deployment manifest.
138 | ## This means you can bind in environment variables from secrets.
139 | ##
140 | ## e.g. static environment variable:
141 | ## - name: DEMO_GREETING
142 | ## value: "Hello from the environment"
143 | ##
144 | ## e.g. secret environment variable:
145 | ## - name: USERNAME
146 | ## valueFrom:
147 | ## secretKeyRef:
148 | ## name: mysecret
149 | ## key: username
150 | env: []
151 |
152 | # List of flags to override default parameters, e.g:
153 | # - --enable-feature=agent
154 | # - --storage.agent.retention.max-time=30m
155 | defaultFlagsOverride: []
156 |
157 | extraFlags:
158 | - web.enable-lifecycle
159 | ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as
160 | ## deleting time series. This is disabled by default.
161 | # - web.enable-admin-api
162 | ##
163 |   ## storage.tsdb.no-lockfile flag controls DB locking
164 | # - storage.tsdb.no-lockfile
165 | ##
166 | ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL)
167 | # - storage.tsdb.wal-compression
168 |
169 | ## Path to a configuration file on prometheus server container FS
170 | configPath: /etc/config/prometheus.yml
171 |
172 | ### The data directory used by prometheus to set --storage.tsdb.path
173 | ### When empty server.persistentVolume.mountPath is used instead
174 | storagePath: ""
175 |
176 | global:
177 | ## How frequently to scrape targets by default
178 | ##
179 | scrape_interval: 1m
180 | ## How long until a scrape request times out
181 | ##
182 | scrape_timeout: 10s
183 | ## How frequently to evaluate rules
184 | ##
185 | evaluation_interval: 1m
186 | ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write
187 | ##
188 | remoteWrite: []
189 | ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read
190 | ##
191 | remoteRead: []
192 |
193 | ## Custom HTTP headers for Liveness/Readiness/Startup Probe
194 | ##
195 | ## Useful for providing HTTP Basic Auth to healthchecks
196 | probeHeaders: []
197 | # - name: "Authorization"
198 | # value: "Bearer ABCDEabcde12345"
199 |
200 | ## Additional Prometheus server container arguments
201 | ##
202 | extraArgs: {}
203 |
204 | ## Additional InitContainers to initialize the pod
205 | ##
206 | extraInitContainers: []
207 |
208 | ## Additional Prometheus server Volume mounts
209 | ##
210 | extraVolumeMounts: []
211 |
212 | ## Additional Prometheus server Volumes
213 | ##
214 | extraVolumes: []
215 |
216 | ## Additional Prometheus server hostPath mounts
217 | ##
218 | extraHostPathMounts: []
219 | # - name: certs-dir
220 | # mountPath: /etc/kubernetes/certs
221 | # subPath: ""
222 | # hostPath: /etc/kubernetes/certs
223 | # readOnly: true
224 |
225 | extraConfigmapMounts: []
226 | # - name: certs-configmap
227 | # mountPath: /prometheus
228 | # subPath: ""
229 | # configMap: certs-configmap
230 | # readOnly: true
231 |
232 | ## Additional Prometheus server Secret mounts
233 | # Defines additional mounts with secrets. Secrets must be manually created in the namespace.
234 | extraSecretMounts: []
235 | # - name: secret-files
236 | # mountPath: /etc/secrets
237 | # subPath: ""
238 | # secretName: prom-secret-files
239 | # readOnly: true
240 |
241 | ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}}
242 | ## Defining configMapOverrideName will cause templates/server-configmap.yaml
243 | ## to NOT generate a ConfigMap resource
244 | ##
245 | configMapOverrideName: ""
246 |
247 | ## Extra labels for Prometheus server ConfigMap (ConfigMap that holds serverFiles)
248 | extraConfigmapLabels: {}
249 |
250 | ingress:
251 | ## If true, Prometheus server Ingress will be created
252 | ##
253 | enabled: false
254 |
255 | # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
256 | # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
257 | # ingressClassName: nginx
258 |
259 | ## Prometheus server Ingress annotations
260 | ##
261 | annotations: {}
262 | # kubernetes.io/ingress.class: nginx
263 | # kubernetes.io/tls-acme: 'true'
264 |
265 | ## Prometheus server Ingress additional labels
266 | ##
267 | extraLabels: {}
268 |
269 | ## Prometheus server Ingress hostnames with optional path
270 | ## Must be provided if Ingress is enabled
271 | ##
272 | hosts: []
273 | # - prometheus.domain.com
274 | # - domain.com/prometheus
275 |
276 | path: /
277 |
278 | # pathType is only for k8s >= 1.18
279 | pathType: Prefix
280 |
281 | ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
282 | extraPaths: []
283 | # - path: /*
284 | # backend:
285 | # serviceName: ssl-redirect
286 | # servicePort: use-annotation
287 |
288 | ## Prometheus server Ingress TLS configuration
289 | ## Secrets must be manually created in the namespace
290 | ##
291 | tls: []
292 | # - secretName: prometheus-server-tls
293 | # hosts:
294 | # - prometheus.domain.com
295 |
296 | ## Server Deployment Strategy type
297 | strategy:
298 | type: Recreate
299 |
300 | ## hostAliases allows adding entries to /etc/hosts inside the containers
301 | hostAliases: []
302 | # - ip: "127.0.0.1"
303 | # hostnames:
304 | # - "example.com"
305 |
306 | ## Node tolerations for server scheduling to nodes with taints
307 | ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
308 | ##
309 | tolerations: []
310 | # - key: "key"
311 | # operator: "Equal|Exists"
312 | # value: "value"
313 | # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
314 |
315 | ## Node labels for Prometheus server pod assignment
316 | ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
317 | ##
318 | nodeSelector: {}
319 |
320 | ## Pod affinity
321 | ##
322 | affinity: {}
323 |
324 | ## PodDisruptionBudget settings
325 | ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
326 | ##
327 | podDisruptionBudget:
328 | enabled: false
329 | maxUnavailable: 1
330 |
331 | ## Use an alternate scheduler, e.g. "stork".
332 | ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
333 | ##
334 | # schedulerName:
335 |
336 | persistentVolume:
337 | ## If true, Prometheus server will create/use a Persistent Volume Claim
338 | ## If false, use emptyDir
339 | ##
340 | enabled: true
341 |
342 | ## Prometheus server data Persistent Volume access modes
343 | ## Must match those of existing PV or dynamic provisioner
344 | ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
345 | ##
346 | accessModes:
347 | - ReadWriteOnce
348 |
349 | ## Prometheus server data Persistent Volume labels
350 | ##
351 | labels: {}
352 |
353 | ## Prometheus server data Persistent Volume annotations
354 | ##
355 | annotations: {}
356 |
357 | ## Prometheus server data Persistent Volume existing claim name
358 | ## Requires server.persistentVolume.enabled: true
359 | ## If defined, PVC must be created manually before volume will be bound
360 | existingClaim: ""
361 |
362 | ## Prometheus server data Persistent Volume mount root path
363 | ##
364 | mountPath: /data
365 |
366 | ## Prometheus server data Persistent Volume size
367 | ##
368 | size: 8Gi
369 |
370 | ## Prometheus server data Persistent Volume Storage Class
371 | ## If defined, storageClassName:
372 | ## If set to "-", storageClassName: "", which disables dynamic provisioning
373 | ## If undefined (the default) or set to null, no storageClassName spec is
374 | ## set, choosing the default provisioner. (gp2 on AWS, standard on
375 | ## GKE, AWS & OpenStack)
376 | ##
377 | # storageClass: "-"
378 |
379 | ## Prometheus server data Persistent Volume Binding Mode
380 | ## If defined, volumeBindingMode:
381 | ## If undefined (the default) or set to null, no volumeBindingMode spec is
382 | ## set, choosing the default mode.
383 | ##
384 | # volumeBindingMode: ""
385 |
386 | ## Subdirectory of Prometheus server data Persistent Volume to mount
387 | ## Useful if the volume's root directory is not empty
388 | ##
389 | subPath: ""
390 |
391 | ## Persistent Volume Claim Selector
392 | ## Useful if Persistent Volumes have been provisioned in advance
393 | ## Ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector
394 | ##
395 | # selector:
396 | # matchLabels:
397 | # release: "stable"
398 | # matchExpressions:
399 | # - { key: environment, operator: In, values: [ dev ] }
400 |
401 | ## Persistent Volume Name
402 | ## Useful if Persistent Volumes have been provisioned in advance and you want to use a specific one
403 | ##
404 | # volumeName: ""
405 |
406 | emptyDir:
407 | ## Prometheus server emptyDir volume size limit
408 | ##
409 | sizeLimit: ""
410 |
411 | ## Annotations to be added to Prometheus server pods
412 | ##
413 | podAnnotations: {}
414 | # iam.amazonaws.com/role: prometheus
415 |
416 | ## Labels to be added to Prometheus server pods
417 | ##
418 | podLabels: {}
419 |
420 | ## Prometheus AlertManager configuration
421 | ##
422 | alertmanagers: []
423 |
424 | ## Specify if a Pod Security Policy for node-exporter must be created
425 | ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
426 | ##
427 | podSecurityPolicy:
428 | annotations: {}
429 | ## Specify pod annotations
430 | ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
431 | ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
432 | ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
433 | ##
434 | # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
435 | # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
436 | # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
437 |
438 | ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below)
439 | ##
440 | replicaCount: 1
441 |
442 | ## Annotations to be added to deployment
443 | ##
444 | deploymentAnnotations: {}
445 |
446 | statefulSet:
447 | ## If true, use a statefulset instead of a deployment for pod management.
448 | ## This allows to scale replicas to more than 1 pod
449 | ##
450 | enabled: false
451 |
452 | annotations: {}
453 | labels: {}
454 | podManagementPolicy: OrderedReady
455 |
456 | ## Alertmanager headless service to use for the statefulset
457 | ##
458 | headless:
459 | annotations: {}
460 | labels: {}
461 | servicePort: 80
462 | ## Enable gRPC port on service to allow auto discovery with thanos-querier
463 | gRPC:
464 | enabled: false
465 | servicePort: 10901
466 | # nodePort: 10901
467 |
468 | ## Prometheus server readiness and liveness probe initial delay and timeout
469 | ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
470 | ##
471 | tcpSocketProbeEnabled: false
472 | probeScheme: HTTP
473 | readinessProbeInitialDelay: 30
474 | readinessProbePeriodSeconds: 5
475 | readinessProbeTimeout: 4
476 | readinessProbeFailureThreshold: 3
477 | readinessProbeSuccessThreshold: 1
478 | livenessProbeInitialDelay: 30
479 | livenessProbePeriodSeconds: 15
480 | livenessProbeTimeout: 10
481 | livenessProbeFailureThreshold: 3
482 | livenessProbeSuccessThreshold: 1
483 | startupProbe:
484 | enabled: false
485 | periodSeconds: 5
486 | failureThreshold: 30
487 | timeoutSeconds: 10
488 |
489 | ## Prometheus server resource requests and limits
490 | ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
491 | ##
492 | resources: {}
493 | # limits:
494 | # cpu: 500m
495 | # memory: 512Mi
496 | # requests:
497 | # cpu: 500m
498 | # memory: 512Mi
499 |
500 | # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
501 | # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working
502 | ##
503 | hostNetwork: false
504 |
505 | # When hostNetwork is enabled, this will set to ClusterFirstWithHostNet automatically
506 | dnsPolicy: ClusterFirst
507 |
508 | # Use hostPort
509 | # hostPort: 9090
510 |
511 | ## Vertical Pod Autoscaler config
512 | ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler
513 | verticalAutoscaler:
514 |     ## If true a VPA object will be created for the controller (either StatefulSet or Deployment, based on above configs)
515 | enabled: false
516 | # updateMode: "Auto"
517 | # containerPolicies:
518 | # - containerName: 'prometheus-server'
519 |
520 | # Custom DNS configuration to be added to prometheus server pods
521 | dnsConfig: {}
522 | # nameservers:
523 | # - 1.2.3.4
524 | # searches:
525 | # - ns1.svc.cluster-domain.example
526 | # - my.dns.search.suffix
527 | # options:
528 | # - name: ndots
529 | # value: "2"
530 | # - name: edns0
531 |
532 | ## Security context to be added to server pods
533 | ##
534 | securityContext:
535 | runAsUser: 65534
536 | runAsNonRoot: true
537 | runAsGroup: 65534
538 | fsGroup: 65534
539 |
540 | ## Security context to be added to server container
541 | ##
542 | containerSecurityContext: {}
543 |
544 | service:
545 | ## If false, no Service will be created for the Prometheus server
546 | ##
547 | enabled: true
548 |
549 | annotations: {}
550 | labels: {}
551 | clusterIP: ""
552 |
553 | ## List of IP addresses at which the Prometheus server service is available
554 | ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
555 | ##
556 | externalIPs: []
557 |
558 | loadBalancerIP: ""
559 | loadBalancerSourceRanges: []
560 | servicePort: 80
561 | sessionAffinity: None
562 | type: ClusterIP
563 |
564 | ## Enable gRPC port on service to allow auto discovery with thanos-querier
565 | gRPC:
566 | enabled: false
567 | servicePort: 10901
568 | # nodePort: 10901
569 |
570 | ## If using a statefulSet (statefulSet.enabled=true), configure the
571 | ## service to connect to a specific replica to have a consistent view
572 | ## of the data.
573 | statefulsetReplica:
574 | enabled: false
575 | replica: 0
576 |
577 | ## Prometheus server pod termination grace period
578 | ##
579 | terminationGracePeriodSeconds: 300
580 |
581 | ## Prometheus data retention period (default if not specified is 15 days)
582 | ##
583 | retention: "15d"
584 |
585 | ## Prometheus server ConfigMap entries for rule files (allow prometheus labels interpolation)
586 | ruleFiles: {}
587 |
588 | ## Prometheus server ConfigMap entries
589 | ##
590 | serverFiles:
591 | ## Alerts configuration
592 | ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/
593 | alerting_rules.yml: {}
594 | # groups:
595 | # - name: Instances
596 | # rules:
597 | # - alert: InstanceDown
598 | # expr: up == 0
599 | # for: 5m
600 | # labels:
601 | # severity: page
602 | # annotations:
603 | # description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.'
604 | # summary: 'Instance {{ $labels.instance }} down'
605 | ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use alerting_rules.yml
606 | alerts: {}
607 |
608 | ## Records configuration
609 | ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/
610 | recording_rules.yml: {}
611 | ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use recording_rules.yml
612 | rules: {}
613 |
614 | prometheus.yml:
615 | rule_files:
616 | - /etc/config/recording_rules.yml
617 | - /etc/config/alerting_rules.yml
618 | ## Below two files are DEPRECATED will be removed from this default values file
619 | - /etc/config/rules
620 | - /etc/config/alerts
621 |
622 | scrape_configs:
623 | - job_name: prometheus
624 | static_configs:
625 | - targets:
626 | - localhost:9090
627 |
628 | # A scrape configuration for running Prometheus on a Kubernetes cluster.
629 | # This uses separate scrape configs for cluster components (i.e. API server, node)
630 | # and services to allow each to use different authentication configs.
631 | #
632 | # Kubernetes labels will be added as Prometheus labels on metrics via the
633 | # `labelmap` relabeling action.
634 |
635 | # Scrape config for API servers.
636 | #
637 | # Kubernetes exposes API servers as endpoints to the default/kubernetes
638 | # service so this uses `endpoints` role and uses relabelling to only keep
639 | # the endpoints associated with the default/kubernetes service using the
640 | # default named port `https`. This works for single API server deployments as
641 | # well as HA API server deployments.
642 | - job_name: 'kubernetes-apiservers'
643 |
644 | kubernetes_sd_configs:
645 | - role: endpoints
646 |
647 | # Default to scraping over https. If required, just disable this or change to
648 | # `http`.
649 | scheme: https
650 |
651 | # This TLS & bearer token file config is used to connect to the actual scrape
652 | # endpoints for cluster components. This is separate to discovery auth
653 | # configuration because discovery & scraping are two separate concerns in
654 | # Prometheus. The discovery auth config is automatic if Prometheus runs inside
655 | # the cluster. Otherwise, more config options have to be provided within the
656 |       # <kubernetes_sd_config>.
657 | tls_config:
658 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
659 | # If your node certificates are self-signed or use a different CA to the
660 | # master CA, then disable certificate verification below. Note that
661 | # certificate verification is an integral part of a secure infrastructure
662 | # so this should only be disabled in a controlled environment. You can
663 | # disable certificate verification by uncommenting the line below.
664 | #
665 | insecure_skip_verify: true
666 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
667 |
668 | # Keep only the default/kubernetes service endpoints for the https port. This
669 | # will add targets for each API server which Kubernetes adds an endpoint to
670 | # the default/kubernetes service.
671 | relabel_configs:
672 | - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
673 | action: keep
674 | regex: default;kubernetes;https
675 |
676 | - job_name: 'kubernetes-nodes'
677 |
678 | # Default to scraping over https. If required, just disable this or change to
679 | # `http`.
680 | scheme: https
681 |
682 | # This TLS & bearer token file config is used to connect to the actual scrape
683 | # endpoints for cluster components. This is separate to discovery auth
684 | # configuration because discovery & scraping are two separate concerns in
685 | # Prometheus. The discovery auth config is automatic if Prometheus runs inside
686 | # the cluster. Otherwise, more config options have to be provided within the
687 |       # <kubernetes_sd_config>.
688 | tls_config:
689 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
690 | # If your node certificates are self-signed or use a different CA to the
691 | # master CA, then disable certificate verification below. Note that
692 | # certificate verification is an integral part of a secure infrastructure
693 | # so this should only be disabled in a controlled environment. You can
694 | # disable certificate verification by uncommenting the line below.
695 | #
696 | insecure_skip_verify: true
697 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
698 |
699 | kubernetes_sd_configs:
700 | - role: node
701 |
702 | relabel_configs:
703 | - action: labelmap
704 | regex: __meta_kubernetes_node_label_(.+)
705 | - target_label: __address__
706 | replacement: kubernetes.default.svc:443
707 | - source_labels: [__meta_kubernetes_node_name]
708 | regex: (.+)
709 | target_label: __metrics_path__
710 | replacement: /api/v1/nodes/$1/proxy/metrics
711 |
712 |
713 | - job_name: 'kubernetes-nodes-cadvisor'
714 |
715 | # Default to scraping over https. If required, just disable this or change to
716 | # `http`.
717 | scheme: https
718 |
719 | # This TLS & bearer token file config is used to connect to the actual scrape
720 | # endpoints for cluster components. This is separate to discovery auth
721 | # configuration because discovery & scraping are two separate concerns in
722 | # Prometheus. The discovery auth config is automatic if Prometheus runs inside
723 | # the cluster. Otherwise, more config options have to be provided within the
724 |       # <kubernetes_sd_config>.
725 | tls_config:
726 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
727 | # If your node certificates are self-signed or use a different CA to the
728 | # master CA, then disable certificate verification below. Note that
729 | # certificate verification is an integral part of a secure infrastructure
730 | # so this should only be disabled in a controlled environment. You can
731 | # disable certificate verification by uncommenting the line below.
732 | #
733 | insecure_skip_verify: true
734 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
735 |
736 | kubernetes_sd_configs:
737 | - role: node
738 |
739 | # This configuration will work only on kubelet 1.7.3+
740 | # As the scrape endpoints for cAdvisor have changed
741 | # if you are using older version you need to change the replacement to
742 | # replacement: /api/v1/nodes/$1:4194/proxy/metrics
743 | # more info here https://github.com/coreos/prometheus-operator/issues/633
744 | relabel_configs:
745 | - action: labelmap
746 | regex: __meta_kubernetes_node_label_(.+)
747 | - target_label: __address__
748 | replacement: kubernetes.default.svc:443
749 | - source_labels: [__meta_kubernetes_node_name]
750 | regex: (.+)
751 | target_label: __metrics_path__
752 | replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor
753 |
754 | # Metric relabel configs to apply to samples before ingestion.
755 | # [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs)
756 | # metric_relabel_configs:
757 | # - action: labeldrop
758 | # regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone)
759 |
760 | # Scrape config for service endpoints.
761 | #
762 | # The relabeling allows the actual service scrape endpoint to be configured
763 | # via the following annotations:
764 | #
765 | # * `prometheus.io/scrape`: Only scrape services that have a value of
766 | # `true`, except if `prometheus.io/scrape-slow` is set to `true` as well.
767 | # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
768 | # to set this to `https` & most likely set the `tls_config` of the scrape config.
769 | # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
770 | # * `prometheus.io/port`: If the metrics are exposed on a different port to the
771 | # service then set this appropriately.
772 | # * `prometheus.io/param_`: If the metrics endpoint uses parameters
773 | # then you can set any parameter
774 | - job_name: 'kubernetes-service-endpoints'
775 | honor_labels: true
776 |
777 | kubernetes_sd_configs:
778 | - role: endpoints
779 |
780 | relabel_configs:
781 | - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
782 | action: keep
783 | regex: true
784 | - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow]
785 | action: drop
786 | regex: true
787 | - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
788 | action: replace
789 | target_label: __scheme__
790 | regex: (https?)
791 | - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
792 | action: replace
793 | target_label: __metrics_path__
794 | regex: (.+)
795 | - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
796 | action: replace
797 | target_label: __address__
798 | regex: (.+?)(?::\d+)?;(\d+)
799 | replacement: $1:$2
800 | - action: labelmap
801 | regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+)
802 | replacement: __param_$1
803 | - action: labelmap
804 | regex: __meta_kubernetes_service_label_(.+)
805 | - source_labels: [__meta_kubernetes_namespace]
806 | action: replace
807 | target_label: namespace
808 | - source_labels: [__meta_kubernetes_service_name]
809 | action: replace
810 | target_label: service
811 | - source_labels: [__meta_kubernetes_pod_node_name]
812 | action: replace
813 | target_label: node
814 |
815 | # Scrape config for slow service endpoints; same as above, but with a larger
816 | # timeout and a larger interval
817 | #
818 | # The relabeling allows the actual service scrape endpoint to be configured
819 | # via the following annotations:
820 | #
821 | # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true`
822 | # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
823 | # to set this to `https` & most likely set the `tls_config` of the scrape config.
824 | # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
825 | # * `prometheus.io/port`: If the metrics are exposed on a different port to the
826 | # service then set this appropriately.
827 | # * `prometheus.io/param_`: If the metrics endpoint uses parameters
828 | # then you can set any parameter
829 | - job_name: 'kubernetes-service-endpoints-slow'
830 | honor_labels: true
831 |
832 | scrape_interval: 5m
833 | scrape_timeout: 30s
834 |
835 | kubernetes_sd_configs:
836 | - role: endpoints
837 |
838 | relabel_configs:
839 | - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow]
840 | action: keep
841 | regex: true
842 | - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
843 | action: replace
844 | target_label: __scheme__
845 | regex: (https?)
846 | - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
847 | action: replace
848 | target_label: __metrics_path__
849 | regex: (.+)
850 | - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
851 | action: replace
852 | target_label: __address__
853 | regex: (.+?)(?::\d+)?;(\d+)
854 | replacement: $1:$2
855 | - action: labelmap
856 | regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+)
857 | replacement: __param_$1
858 | - action: labelmap
859 | regex: __meta_kubernetes_service_label_(.+)
860 | - source_labels: [__meta_kubernetes_namespace]
861 | action: replace
862 | target_label: namespace
863 | - source_labels: [__meta_kubernetes_service_name]
864 | action: replace
865 | target_label: service
866 | - source_labels: [__meta_kubernetes_pod_node_name]
867 | action: replace
868 | target_label: node
869 |
870 | - job_name: 'prometheus-pushgateway'
871 | honor_labels: true
872 |
873 | kubernetes_sd_configs:
874 | - role: service
875 |
876 | relabel_configs:
877 | - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
878 | action: keep
879 | regex: pushgateway
880 |
881 | # Example scrape config for probing services via the Blackbox Exporter.
882 | #
883 | # The relabeling allows the actual service scrape endpoint to be configured
884 | # via the following annotations:
885 | #
886 | # * `prometheus.io/probe`: Only probe services that have a value of `true`
887 | - job_name: 'kubernetes-services'
888 | honor_labels: true
889 |
890 | metrics_path: /probe
891 | params:
892 | module: [http_2xx]
893 |
894 | kubernetes_sd_configs:
895 | - role: service
896 |
897 | relabel_configs:
898 | - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
899 | action: keep
900 | regex: true
901 | - source_labels: [__address__]
902 | target_label: __param_target
903 | - target_label: __address__
904 | replacement: blackbox
905 | - source_labels: [__param_target]
906 | target_label: instance
907 | - action: labelmap
908 | regex: __meta_kubernetes_service_label_(.+)
909 | - source_labels: [__meta_kubernetes_namespace]
910 | target_label: namespace
911 | - source_labels: [__meta_kubernetes_service_name]
912 | target_label: service
913 |
914 | # Example scrape config for pods
915 | #
916 | # The relabeling allows the actual pod scrape endpoint to be configured via the
917 | # following annotations:
918 | #
919 | # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`,
920 | # except if `prometheus.io/scrape-slow` is set to `true` as well.
921 | # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
922 | # to set this to `https` & most likely set the `tls_config` of the scrape config.
923 | # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
924 | # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`.
925 | - job_name: 'kubernetes-pods'
926 | honor_labels: true
927 |
928 | kubernetes_sd_configs:
929 | - role: pod
930 |
931 | relabel_configs:
932 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
933 | action: keep
934 | regex: true
935 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow]
936 | action: drop
937 | regex: true
938 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme]
939 | action: replace
940 | regex: (https?)
941 | target_label: __scheme__
942 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
943 | action: replace
944 | target_label: __metrics_path__
945 | regex: (.+)
946 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip]
947 | action: replace
948 | regex: (\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})
949 | replacement: '[$2]:$1'
950 | target_label: __address__
951 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip]
952 | action: replace
953 | regex: (\d+);((([0-9]+?)(\.|$)){4})
954 | replacement: $2:$1
955 | target_label: __address__
956 | - action: labelmap
957 | regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+)
958 | replacement: __param_$1
959 | - action: labelmap
960 | regex: __meta_kubernetes_pod_label_(.+)
961 | - source_labels: [__meta_kubernetes_namespace]
962 | action: replace
963 | target_label: namespace
964 | - source_labels: [__meta_kubernetes_pod_name]
965 | action: replace
966 | target_label: pod
967 | - source_labels: [__meta_kubernetes_pod_phase]
968 | regex: Pending|Succeeded|Failed|Completed
969 | action: drop
970 |
971 | # Example scrape config for pods which should be scraped slower. A useful example
972 | # would be stackdriver-exporter which queries an API on every scrape of the pod
973 | #
974 | # The relabeling allows the actual pod scrape endpoint to be configured via the
975 | # following annotations:
976 | #
977 | # * `prometheus.io/scrape-slow`: Only scrape pods that have a value of `true`
978 | # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
979 | # to set this to `https` & most likely set the `tls_config` of the scrape config.
980 | # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
981 | # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`.
982 | - job_name: 'kubernetes-pods-slow'
983 | honor_labels: true
984 |
985 | scrape_interval: 5m
986 | scrape_timeout: 30s
987 |
988 | kubernetes_sd_configs:
989 | - role: pod
990 |
991 | relabel_configs:
992 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow]
993 | action: keep
994 | regex: true
995 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme]
996 | action: replace
997 | regex: (https?)
998 | target_label: __scheme__
999 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
1000 | action: replace
1001 | target_label: __metrics_path__
1002 | regex: (.+)
1003 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip]
1004 | action: replace
1005 | regex: (\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})
1006 | replacement: '[$2]:$1'
1007 | target_label: __address__
1008 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip]
1009 | action: replace
1010 | regex: (\d+);((([0-9]+?)(\.|$)){4})
1011 | replacement: $2:$1
1012 | target_label: __address__
1013 | - action: labelmap
1014 | regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+)
1015 | replacement: __param_$1
1016 | - action: labelmap
1017 | regex: __meta_kubernetes_pod_label_(.+)
1018 | - source_labels: [__meta_kubernetes_namespace]
1019 | action: replace
1020 | target_label: namespace
1021 | - source_labels: [__meta_kubernetes_pod_name]
1022 | action: replace
1023 | target_label: pod
1024 | - source_labels: [__meta_kubernetes_pod_phase]
1025 | regex: Pending|Succeeded|Failed|Completed
1026 | action: drop
1027 |
1028 | # adds additional scrape configs to prometheus.yml
1029 | # must be a string so you have to add a | after extraScrapeConfigs:
1030 | # example adds prometheus-blackbox-exporter scrape config
1031 | extraScrapeConfigs: ""
1032 | # - job_name: 'prometheus-blackbox-exporter'
1033 | # metrics_path: /probe
1034 | # params:
1035 | # module: [http_2xx]
1036 | # static_configs:
1037 | # - targets:
1038 | # - https://example.com
1039 | # relabel_configs:
1040 | # - source_labels: [__address__]
1041 | # target_label: __param_target
1042 | # - source_labels: [__param_target]
1043 | # target_label: instance
1044 | # - target_label: __address__
1045 | # replacement: prometheus-blackbox-exporter:9115
1046 |
1047 | # Adds option to add alert_relabel_configs to avoid duplicate alerts in alertmanager
1048 | # useful in H/A prometheus with different external labels but the same alerts
1049 | alertRelabelConfigs: {}
1050 | # alert_relabel_configs:
1051 | # - source_labels: [dc]
1052 | # regex: (.+)\d+
1053 | # target_label: dc
1054 |
1055 | networkPolicy:
1056 | ## Enable creation of NetworkPolicy resources.
1057 | ##
1058 | enabled: false
1059 |
1060 | # Force namespace of namespaced resources
1061 | forceNamespace: ""
1062 |
1063 | # Extra manifests to deploy as an array
1064 | extraManifests: []
1065 | # - apiVersion: v1
1066 | # kind: ConfigMap
1067 | # metadata:
1068 | # labels:
1069 | # name: prometheus-extra
1070 | # data:
1071 | # extra-data: "value"
1072 |
1073 | # Configuration of subcharts defined in Chart.yaml
1074 |
1075 | ## alertmanager sub-chart configurable values
1076 | ## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/alertmanager
1077 | ##
1078 | alertmanager:
1079 | ## If false, alertmanager will not be installed
1080 | ##
1081 | enabled: true
1082 |
1083 | persistence:
1084 | size: 2Gi
1085 |
1086 | podSecurityContext:
1087 | runAsUser: 65534
1088 | runAsNonRoot: true
1089 | runAsGroup: 65534
1090 | fsGroup: 65534
1091 |
1092 | ## kube-state-metrics sub-chart configurable values
1093 | ## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics
1094 | ##
1095 | kube-state-metrics:
1096 | ## If false, kube-state-metrics sub-chart will not be installed
1097 | ##
1098 | enabled: true
1099 |
1100 | ## prometheus-node-exporter sub-chart configurable values
1101 | ## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter
1102 | ##
1103 | prometheus-node-exporter:
1104 | ## If false, node-exporter will not be installed
1105 | ##
1106 | enabled: true
1107 |
1108 | rbac:
1109 | pspEnabled: false
1110 |
1111 | containerSecurityContext:
1112 | allowPrivilegeEscalation: false
1113 |
1114 | ## prometheus-pushgateway sub-chart configurable values
1115 | ## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-pushgateway
1116 | ##
1117 | prometheus-pushgateway:
1118 | ## If false, pushgateway will not be installed
1119 | ##
1120 | enabled: true
1121 |
1122 | # Optional service annotations
1123 | serviceAnnotations:
1124 | prometheus.io/probe: pushgateway
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/Readme.md:
--------------------------------------------------------------------------------
1 | # AWS EKS Tier-3 Architecture with Terraform
2 |
3 | This repository contains Terraform scripts to deploy a Tier-3 AWS EKS architecture. Tier-3 architecture typically involves a Presentation Tier, a Logic Tier, and a Database Tier.
4 |
5 | ## Prerequisites
6 |
7 | Ensure you have the following prerequisites installed:
8 |
9 | - [Terraform](https://www.terraform.io/)
10 | - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
11 | - AWS CLI configured with necessary credentials
12 |
13 | ## Getting Started
14 |
15 | 1. Clone the repository:
16 |
17 | ```bash
18 | git clone https://github.com/yourusername/aws-eks-tier3.git
19 | cd aws-eks-tier3
20 | ```
21 | 2. Initialize Terraform:
22 |
23 | ```bash
24 | terraform init
25 | ```
26 |
27 | 3. Adjust Configuration:
28 | - Modify the variables.tf file to set appropriate values for your AWS environment.
29 |
30 | 4. Deploy Infrastructure:
31 |
32 | ```bash
33 | terraform apply
34 | ```
35 | Confirm the deployment by typing `yes` when prompted.
36 |
37 | 5. Verify Deployment:
38 |
39 | ```bash
40 | # update the Kubernetes context first
41 | aws eks update-kubeconfig --name my-eks-cluster --region ap-northeast-1
42 | kubectl get nodes
43 | ```
44 |
45 | 6. Verify access:
46 |
47 | ```bash
48 | kubectl auth can-i "*" "*"
49 | kubectl get nodes
50 | ```
51 |
52 | 7. Verify autoscaler running:
53 | ```
54 | kubectl get pods -n kube-system
55 | ```
56 |
57 | 8. Check Autoscaler logs
58 | ```
59 | kubectl logs -f -n kube-system -l app=cluster-autoscaler
60 | ```
61 |
62 | 9. Check load balancer logs
63 | ```
64 | kubectl logs -f -n kube-system -l app.kubernetes.io/name=aws-load-balancer-controller
65 | ```
66 |
67 | 10. Update Kubeconfig
68 | Syntax: aws eks update-kubeconfig --region region-code --name your-cluster-name
69 | ```
70 | aws eks update-kubeconfig --region ap-northeast-1 --name my-eks-cluster
71 | ```
72 |
73 | 11. Create Namespace
74 |
75 | ```bash
76 | kubectl create ns demo-eks
77 |
78 | kubectl config set-context --current --namespace demo-eks
79 | ```
80 |
81 | # MongoDB Database Setup
82 |
83 | ## To create MongoDB Resources
84 |
85 | ```bash
86 | cd Manifests/mongo
87 | kubectl apply -f secrets.yaml
88 | kubectl apply -f deploy.yaml
89 | kubectl apply -f service.yaml
90 | ```
91 |
92 | ## Backend API Setup
93 |
94 | Create NodeJs API deployment by running the following command:
95 |
96 | ```bash
97 | kubectl apply -f backend-deployment.yaml
98 | kubectl apply -f backend-service.yaml
99 | ```
100 |
101 | ## Frontend setup
102 |
103 | Create the Frontend resource. In the terminal run the following command:
104 |
105 | ```bash
106 | kubectl apply -f frontend-deployment.yaml
107 | kubectl apply -f frontend-service.yaml
108 | ```
109 |
110 | Finally create the final load balancer to allow internet traffic:
111 |
112 | ```bash
113 | kubectl apply -f full_stack_lb.yaml
114 | ```
115 |
116 | # Grafana setup
117 | ## Verify Services
118 |
119 | ```bash
120 | kubectl get svc -n prometheus
121 | ```
122 |
123 | ## edit the Prometheus-grafana service:
124 | ```
125 | kubectl edit svc prometheus-grafana -n prometheus
126 | ```
127 |
128 | ## Change `type: ClusterIP` to `type: LoadBalancer`
129 |
130 | Username: admin
131 | Password: prom-operator
132 | Import Dashboard ID: 1860
133 |
134 | Explore more at: https://grafana.com/grafana/dashboards/
135 |
136 | ## Destroy Kubernetes resources and cluster
137 |
138 | ```
139 | cd ./Manifests
140 | kubectl delete -f .
141 | ```
142 | ## Remove AWS Resources to stop billing
143 |
144 | ```
145 | cd terraform
146 | terraform destroy --auto-approve
147 | ```
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/app/backend/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use the official Node.js 14 image as the base image
2 | FROM node:14
3 |
4 | # Set the working directory in the container
5 | WORKDIR /usr/src/app
6 |
7 | # Copy only the manifest files first so Docker can cache the
8 | # dependency layer when application code changes
9 | COPY package*.json ./
10 |
11 | # Install the application's dependencies inside the container
12 | RUN npm install
13 |
14 | # Copy the rest of the application code to the container
15 | COPY . .
16 |
17 | # Document the port the Express API listens on (index.js defaults to 8080)
18 | EXPOSE 8080
19 |
20 | # Specify the command to run when the container starts
21 | CMD [ "node", "index.js" ]
22 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/app/backend/db.js:
--------------------------------------------------------------------------------
1 | const mongoose = require("mongoose");
2 |
3 | module.exports = async () => {
4 | try {
5 | const connectionParams = {
6 | // user: process.env.MONGO_USERNAME,
7 | // pass: process.env.MONGO_PASSWORD,
8 | useNewUrlParser: true,
9 | // useCreateIndex: true,
10 | useUnifiedTopology: true,
11 | };
12 | const useDBAuth = process.env.USE_DB_AUTH || false;
13 | if(useDBAuth){
14 | connectionParams.user = process.env.MONGO_USERNAME;
15 | connectionParams.pass = process.env.MONGO_PASSWORD;
16 | }
17 | await mongoose.connect(
18 | process.env.MONGO_CONN_STR,
19 | connectionParams
20 | );
21 | console.log("Connected to database.");
22 | } catch (error) {
23 | console.log("Could not connect to database.", error);
24 | }
25 | };
26 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/app/backend/index.js:
--------------------------------------------------------------------------------
1 | const tasks = require("./routes/tasks");
2 | const connection = require("./db");
3 | const cors = require("cors");
4 | const express = require("express");
5 | const app = express();
6 |
7 | connection();
8 |
9 | app.use(express.json());
10 | app.use(cors());
11 |
12 | app.get('/ok', (req, res) => {
13 | res.status(200).send('ok')
14 | })
15 |
16 | app.use("/api/tasks", tasks);
17 |
18 | const port = process.env.PORT || 8080;
19 | app.listen(port, () => console.log(`Listening on port ${port}...`));
20 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/app/backend/models/task.js:
--------------------------------------------------------------------------------
1 | const mongoose = require("mongoose");
2 | const Schema = mongoose.Schema;
3 |
4 | const taskSchema = new Schema({
5 | task: {
6 | type: String,
7 | required: true,
8 | },
9 | completed: {
10 | type: Boolean,
11 | default: false,
12 | },
13 | });
14 |
15 | module.exports = mongoose.model("task", taskSchema);
16 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/app/backend/package-lock.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "server",
3 | "version": "1.0.0",
4 | "lockfileVersion": 1,
5 | "requires": true,
6 | "dependencies": {
7 | "@types/bson": {
8 | "version": "4.0.3",
9 | "resolved": "https://registry.npmjs.org/@types/bson/-/bson-4.0.3.tgz",
10 | "integrity": "sha512-mVRvYnTOZJz3ccpxhr3wgxVmSeiYinW+zlzQz3SXWaJmD1DuL05Jeq7nKw3SnbKmbleW5qrLG5vdyWe/A9sXhw==",
11 | "requires": {
12 | "@types/node": "*"
13 | }
14 | },
15 | "@types/mongodb": {
16 | "version": "3.6.18",
17 | "resolved": "https://registry.npmjs.org/@types/mongodb/-/mongodb-3.6.18.tgz",
18 | "integrity": "sha512-JSVFt9p0rTfZ4EgzXmVHUB3ue00xe3CRbQho8nXfImzEDDM4O7I3po1bwbWl/EIbLENxUreZxqLOc8lvcnLVPA==",
19 | "requires": {
20 | "@types/bson": "*",
21 | "@types/node": "*"
22 | }
23 | },
24 | "@types/node": {
25 | "version": "15.12.2",
26 | "resolved": "https://registry.npmjs.org/@types/node/-/node-15.12.2.tgz",
27 | "integrity": "sha512-zjQ69G564OCIWIOHSXyQEEDpdpGl+G348RAKY0XXy9Z5kU9Vzv1GMNnkar/ZJ8dzXB3COzD9Mo9NtRZ4xfgUww=="
28 | },
29 | "accepts": {
30 | "version": "1.3.7",
31 | "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz",
32 | "integrity": "sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==",
33 | "requires": {
34 | "mime-types": "~2.1.24",
35 | "negotiator": "0.6.2"
36 | }
37 | },
38 | "array-flatten": {
39 | "version": "1.1.1",
40 | "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
41 | "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI="
42 | },
43 | "bl": {
44 | "version": "2.2.1",
45 | "resolved": "https://registry.npmjs.org/bl/-/bl-2.2.1.tgz",
46 | "integrity": "sha512-6Pesp1w0DEX1N550i/uGV/TqucVL4AM/pgThFSN/Qq9si1/DF9aIHs1BxD8V/QU0HoeHO6cQRTAuYnLPKq1e4g==",
47 | "requires": {
48 | "readable-stream": "^2.3.5",
49 | "safe-buffer": "^5.1.1"
50 | }
51 | },
52 | "bluebird": {
53 | "version": "3.5.1",
54 | "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.5.1.tgz",
55 | "integrity": "sha512-MKiLiV+I1AA596t9w1sQJ8jkiSr5+ZKi0WKrYGUn6d1Fx+Ij4tIj+m2WMQSGczs5jZVxV339chE8iwk6F64wjA=="
56 | },
57 | "body-parser": {
58 | "version": "1.19.0",
59 | "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.19.0.tgz",
60 | "integrity": "sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw==",
61 | "requires": {
62 | "bytes": "3.1.0",
63 | "content-type": "~1.0.4",
64 | "debug": "2.6.9",
65 | "depd": "~1.1.2",
66 | "http-errors": "1.7.2",
67 | "iconv-lite": "0.4.24",
68 | "on-finished": "~2.3.0",
69 | "qs": "6.7.0",
70 | "raw-body": "2.4.0",
71 | "type-is": "~1.6.17"
72 | }
73 | },
74 | "bson": {
75 | "version": "1.1.6",
76 | "resolved": "https://registry.npmjs.org/bson/-/bson-1.1.6.tgz",
77 | "integrity": "sha512-EvVNVeGo4tHxwi8L6bPj3y3itEvStdwvvlojVxxbyYfoaxJ6keLgrTuKdyfEAszFK+H3olzBuafE0yoh0D1gdg=="
78 | },
79 | "bytes": {
80 | "version": "3.1.0",
81 | "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz",
82 | "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg=="
83 | },
84 | "content-disposition": {
85 | "version": "0.5.3",
86 | "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.3.tgz",
87 | "integrity": "sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g==",
88 | "requires": {
89 | "safe-buffer": "5.1.2"
90 | }
91 | },
92 | "content-type": {
93 | "version": "1.0.4",
94 | "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz",
95 | "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA=="
96 | },
97 | "cookie": {
98 | "version": "0.4.0",
99 | "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.0.tgz",
100 | "integrity": "sha512-+Hp8fLp57wnUSt0tY0tHEXh4voZRDnoIrZPqlo3DPiI4y9lwg/jqx+1Om94/W6ZaPDOUbnjOt/99w66zk+l1Xg=="
101 | },
102 | "cookie-signature": {
103 | "version": "1.0.6",
104 | "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
105 | "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw="
106 | },
107 | "core-util-is": {
108 | "version": "1.0.2",
109 | "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
110 | "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac="
111 | },
112 | "cors": {
113 | "version": "2.8.5",
114 | "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.5.tgz",
115 | "integrity": "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==",
116 | "requires": {
117 | "object-assign": "^4",
118 | "vary": "^1"
119 | }
120 | },
121 | "debug": {
122 | "version": "2.6.9",
123 | "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
124 | "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
125 | "requires": {
126 | "ms": "2.0.0"
127 | }
128 | },
129 | "denque": {
130 | "version": "1.5.0",
131 | "resolved": "https://registry.npmjs.org/denque/-/denque-1.5.0.tgz",
132 | "integrity": "sha512-CYiCSgIF1p6EUByQPlGkKnP1M9g0ZV3qMIrqMqZqdwazygIA/YP2vrbcyl1h/WppKJTdl1F85cXIle+394iDAQ=="
133 | },
134 | "depd": {
135 | "version": "1.1.2",
136 | "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
137 | "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak="
138 | },
139 | "destroy": {
140 | "version": "1.0.4",
141 | "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz",
142 | "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA="
143 | },
144 | "ee-first": {
145 | "version": "1.1.1",
146 | "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
147 | "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0="
148 | },
149 | "encodeurl": {
150 | "version": "1.0.2",
151 | "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
152 | "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k="
153 | },
154 | "escape-html": {
155 | "version": "1.0.3",
156 | "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
157 | "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg="
158 | },
159 | "etag": {
160 | "version": "1.8.1",
161 | "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
162 | "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc="
163 | },
164 | "express": {
165 | "version": "4.17.1",
166 | "resolved": "https://registry.npmjs.org/express/-/express-4.17.1.tgz",
167 | "integrity": "sha512-mHJ9O79RqluphRrcw2X/GTh3k9tVv8YcoyY4Kkh4WDMUYKRZUq0h1o0w2rrrxBqM7VoeUVqgb27xlEMXTnYt4g==",
168 | "requires": {
169 | "accepts": "~1.3.7",
170 | "array-flatten": "1.1.1",
171 | "body-parser": "1.19.0",
172 | "content-disposition": "0.5.3",
173 | "content-type": "~1.0.4",
174 | "cookie": "0.4.0",
175 | "cookie-signature": "1.0.6",
176 | "debug": "2.6.9",
177 | "depd": "~1.1.2",
178 | "encodeurl": "~1.0.2",
179 | "escape-html": "~1.0.3",
180 | "etag": "~1.8.1",
181 | "finalhandler": "~1.1.2",
182 | "fresh": "0.5.2",
183 | "merge-descriptors": "1.0.1",
184 | "methods": "~1.1.2",
185 | "on-finished": "~2.3.0",
186 | "parseurl": "~1.3.3",
187 | "path-to-regexp": "0.1.7",
188 | "proxy-addr": "~2.0.5",
189 | "qs": "6.7.0",
190 | "range-parser": "~1.2.1",
191 | "safe-buffer": "5.1.2",
192 | "send": "0.17.1",
193 | "serve-static": "1.14.1",
194 | "setprototypeof": "1.1.1",
195 | "statuses": "~1.5.0",
196 | "type-is": "~1.6.18",
197 | "utils-merge": "1.0.1",
198 | "vary": "~1.1.2"
199 | }
200 | },
201 | "finalhandler": {
202 | "version": "1.1.2",
203 | "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.2.tgz",
204 | "integrity": "sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA==",
205 | "requires": {
206 | "debug": "2.6.9",
207 | "encodeurl": "~1.0.2",
208 | "escape-html": "~1.0.3",
209 | "on-finished": "~2.3.0",
210 | "parseurl": "~1.3.3",
211 | "statuses": "~1.5.0",
212 | "unpipe": "~1.0.0"
213 | }
214 | },
215 | "forwarded": {
216 | "version": "0.2.0",
217 | "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
218 | "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow=="
219 | },
220 | "fresh": {
221 | "version": "0.5.2",
222 | "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
223 | "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac="
224 | },
225 | "http-errors": {
226 | "version": "1.7.2",
227 | "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.7.2.tgz",
228 | "integrity": "sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg==",
229 | "requires": {
230 | "depd": "~1.1.2",
231 | "inherits": "2.0.3",
232 | "setprototypeof": "1.1.1",
233 | "statuses": ">= 1.5.0 < 2",
234 | "toidentifier": "1.0.0"
235 | }
236 | },
237 | "iconv-lite": {
238 | "version": "0.4.24",
239 | "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
240 | "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
241 | "requires": {
242 | "safer-buffer": ">= 2.1.2 < 3"
243 | }
244 | },
245 | "inherits": {
246 | "version": "2.0.3",
247 | "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
248 | "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4="
249 | },
250 | "ipaddr.js": {
251 | "version": "1.9.1",
252 | "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
253 | "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g=="
254 | },
255 | "isarray": {
256 | "version": "1.0.0",
257 | "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
258 | "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE="
259 | },
260 | "kareem": {
261 | "version": "2.3.2",
262 | "resolved": "https://registry.npmjs.org/kareem/-/kareem-2.3.2.tgz",
263 | "integrity": "sha512-STHz9P7X2L4Kwn72fA4rGyqyXdmrMSdxqHx9IXon/FXluXieaFA6KJ2upcHAHxQPQ0LeM/OjLrhFxifHewOALQ=="
264 | },
265 | "media-typer": {
266 | "version": "0.3.0",
267 | "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
268 | "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g="
269 | },
270 | "memory-pager": {
271 | "version": "1.5.0",
272 | "resolved": "https://registry.npmjs.org/memory-pager/-/memory-pager-1.5.0.tgz",
273 | "integrity": "sha512-ZS4Bp4r/Zoeq6+NLJpP+0Zzm0pR8whtGPf1XExKLJBAczGMnSi3It14OiNCStjQjM6NU1okjQGSxgEZN8eBYKg==",
274 | "optional": true
275 | },
276 | "merge-descriptors": {
277 | "version": "1.0.1",
278 | "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz",
279 | "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E="
280 | },
281 | "methods": {
282 | "version": "1.1.2",
283 | "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
284 | "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4="
285 | },
286 | "mime": {
287 | "version": "1.6.0",
288 | "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
289 | "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg=="
290 | },
291 | "mime-db": {
292 | "version": "1.48.0",
293 | "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.48.0.tgz",
294 | "integrity": "sha512-FM3QwxV+TnZYQ2aRqhlKBMHxk10lTbMt3bBkMAp54ddrNeVSfcQYOOKuGuy3Ddrm38I04If834fOUSq1yzslJQ=="
295 | },
296 | "mime-types": {
297 | "version": "2.1.31",
298 | "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.31.tgz",
299 | "integrity": "sha512-XGZnNzm3QvgKxa8dpzyhFTHmpP3l5YNusmne07VUOXxou9CqUqYa/HBy124RqtVh/O2pECas/MOcsDgpilPOPg==",
300 | "requires": {
301 | "mime-db": "1.48.0"
302 | }
303 | },
304 | "mongodb": {
305 | "version": "3.6.8",
306 | "resolved": "https://registry.npmjs.org/mongodb/-/mongodb-3.6.8.tgz",
307 | "integrity": "sha512-sDjJvI73WjON1vapcbyBD3Ao9/VN3TKYY8/QX9EPbs22KaCSrQ5rXo5ZZd44tWJ3wl3FlnrFZ+KyUtNH6+1ZPQ==",
308 | "requires": {
309 | "bl": "^2.2.1",
310 | "bson": "^1.1.4",
311 | "denque": "^1.4.1",
312 | "optional-require": "^1.0.3",
313 | "safe-buffer": "^5.1.2",
314 | "saslprep": "^1.0.0"
315 | }
316 | },
317 | "mongoose": {
318 | "version": "5.12.14",
319 | "resolved": "https://registry.npmjs.org/mongoose/-/mongoose-5.12.14.tgz",
320 | "integrity": "sha512-1lMRY8cfGYFWHqe5DLnVgNQDyD0fEbSoWIQe9Mllt/ZtayZ5XUFQt+o5VKrB44vsT5cLNzgwEFO0NvwTwqLluQ==",
321 | "requires": {
322 | "@types/mongodb": "^3.5.27",
323 | "bson": "^1.1.4",
324 | "kareem": "2.3.2",
325 | "mongodb": "3.6.8",
326 | "mongoose-legacy-pluralize": "1.0.2",
327 | "mpath": "0.8.3",
328 | "mquery": "3.2.5",
329 | "ms": "2.1.2",
330 | "regexp-clone": "1.0.0",
331 | "safe-buffer": "5.2.1",
332 | "sift": "13.5.2",
333 | "sliced": "1.0.1"
334 | },
335 | "dependencies": {
336 | "ms": {
337 | "version": "2.1.2",
338 | "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
339 | "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
340 | },
341 | "safe-buffer": {
342 | "version": "5.2.1",
343 | "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
344 | "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ=="
345 | }
346 | }
347 | },
348 | "mongoose-legacy-pluralize": {
349 | "version": "1.0.2",
350 | "resolved": "https://registry.npmjs.org/mongoose-legacy-pluralize/-/mongoose-legacy-pluralize-1.0.2.tgz",
351 | "integrity": "sha512-Yo/7qQU4/EyIS8YDFSeenIvXxZN+ld7YdV9LqFVQJzTLye8unujAWPZ4NWKfFA+RNjh+wvTWKY9Z3E5XM6ZZiQ=="
352 | },
353 | "mpath": {
354 | "version": "0.8.3",
355 | "resolved": "https://registry.npmjs.org/mpath/-/mpath-0.8.3.tgz",
356 | "integrity": "sha512-eb9rRvhDltXVNL6Fxd2zM9D4vKBxjVVQNLNijlj7uoXUy19zNDsIif5zR+pWmPCWNKwAtqyo4JveQm4nfD5+eA=="
357 | },
358 | "mquery": {
359 | "version": "3.2.5",
360 | "resolved": "https://registry.npmjs.org/mquery/-/mquery-3.2.5.tgz",
361 | "integrity": "sha512-VjOKHHgU84wij7IUoZzFRU07IAxd5kWJaDmyUzQlbjHjyoeK5TNeeo8ZsFDtTYnSgpW6n/nMNIHvE3u8Lbrf4A==",
362 | "requires": {
363 | "bluebird": "3.5.1",
364 | "debug": "3.1.0",
365 | "regexp-clone": "^1.0.0",
366 | "safe-buffer": "5.1.2",
367 | "sliced": "1.0.1"
368 | },
369 | "dependencies": {
370 | "debug": {
371 | "version": "3.1.0",
372 | "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz",
373 | "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==",
374 | "requires": {
375 | "ms": "2.0.0"
376 | }
377 | }
378 | }
379 | },
380 | "ms": {
381 | "version": "2.0.0",
382 | "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
383 | "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
384 | },
385 | "negotiator": {
386 | "version": "0.6.2",
387 | "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.2.tgz",
388 | "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw=="
389 | },
390 | "object-assign": {
391 | "version": "4.1.1",
392 | "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
393 | "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM="
394 | },
395 | "on-finished": {
396 | "version": "2.3.0",
397 | "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
398 | "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=",
399 | "requires": {
400 | "ee-first": "1.1.1"
401 | }
402 | },
403 | "optional-require": {
404 | "version": "1.0.3",
405 | "resolved": "https://registry.npmjs.org/optional-require/-/optional-require-1.0.3.tgz",
406 | "integrity": "sha512-RV2Zp2MY2aeYK5G+B/Sps8lW5NHAzE5QClbFP15j+PWmP+T9PxlJXBOOLoSAdgwFvS4t0aMR4vpedMkbHfh0nA=="
407 | },
408 | "parseurl": {
409 | "version": "1.3.3",
410 | "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
411 | "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ=="
412 | },
413 | "path-to-regexp": {
414 | "version": "0.1.7",
415 | "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz",
416 | "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w="
417 | },
418 | "process-nextick-args": {
419 | "version": "2.0.1",
420 | "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
421 | "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag=="
422 | },
423 | "proxy-addr": {
424 | "version": "2.0.7",
425 | "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
426 | "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
427 | "requires": {
428 | "forwarded": "0.2.0",
429 | "ipaddr.js": "1.9.1"
430 | }
431 | },
432 | "qs": {
433 | "version": "6.7.0",
434 | "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz",
435 | "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ=="
436 | },
437 | "range-parser": {
438 | "version": "1.2.1",
439 | "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
440 | "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg=="
441 | },
442 | "raw-body": {
443 | "version": "2.4.0",
444 | "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.4.0.tgz",
445 | "integrity": "sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q==",
446 | "requires": {
447 | "bytes": "3.1.0",
448 | "http-errors": "1.7.2",
449 | "iconv-lite": "0.4.24",
450 | "unpipe": "1.0.0"
451 | }
452 | },
453 | "readable-stream": {
454 | "version": "2.3.7",
455 | "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz",
456 | "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==",
457 | "requires": {
458 | "core-util-is": "~1.0.0",
459 | "inherits": "~2.0.3",
460 | "isarray": "~1.0.0",
461 | "process-nextick-args": "~2.0.0",
462 | "safe-buffer": "~5.1.1",
463 | "string_decoder": "~1.1.1",
464 | "util-deprecate": "~1.0.1"
465 | }
466 | },
467 | "regexp-clone": {
468 | "version": "1.0.0",
469 | "resolved": "https://registry.npmjs.org/regexp-clone/-/regexp-clone-1.0.0.tgz",
470 | "integrity": "sha512-TuAasHQNamyyJ2hb97IuBEif4qBHGjPHBS64sZwytpLEqtBQ1gPJTnOaQ6qmpET16cK14kkjbazl6+p0RRv0yw=="
471 | },
472 | "safe-buffer": {
473 | "version": "5.1.2",
474 | "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
475 | "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
476 | },
477 | "safer-buffer": {
478 | "version": "2.1.2",
479 | "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
480 | "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
481 | },
482 | "saslprep": {
483 | "version": "1.0.3",
484 | "resolved": "https://registry.npmjs.org/saslprep/-/saslprep-1.0.3.tgz",
485 | "integrity": "sha512-/MY/PEMbk2SuY5sScONwhUDsV2p77Znkb/q3nSVstq/yQzYJOH/Azh29p9oJLsl3LnQwSvZDKagDGBsBwSooag==",
486 | "optional": true,
487 | "requires": {
488 | "sparse-bitfield": "^3.0.3"
489 | }
490 | },
491 | "send": {
492 | "version": "0.17.1",
493 | "resolved": "https://registry.npmjs.org/send/-/send-0.17.1.tgz",
494 | "integrity": "sha512-BsVKsiGcQMFwT8UxypobUKyv7irCNRHk1T0G680vk88yf6LBByGcZJOTJCrTP2xVN6yI+XjPJcNuE3V4fT9sAg==",
495 | "requires": {
496 | "debug": "2.6.9",
497 | "depd": "~1.1.2",
498 | "destroy": "~1.0.4",
499 | "encodeurl": "~1.0.2",
500 | "escape-html": "~1.0.3",
501 | "etag": "~1.8.1",
502 | "fresh": "0.5.2",
503 | "http-errors": "~1.7.2",
504 | "mime": "1.6.0",
505 | "ms": "2.1.1",
506 | "on-finished": "~2.3.0",
507 | "range-parser": "~1.2.1",
508 | "statuses": "~1.5.0"
509 | },
510 | "dependencies": {
511 | "ms": {
512 | "version": "2.1.1",
513 | "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz",
514 | "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg=="
515 | }
516 | }
517 | },
518 | "serve-static": {
519 | "version": "1.14.1",
520 | "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.14.1.tgz",
521 | "integrity": "sha512-JMrvUwE54emCYWlTI+hGrGv5I8dEwmco/00EvkzIIsR7MqrHonbD9pO2MOfFnpFntl7ecpZs+3mW+XbQZu9QCg==",
522 | "requires": {
523 | "encodeurl": "~1.0.2",
524 | "escape-html": "~1.0.3",
525 | "parseurl": "~1.3.3",
526 | "send": "0.17.1"
527 | }
528 | },
529 | "setprototypeof": {
530 | "version": "1.1.1",
531 | "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.1.tgz",
532 | "integrity": "sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw=="
533 | },
534 | "sift": {
535 | "version": "13.5.2",
536 | "resolved": "https://registry.npmjs.org/sift/-/sift-13.5.2.tgz",
537 | "integrity": "sha512-+gxdEOMA2J+AI+fVsCqeNn7Tgx3M9ZN9jdi95939l1IJ8cZsqS8sqpJyOkic2SJk+1+98Uwryt/gL6XDaV+UZA=="
538 | },
539 | "sliced": {
540 | "version": "1.0.1",
541 | "resolved": "https://registry.npmjs.org/sliced/-/sliced-1.0.1.tgz",
542 | "integrity": "sha1-CzpmK10Ewxd7GSa+qCsD+Dei70E="
543 | },
544 | "sparse-bitfield": {
545 | "version": "3.0.3",
546 | "resolved": "https://registry.npmjs.org/sparse-bitfield/-/sparse-bitfield-3.0.3.tgz",
547 | "integrity": "sha1-/0rm5oZWBWuks+eSqzM004JzyhE=",
548 | "optional": true,
549 | "requires": {
550 | "memory-pager": "^1.0.2"
551 | }
552 | },
553 | "statuses": {
554 | "version": "1.5.0",
555 | "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz",
556 | "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow="
557 | },
558 | "string_decoder": {
559 | "version": "1.1.1",
560 | "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
561 | "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
562 | "requires": {
563 | "safe-buffer": "~5.1.0"
564 | }
565 | },
566 | "toidentifier": {
567 | "version": "1.0.0",
568 | "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz",
569 | "integrity": "sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw=="
570 | },
571 | "type-is": {
572 | "version": "1.6.18",
573 | "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
574 | "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
575 | "requires": {
576 | "media-typer": "0.3.0",
577 | "mime-types": "~2.1.24"
578 | }
579 | },
580 | "unpipe": {
581 | "version": "1.0.0",
582 | "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
583 | "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw="
584 | },
585 | "util-deprecate": {
586 | "version": "1.0.2",
587 | "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
588 | "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8="
589 | },
590 | "utils-merge": {
591 | "version": "1.0.1",
592 | "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
593 | "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM="
594 | },
595 | "vary": {
596 | "version": "1.1.2",
597 | "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
598 | "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw="
599 | }
600 | }
601 | }
602 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/app/backend/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "server",
3 | "version": "1.0.0",
4 | "description": "",
5 | "main": "index.js",
6 | "scripts": {
7 | "test": "echo \"Error: no test specified\" && exit 1"
8 | },
9 | "keywords": [],
10 | "author": "",
11 | "license": "ISC",
12 | "dependencies": {
13 | "cors": "^2.8.5",
14 | "express": "^4.17.1",
15 | "mongoose": "^5.12.14"
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/app/backend/routes/tasks.js:
--------------------------------------------------------------------------------
const Task = require("../models/task");
const express = require("express");
const router = express.Router();

// POST / — create a task from the request body and return the saved doc.
router.post("/", async (req, res) => {
  try {
    const task = await new Task(req.body).save();
    res.send(task);
  } catch (error) {
    // Report the failure as a server error instead of sending the
    // error object with a misleading 200 status.
    res.status(500).send(error);
  }
});

// GET / — list all tasks.
router.get("/", async (req, res) => {
  try {
    const tasks = await Task.find();
    res.send(tasks);
  } catch (error) {
    res.status(500).send(error);
  }
});

// PUT /:id — update a task; responds with the updated document.
router.put("/:id", async (req, res) => {
  try {
    const task = await Task.findOneAndUpdate(
      { _id: req.params.id },
      req.body,
      // Return the post-update document; Mongoose's default returns
      // the document as it was *before* the update.
      { new: true }
    );
    res.send(task);
  } catch (error) {
    res.status(500).send(error);
  }
});

// DELETE /:id — delete a task; responds with the deleted document.
router.delete("/:id", async (req, res) => {
  try {
    const task = await Task.findByIdAndDelete(req.params.id);
    res.send(task);
  } catch (error) {
    res.status(500).send(error);
  }
});

module.exports = router;
45 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/app/frontend/Dockerfile:
--------------------------------------------------------------------------------
# Image for the React frontend; runs the react-scripts dev server.
FROM node:14

# Set the working directory in the container
WORKDIR /usr/src/app

# Copy only the manifests first so the dependency layer stays cached
# until package.json / package-lock.json actually change.
COPY package*.json ./

# `npm ci` installs exactly what package-lock.json pins, giving
# reproducible builds (`npm install` may rewrite the lockfile).
RUN npm ci

# Copy the rest of the application code to the container
COPY . .

# react-scripts serves on 3000 by default — NOTE(review): confirm no
# PORT override is supplied at deploy time.
EXPOSE 3000

# Specify the command to run when the container starts
CMD [ "npm", "start" ]
18 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/app/frontend/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "client",
3 | "version": "0.1.0",
4 | "private": true,
5 | "dependencies": {
6 | "@material-ui/core": "^4.11.4",
7 | "@testing-library/jest-dom": "^5.14.1",
8 | "@testing-library/react": "^11.2.7",
9 | "@testing-library/user-event": "^12.8.3",
10 | "axios": "^0.21.1",
11 | "react": "^17.0.2",
12 | "react-dom": "^17.0.2",
13 | "react-scripts": "4.0.3",
14 | "web-vitals": "^1.1.2"
15 | },
16 | "scripts": {
17 | "start": "react-scripts start",
18 | "build": "react-scripts build",
19 | "test": "react-scripts test",
20 | "eject": "react-scripts eject"
21 | },
22 | "eslintConfig": {
23 | "extends": [
24 | "react-app",
25 | "react-app/jest"
26 | ]
27 | },
28 | "browserslist": {
29 | "production": [
30 | ">0.2%",
31 | "not dead",
32 | "not op_mini all"
33 | ],
34 | "development": [
35 | "last 1 chrome version",
36 | "last 1 firefox version",
37 | "last 1 safari version"
38 | ]
39 | }
40 | }
41 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/app/frontend/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yashpimple/Terraform-AWS-Architecture/90340062c6eb31dabcbeed4c45df4819119e111b/3-Tier-EKS-Architecture/app/frontend/public/favicon.ico
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/app/frontend/public/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
12 |
13 |
17 |
18 |
27 | TO-DO App
28 |
29 |
30 | You need to enable JavaScript to run this app.
31 |
32 |
42 |
43 |
44 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/app/frontend/public/logo192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yashpimple/Terraform-AWS-Architecture/90340062c6eb31dabcbeed4c45df4819119e111b/3-Tier-EKS-Architecture/app/frontend/public/logo192.png
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/app/frontend/public/logo512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yashpimple/Terraform-AWS-Architecture/90340062c6eb31dabcbeed4c45df4819119e111b/3-Tier-EKS-Architecture/app/frontend/public/logo512.png
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/app/frontend/public/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "short_name": "React App",
3 | "name": "Create React App Sample",
4 | "icons": [
5 | {
6 | "src": "favicon.ico",
7 | "sizes": "64x64 32x32 24x24 16x16",
8 | "type": "image/x-icon"
9 | },
10 | {
11 | "src": "logo192.png",
12 | "type": "image/png",
13 | "sizes": "192x192"
14 | },
15 | {
16 | "src": "logo512.png",
17 | "type": "image/png",
18 | "sizes": "512x512"
19 | }
20 | ],
21 | "start_url": ".",
22 | "display": "standalone",
23 | "theme_color": "#000000",
24 | "background_color": "#ffffff"
25 | }
26 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/app/frontend/public/robots.txt:
--------------------------------------------------------------------------------
1 | # https://www.robotstxt.org/robotstxt.html
2 | User-agent: *
3 | Disallow:
4 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/app/frontend/src/App.css:
--------------------------------------------------------------------------------
/* Full-viewport application background. */
.App {
  width: 100vw;
  height: 100vh;
  background: #f5f5f5;
}

/* Centered page title. */
.heading {
  font-size: 20px;
  font-weight: bold;
  text-align: center;
}

/* Flexbox centering helper. */
.flex {
  display: flex;
  justify-content: center;
  align-items: center;
}

/* Card holding the input form and the task list. */
.container {
  width: 500px;
  min-height: 300px;
  padding: 10px;
}

/* A single task row. */
.task_container {
  margin: 10px 0;
  cursor: pointer;
}

/* Task text expands to fill the row. */
.task {
  flex-grow: 1;
  margin-left: 10px;
}

/* Strike-through styling, presumably applied to completed tasks —
   confirm against the (JSX) markup. */
.line_through {
  text-decoration: line-through;
}
38 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/app/frontend/src/App.js:
--------------------------------------------------------------------------------
1 | import React from "react";
2 | import Tasks from "./Tasks";
3 | import { Paper, TextField } from "@material-ui/core";
4 | import { Checkbox, Button } from "@material-ui/core";
5 | import "./App.css";
6 |
7 | class App extends Tasks {
8 | state = { tasks: [], currentTask: "" };
9 | render() {
10 | const { tasks } = this.state;
11 | return (
12 |
13 |
14 | TO-DO
15 |
38 |
39 | {tasks.map((task) => (
40 |
44 | this.handleUpdate(task._id)}
47 | color="primary"
48 | />
49 |
56 | {task.task}
57 |
58 | this.handleDelete(task._id)}
60 | color="secondary"
61 | >
62 | delete
63 |
64 |
65 | ))}
66 |
67 |
68 |
69 | );
70 | }
71 | }
72 |
73 | export default App;
74 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/app/frontend/src/Tasks.js:
--------------------------------------------------------------------------------
import { Component } from "react";
import {
  addTask,
  getTasks,
  updateTask,
  deleteTask,
} from "./services/taskServices";

/**
 * Container component holding the task list state plus the CRUD
 * handlers. Rendering is supplied by a subclass (App extends Tasks).
 */
class Tasks extends Component {
  state = { tasks: [], currentTask: "" };

  // Load existing tasks once, when the component mounts.
  async componentDidMount() {
    try {
      const { data } = await getTasks();
      this.setState({ tasks: data });
    } catch (error) {
      console.log(error);
    }
  }

  // Keep the controlled input's value in state.
  handleChange = ({ currentTarget: input }) => {
    this.setState({ currentTask: input.value });
  };

  // Create the task on the backend, then append it to the list.
  handleSubmit = async (e) => {
    e.preventDefault();
    try {
      const { data } = await addTask({ task: this.state.currentTask });
      // Build a new array instead of push()-ing into the array already
      // held in state: in-place mutation can defeat React's change
      // detection.
      this.setState({
        tasks: [...this.state.tasks, data],
        currentTask: "",
      });
    } catch (error) {
      console.log(error);
    }
  };

  // Optimistically toggle completion; revert if the API call fails.
  handleUpdate = async (currentTask) => {
    const originalTasks = this.state.tasks;
    try {
      const tasks = [...originalTasks];
      const index = tasks.findIndex((task) => task._id === currentTask);
      if (index === -1) return; // unknown id — nothing to toggle
      tasks[index] = { ...tasks[index], completed: !tasks[index].completed };
      this.setState({ tasks });
      await updateTask(currentTask, {
        completed: tasks[index].completed,
      });
    } catch (error) {
      this.setState({ tasks: originalTasks });
      console.log(error);
    }
  };

  // Optimistically remove the task; revert if the API call fails.
  handleDelete = async (currentTask) => {
    const originalTasks = this.state.tasks;
    try {
      this.setState({
        tasks: originalTasks.filter((task) => task._id !== currentTask),
      });
      await deleteTask(currentTask);
    } catch (error) {
      this.setState({ tasks: originalTasks });
      console.log(error);
    }
  };
}

export default Tasks;
71 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/app/frontend/src/index.css:
--------------------------------------------------------------------------------
/* Base typography for the whole document (create-react-app defaults). */
body {
  margin: 0;
  font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen',
    'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue',
    sans-serif;
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
}

/* Monospace stack for inline code. */
code {
  font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New',
    monospace;
}
14 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/app/frontend/src/index.js:
--------------------------------------------------------------------------------
1 | import React from "react";
2 | import ReactDOM from "react-dom";
3 | import "./index.css";
4 | import App from "./App";
5 |
6 | ReactDOM.render(
7 |
8 |
9 | ,
10 | document.getElementById("root")
11 | );
12 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/app/frontend/src/services/taskServices.js:
--------------------------------------------------------------------------------
import axios from "axios";

// Base URL of the tasks API. Falls back to the local backend when
// REACT_APP_BACKEND_URL is not injected at build time, so the client
// does not call axios with an undefined URL during local development.
const apiUrl =
  process.env.REACT_APP_BACKEND_URL || "http://localhost:8080/api/tasks";

// Fetch every task.
export function getTasks() {
  return axios.get(apiUrl);
}

// Create a new task; `task` is the request payload.
export function addTask(task) {
  return axios.post(apiUrl, task);
}

// Update the task with the given id.
export function updateTask(id, task) {
  return axios.put(apiUrl + "/" + id, task);
}

// Delete the task with the given id.
export function deleteTask(id) {
  return axios.delete(apiUrl + "/" + id);
}
19 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/terraform/autoscaler-iam.tf:
--------------------------------------------------------------------------------
# IRSA role for the cluster-autoscaler: grants the managed
# cluster-autoscaler policy, scoped to this cluster's node-group ASGs.
module "cluster_autoscaler_irsa_role" {
  source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
  version = "5.3.1"

  role_name = "cluster-autoscaler"
  attach_cluster_autoscaler_policy = true
  cluster_autoscaler_cluster_ids = [module.eks.cluster_id]

  # Trust policy: only the kube-system/cluster-autoscaler service
  # account may assume this role via the cluster's OIDC provider.
  oidc_providers = {
    ex = {
      provider_arn = module.eks.oidc_provider_arn
      namespace_service_accounts = ["kube-system:cluster-autoscaler"]
    }
  }
}
16 |
17 |
18 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/terraform/autoscaler-manifests.tf:
--------------------------------------------------------------------------------
# kubectl provider authenticates to the EKS API with a short-lived
# token from `aws eks get-token`; no kubeconfig file is read.
provider "kubectl" {
  host = data.aws_eks_cluster.default.endpoint
  cluster_ca_certificate = base64decode(data.aws_eks_cluster.default.certificate_authority[0].data)
  load_config_file = false

  exec {
    api_version = "client.authentication.k8s.io/v1beta1"
    args = ["eks", "get-token", "--cluster-name", data.aws_eks_cluster.default.id]
    command = "aws"
  }
}
12 |
# ServiceAccount for the autoscaler; the role-arn annotation binds it
# to the IRSA role declared in autoscaler-iam.tf.
resource "kubectl_manifest" "service_account" {
  yaml_body = <<-EOF
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      labels:
        k8s-addon: cluster-autoscaler.addons.k8s.io
        k8s-app: cluster-autoscaler
      name: cluster-autoscaler
      namespace: kube-system
      annotations:
        eks.amazonaws.com/role-arn: ${module.cluster_autoscaler_irsa_role.iam_role_arn}
  EOF
}
27 |
# Namespaced permissions: the autoscaler stores its status and the
# priority-expander config in ConfigMaps in kube-system.
resource "kubectl_manifest" "role" {
  yaml_body = <<-EOF
    apiVersion: rbac.authorization.k8s.io/v1
    kind: Role
    metadata:
      name: cluster-autoscaler
      namespace: kube-system
      labels:
        k8s-addon: cluster-autoscaler.addons.k8s.io
        k8s-app: cluster-autoscaler
    rules:
    - apiGroups: [""]
      resources: ["configmaps"]
      verbs: ["create","list","watch"]
    - apiGroups: [""]
      resources: ["configmaps"]
      resourceNames: ["cluster-autoscaler-status", "cluster-autoscaler-priority-expander"]
      verbs: ["delete", "get", "update", "watch"]
  EOF
}
48 |
# Binds the namespaced Role above to the autoscaler's ServiceAccount.
resource "kubectl_manifest" "role_binding" {
  yaml_body = <<-EOF
    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
      name: cluster-autoscaler
      namespace: kube-system
      labels:
        k8s-addon: cluster-autoscaler.addons.k8s.io
        k8s-app: cluster-autoscaler
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: Role
      name: cluster-autoscaler
    subjects:
    - kind: ServiceAccount
      name: cluster-autoscaler
      namespace: kube-system
  EOF
}
69 |
# Cluster-wide permissions: read access to the workloads and storage
# objects the autoscaler must inspect, plus pod eviction and
# coordination leases for leader election.
resource "kubectl_manifest" "cluster_role" {
  yaml_body = <<-EOF
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      name: cluster-autoscaler
      labels:
        k8s-addon: cluster-autoscaler.addons.k8s.io
        k8s-app: cluster-autoscaler
    rules:
    - apiGroups: [""]
      resources: ["events", "endpoints"]
      verbs: ["create", "patch"]
    - apiGroups: [""]
      resources: ["pods/eviction"]
      verbs: ["create"]
    - apiGroups: [""]
      resources: ["pods/status"]
      verbs: ["update"]
    - apiGroups: [""]
      resources: ["endpoints"]
      resourceNames: ["cluster-autoscaler"]
      verbs: ["get", "update"]
    - apiGroups: [""]
      resources: ["nodes"]
      verbs: ["watch", "list", "get", "update"]
    - apiGroups: [""]
      resources:
      - "namespaces"
      - "pods"
      - "services"
      - "replicationcontrollers"
      - "persistentvolumeclaims"
      - "persistentvolumes"
      verbs: ["watch", "list", "get"]
    - apiGroups: ["extensions"]
      resources: ["replicasets", "daemonsets"]
      verbs: ["watch", "list", "get"]
    - apiGroups: ["policy"]
      resources: ["poddisruptionbudgets"]
      verbs: ["watch", "list"]
    - apiGroups: ["apps"]
      resources: ["statefulsets", "replicasets", "daemonsets"]
      verbs: ["watch", "list", "get"]
    - apiGroups: ["storage.k8s.io"]
      resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities"]
      verbs: ["watch", "list", "get"]
    - apiGroups: ["batch", "extensions"]
      resources: ["jobs"]
      verbs: ["get", "list", "watch", "patch"]
    - apiGroups: ["coordination.k8s.io"]
      resources: ["leases"]
      verbs: ["create"]
    - apiGroups: ["coordination.k8s.io"]
      resourceNames: ["cluster-autoscaler"]
      resources: ["leases"]
      verbs: ["get", "update"]
  EOF
}
129 |
# Binds the ClusterRole above to the autoscaler's ServiceAccount.
resource "kubectl_manifest" "cluster_role_binding" {
  yaml_body = <<-EOF
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: cluster-autoscaler
      labels:
        k8s-addon: cluster-autoscaler.addons.k8s.io
        k8s-app: cluster-autoscaler
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: cluster-autoscaler
    subjects:
    - kind: ServiceAccount
      name: cluster-autoscaler
      namespace: kube-system
  EOF
}
149 |
# The cluster-autoscaler Deployment itself. Node groups are discovered
# via ASG tags (k8s.io/cluster-autoscaler/enabled + the per-cluster
# tag), so no group list is hard-coded. NOTE(review): the image tag
# (v1.23.1) should track the cluster's Kubernetes minor version —
# confirm against var.cluster_version.
resource "kubectl_manifest" "deployment" {
  yaml_body = <<-EOF
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: cluster-autoscaler
      namespace: kube-system
      labels:
        app: cluster-autoscaler
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: cluster-autoscaler
      template:
        metadata:
          labels:
            app: cluster-autoscaler
        spec:
          priorityClassName: system-cluster-critical
          securityContext:
            runAsNonRoot: true
            runAsUser: 65534
            fsGroup: 65534
          serviceAccountName: cluster-autoscaler
          containers:
            - image: k8s.gcr.io/autoscaling/cluster-autoscaler:v1.23.1
              name: cluster-autoscaler
              resources:
                limits:
                  cpu: 100m
                  memory: 600Mi
                requests:
                  cpu: 100m
                  memory: 600Mi
              command:
                - ./cluster-autoscaler
                - --v=4
                - --stderrthreshold=info
                - --cloud-provider=aws
                - --skip-nodes-with-local-storage=false
                - --expander=least-waste
                - --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/${module.eks.cluster_id}
              volumeMounts:
                - name: ssl-certs
                  mountPath: /etc/ssl/certs/ca-certificates.crt
                  readOnly: true
          volumes:
            - name: ssl-certs
              hostPath:
                path: "/etc/ssl/certs/ca-bundle.crt"
  EOF
}
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/terraform/backend.tf:
--------------------------------------------------------------------------------
# Remote Terraform state in S3. NOTE(review): no DynamoDB table is
# configured for state locking, so concurrent applies are unprotected —
# consider adding `dynamodb_table` if more than one operator uses this.
terraform {
  backend "s3" {
    bucket = "eks-tfstate-bucket"
    key = "eks/terraform.tfstate"
    region = "ap-northeast-1"
  }
}
8 |
9 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/terraform/eks.tf:
--------------------------------------------------------------------------------
# EKS cluster with two managed node groups: an on-demand "general"
# group and a tainted SPOT group for interruption-tolerant workloads.
module "eks" {
  source = "terraform-aws-modules/eks/aws"
  version = "18.29.0"

  cluster_name = var.cluster_name
  cluster_version = var.cluster_version

  # Worker nodes are placed in the private subnets only.
  vpc_id = module.vpc.vpc_id
  subnet_ids = module.vpc.private_subnets

  # API endpoint reachable both from inside the VPC and publicly.
  cluster_endpoint_private_access = true
  cluster_endpoint_public_access = true


  # IAM Roles for Service Accounts — required by the IRSA modules in
  # autoscaler-iam.tf and helm-lb.tf.
  enable_irsa = true
  eks_managed_node_group_defaults = {
    disk_size = 50
  }

  eks_managed_node_groups = {
    general = {
      desired_size = 1
      min_size = 1
      max_size = 10

      labels = {
        role = "general"
      }

      instance_types = ["t3.small"]
      capacity_type = "ON_DEMAND"
    }

    spot = {
      desired_size = 1
      min_size = 1
      max_size = 10

      labels = {
        role = "spot"
      }

      # Keep workloads off spot nodes unless they explicitly tolerate
      # interruption.
      taints = [{
        key = "market"
        value = "spot"
        effect = "NO_SCHEDULE"
      }]

      instance_types = ["t3.micro"]
      capacity_type = "SPOT"
    }
  }


}
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/terraform/helm-lb.tf:
--------------------------------------------------------------------------------
# IRSA role for the AWS Load Balancer Controller: attaches the managed
# controller policy and trusts only its kube-system service account.
module "aws_load_balancer_controller_irsa_role" {
  source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
  version = "5.30.0"

  role_name = "aws-load-balancer-controller"

  attach_load_balancer_controller_policy = true

  oidc_providers = {
    ex = {
      provider_arn = module.eks.oidc_provider_arn
      namespace_service_accounts = ["kube-system:aws-load-balancer-controller"]
    }
  }
}
16 |
# Installs the AWS Load Balancer Controller chart and annotates its
# service account with the IRSA role ARN (dots in the annotation key
# are escaped for Helm's `set` path syntax).
resource "helm_release" "aws_load_balancer_controller" {
  name = "aws-load-balancer-controller"

  repository = "https://aws.github.io/eks-charts"
  chart = "aws-load-balancer-controller"
  namespace = "kube-system"
  version = "1.4.4"

  set {
    name = "replicaCount"
    value = 1
  }

  # The controller must know which cluster it manages.
  set {
    name = "clusterName"
    value = module.eks.cluster_id
  }

  # Must match the service account name trusted by the IRSA role above.
  set {
    name = "serviceAccount.name"
    value = "aws-load-balancer-controller"
  }

  set {
    name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
    value = module.aws_load_balancer_controller_irsa_role.iam_role_arn
  }
}
45 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/terraform/helm-provider.tf:
--------------------------------------------------------------------------------
# Helm provider authenticated against the EKS cluster.
# NOTE(review): data.aws_eks_cluster.default is declared in another file of
# this configuration — verify it exists and targets the module.eks cluster.
provider "helm" {
  kubernetes {
    host                   = data.aws_eks_cluster.default.endpoint
    cluster_ca_certificate = base64decode(data.aws_eks_cluster.default.certificate_authority[0].data)
    # Fetch a short-lived token via the AWS CLI on each run instead of
    # persisting static credentials in state.
    exec {
      api_version = "client.authentication.k8s.io/v1beta1"
      args        = ["eks", "get-token", "--cluster-name", data.aws_eks_cluster.default.id]
      command     = "aws"
    }
  }
}
12 |
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/terraform/iam.tf:
--------------------------------------------------------------------------------
# Policy allowing eks:DescribeCluster — the minimum needed to run
# `aws eks update-kubeconfig` and connect to the cluster with kubectl.
module "allow_eks_access_iam_policy" {
  source  = "terraform-aws-modules/iam/aws//modules/iam-policy"
  version = "5.3.1"

  name          = "allow-eks-access"
  create_policy = true

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = [
          "eks:DescribeCluster",
        ]
        Effect   = "Allow"
        Resource = "*"
      },
    ]
  })
}
21 |
# Assumable "eks-admin" role carrying the eks:DescribeCluster policy.
# The trust policy allows any principal in this account (account root) to
# assume the role; who may actually assume it is restricted by the
# sts:AssumeRole policy defined further down in this file.
module "eks_admins_iam_role" {
  source  = "terraform-aws-modules/iam/aws//modules/iam-assumable-role"
  version = "5.3.1"

  role_name         = "eks-admin"
  create_role       = true
  role_requires_mfa = false

  custom_role_policy_arns = [module.allow_eks_access_iam_policy.arn]

  # vpc_owner_id is used here as a convenient source of the account id.
  trusted_role_arns = [
    "arn:aws:iam::${module.vpc.vpc_owner_id}:root"
  ]
}
36 |
# IAM user with no access key and no console login profile; it gets cluster
# access by assuming the eks-admin role via group membership below.
module "user1_iam_user" {
  source  = "terraform-aws-modules/iam/aws//modules/iam-user"
  version = "5.3.1"

  name                          = "user1"
  create_iam_access_key         = false
  create_iam_user_login_profile = false

  # Allow `terraform destroy` to remove the user even if credentials were
  # later created for it outside Terraform.
  force_destroy = true
}
47 |
# Policy permitting sts:AssumeRole on the eks-admin role only; attached to
# the eks-admin group so its members can escalate into that role.
module "allow_assume_eks_admins_iam_policy" {
  source  = "terraform-aws-modules/iam/aws//modules/iam-policy"
  version = "5.3.1"

  name          = "allow-assume-eks-admin-iam-role"
  create_policy = true

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = [
          "sts:AssumeRole",
        ]
        Effect   = "Allow"
        Resource = module.eks_admins_iam_role.iam_role_arn
      },
    ]
  })
}
68 |
# Group whose members may assume the eks-admin role; user1 is enrolled.
module "eks_admins_iam_group" {
  source  = "terraform-aws-modules/iam/aws//modules/iam-group-with-policies"
  version = "5.3.1"

  name                              = "eks-admin"
  attach_iam_self_management_policy = false
  create_group                      = true
  group_users                       = [module.user1_iam_user.iam_user_name]
  custom_group_policy_arns          = [module.allow_assume_eks_admins_iam_policy.arn]
}
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/terraform/igw.tf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yashpimple/Terraform-AWS-Architecture/90340062c6eb31dabcbeed4c45df4819119e111b/3-Tier-EKS-Architecture/terraform/igw.tf
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/terraform/monitorng.tf:
--------------------------------------------------------------------------------
1 | # resource "time_sleep" "wait_for_kubernetes" {
2 |
3 | # depends_on = [
4 | # module.eks
5 | # ]
6 |
7 | # create_duration = "20s"
8 | # }
9 |
10 | # resource "kubernetes_namespace" "kube-namespace" {
11 | # depends_on = [time_sleep.wait_for_kubernetes]
12 | # metadata {
13 | # name = "prometheus"
14 | # }
15 | # }
16 |
17 | # resource "helm_release" "prometheus" {
18 | # depends_on = [kubernetes_namespace.kube-namespace, time_sleep.wait_for_kubernetes]
19 | # name = "prometheus"
20 | # repository = "https://prometheus-community.github.io/helm-charts"
21 | # chart = "kube-prometheus-stack"
22 | # namespace = kubernetes_namespace.kube-namespace.id
23 | # create_namespace = true
24 | # version = "51.3.0"
25 | # values = [
26 | # file("values.yaml")
27 | # ]
28 | # timeout = 2000
29 |
30 | # set {
31 | # name = "podSecurityPolicy.enabled"
32 | # value = true
33 | # }
34 |
35 | # set {
36 | # name = "server.persistentVolume.enabled"
37 | # value = false
38 | # }
39 |
40 |
41 | # set {
42 | # name = "server\\.resources"
43 | # value = yamlencode({
44 | # limits = {
45 | # cpu = "200m"
46 | # memory = "50Mi"
47 | # }
48 | # requests = {
49 | # cpu = "100m"
50 | # memory = "30Mi"
51 | # }
52 | # })
53 | # }
54 | # }
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/terraform/provider.tf:
--------------------------------------------------------------------------------
# AWS provider; region comes from variable.tf (defaults to ap-northeast-1).
provider "aws" {
  region = var.region
}
4 |
terraform {
  required_providers {
    # The configuration uses the AWS provider throughout; pin it explicitly
    # instead of relying on Terraform's implicit hashicorp/aws resolution
    # (matches the style used in EKS-Cluster/1-providers.tf).
    aws = {
      source  = "hashicorp/aws"
      version = ">= 4.0"
    }

    kubectl = {
      source  = "gavinbunney/kubectl"
      version = ">= 1.7.0"
    }

    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.6.0"
    }
  }

  required_version = "~> 1.0"
}
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/terraform/variable.tf:
--------------------------------------------------------------------------------
# Name of the EKS cluster to create.
variable "cluster_name" {
  type    = string
  default = "my-eks-cluster"
}
5 |
# Kubernetes version for the EKS control plane.
# Declared as a string: a `number` type silently drops trailing zeros
# (e.g. 1.30 becomes 1.3), producing an invalid EKS version string.
# Callers passing a bare number are still accepted via type conversion.
variable "cluster_version" {
  type    = string
  default = "1.25"
}
10 |
# AWS region all resources are deployed into.
variable "region" {
  type    = string
  default = "ap-northeast-1"
}
15 |
# Availability zones used for the VPC's subnets (one private and one public
# subnet per AZ — see vpc.tf). Uses an explicit element type: the bare
# `list` keyword is a deprecated pre-0.12 shorthand equivalent to list(any).
variable "availability_zones" {
  type    = list(string)
  default = ["ap-northeast-1a", "ap-northeast-1b"]
}
20 |
# EKS add-ons to install, each pinned to an explicit version.
# NOTE(review): this variable is not referenced in any file shown here —
# verify it is consumed (e.g. by aws_eks_addon resources) elsewhere.
variable "addons" {
  type = list(object({
    name    = string
    version = string
  }))

  default = [
    {
      name    = "kube-proxy"
      version = "v1.25.6-eksbuild.1"
    },
    {
      name    = "vpc-cni"
      version = "v1.12.2-eksbuild.1"
    },
    {
      name    = "coredns"
      version = "v1.9.3-eksbuild.2"
    },
    {
      name    = "aws-ebs-csi-driver"
      version = "v1.23.0-eksbuild.1"
    }
  ]
}
--------------------------------------------------------------------------------
/3-Tier-EKS-Architecture/terraform/vpc.tf:
--------------------------------------------------------------------------------
# VPC for the EKS cluster: two private subnets (worker nodes) and two
# public subnets (load balancers, NAT) across the configured AZs.
module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "5.5.1"

  name = "main"
  cidr = "10.0.0.0/16"

  azs             = var.availability_zones
  private_subnets = ["10.0.0.0/19", "10.0.32.0/19"]
  public_subnets  = ["10.0.64.0/19", "10.0.96.0/19"]

  # Subnet discovery tags used by Kubernetes / the AWS Load Balancer
  # Controller when provisioning internet-facing and internal LBs.
  public_subnet_tags = {
    "kubernetes.io/role/elb" = "1"
  }

  private_subnet_tags = {
    "kubernetes.io/role/internal-elb" = "1"
  }

  # DNS support/hostnames are required for EKS.
  enable_dns_hostnames = true
  enable_dns_support   = true

  # One shared NAT gateway for all private subnets (cheapest option).
  # FIX: the original set both single_nat_gateway and one_nat_gateway_per_az
  # to true — these inputs are mutually exclusive, and the module silently
  # resolves the conflict in favor of single_nat_gateway. Make that explicit.
  enable_nat_gateway     = true
  single_nat_gateway     = true
  one_nat_gateway_per_az = false

  tags = {
    Environment = "dev"
  }
}
--------------------------------------------------------------------------------
/EKS-Cluster/0-locals.tf:
--------------------------------------------------------------------------------
# Values shared across this configuration (referenced as local.*).
locals {
  env         = "staging"    # environment name, used in resource naming
  region      = "us-east-2"  # AWS region for the provider in 1-providers.tf
  zone1       = "us-east-2a" # first availability zone
  zone2       = "us-east-2b" # second availability zone
  eks_name    = "demo"       # base name of the EKS cluster
  eks_version = "1.29"       # Kubernetes control-plane version
}
9 |
--------------------------------------------------------------------------------
/EKS-Cluster/1-providers.tf:
--------------------------------------------------------------------------------
# AWS provider pinned to the region defined in 0-locals.tf.
provider "aws" {
  region = local.region
}

terraform {
  required_version = ">= 1.0"

  # Pin the AWS provider to the 5.49+ minor series.
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.49"
    }
  }
}
15 |
--------------------------------------------------------------------------------
/EKS-Cluster/10-add-manager-role.tf:
--------------------------------------------------------------------------------
1 | # data "aws_caller_identity" "current" {}
2 |
3 | # resource "aws_iam_role" "eks_admin" {
4 | # name = "${local.env}-${local.eks_name}-eks-admin"
5 |
6 | # assume_role_policy = <