├── .gitignore
├── README.md
├── image
├── Gitlab-provider-banner.jpg
├── banner.png
├── docker-provider-banner.jpg
├── github-provider-banner.jpg
├── kubernetes-provider-banner.jpg
└── terraform.png
├── part01-docker-provider
├── README.md
├── main.tf
├── providers.tf
└── variables.tf
├── part02-github-provider
├── README.md
├── main.tf
├── providers.tf
└── variables.tf
├── part03-kubernetes-provider
├── .gitignore
├── README.md
├── grafana-prometheus
│ ├── README.md
│ ├── main.tf
│ ├── prometheus-dashboard.json
│ ├── providers.tf
│ ├── resources.tf
│ └── variables.tf
├── jenkins
│ ├── README.md
│ ├── jenkins.tf
│ ├── main.tf
│ ├── modules.tf
│ ├── providers.tf
│ └── variables.tf
├── kubernetes-dashboard
│ ├── README.md
│ ├── kube-dashboard.tf
│ ├── main.tf
│ └── providers.tf
├── metallb
│ ├── README.md
│ ├── configmap.yml
│ ├── main.tf
│ ├── metallb.tf
│ ├── providers.tf
│ └── variables.tf
└── nginx-ingress-controller
│ ├── README.md
│ ├── main.tf
│ ├── nginx-ingress.tf
│ └── providers.tf
├── part04-gitlab-provider
├── .gitignore
├── README.md
├── providers.tf
├── resources.tf
├── terraform.tfvars.example
├── variables.tf
└── versions.tf
├── part05-HA-proxy-provider
├── README.md
├── main.tf
└── providers.tf
├── part06-grafana-provider
├── README.md
├── assets
│ └── grafana.png
├── main.tf
└── providers.tf
├── part07-CiscoDevNet-provider
├── README.md
├── main.tf
└── variables.tf
├── part08-vsphere-provider
├── README.md
├── main.tf
└── variables.tf
├── part09-helm-provider
├── kubernetes-dashboard
│ ├── README.md
│ └── main.tf
├── nfs-client-provisioner
│ ├── README.md
│ ├── main.tf
│ └── variables.tf
└── redis
│ ├── README.md
│ ├── main.tf
│ ├── providers.tf
│ ├── terraform.tfvars
│ └── variables.tf
├── part10-maas-provider
├── README.md
├── main.tf
├── outputs.tf
├── providers.tf
└── variables.tf
├── part11-consul-provider
└── README.md
├── part12-vault-provider
├── README.md
├── auth.tf
├── main.tf
├── policies.tf
├── policies
│ ├── admin-policy.hcl
│ └── eaas-client-policy.hcl
└── secrets.tf
├── part13-keycloak-provider
└── README.md
├── part14-azure-devops-provider
├── README.md
├── main.tf
└── providers.tf
├── part15-openstack-provider
├── 1-providers.tf
├── 2-variables.tf
├── 3-network.tf
├── 4-keypairs.tf
├── 5-get_images.tf
├── 6-flavors.tf
├── 7-sec_groups.tf
├── 8-computes.tf
├── 9-locals.tf
├── README.md
├── clouds.yaml.example
└── templates
│ └── hosts.ini
├── part16-kvm-provider
├── README
├── cloud_init.cfg
├── main.tf
├── network_config.cfg
├── terraform.tfvars
└── variables.tf
├── part17-arvancloud-minio-cluster
├── README.md
├── inventory.tmpl
├── main.tf
├── modules
│ ├── abrak
│ │ └── ubuntu22
│ │ │ ├── data.tf
│ │ │ ├── main.tf
│ │ │ └── output.tf
│ ├── private-network
│ │ ├── main.tf
│ │ └── output.tf
│ ├── public-ip
│ │ ├── main.tf
│ │ └── output.tf
│ └── ssh-key
│ │ ├── main.tf
│ │ └── output.tf
├── output.tf
├── provider.tf
└── variable.tf
├── part18-hetznercloud-provider
├── data_sources
│ ├── locations.txt
│ └── main.tf
└── simple-vm
│ ├── README.md
│ ├── main.tf
│ ├── output.tf
│ ├── variables.tf
│ └── versions.tf
├── part19-cloudflare-provider
└── create-record
│ ├── README.md
│ ├── main.tf
│ ├── outputs.tf
│ ├── variables.tf
│ └── versions.tf
├── part20-arvancloud-abrak
├── .gitignore
├── main.tf
├── modules
│ └── abrak
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── main.tf
│ │ ├── outputs.tf
│ │ ├── variables.tf
│ │ └── versions.tf
├── output.tf
├── providers.tf
└── variables.tf
├── part21-proxmox-provider
├── README.md
├── main.tf
├── terraform.tfvars
└── variables.tf
├── part22-aws-provider-custom-modules
├── .gitignore
├── README.md
├── main.tf
├── module-1
│ ├── main.tf
│ ├── output.tf
│ └── variables.tf
└── variables.tf
└── part23-boundary-provider
├── README.md
├── main.tf
├── output.tf
├── provider.tf
└── variables.tf
/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 |
4 | # .tfstate files
5 | *.tfstate
6 | *.tfstate.*
7 |
8 | # terraform lockfile
9 | .terraform.lock.hcl
10 |
11 | # Crash log files
12 | crash.log
13 |
14 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as
15 | # password, private keys, and other secrets. These should not be part of version
16 | # control as they are data points which are potentially sensitive and subject
17 | # to change depending on the environment.
18 | #
19 | #*.tfvars
20 |
21 | # Ignore override files as they are usually used to override resources locally and so
22 | # are not checked in
23 | override.tf
24 | override.tf.json
25 | *_override.tf
26 | *_override.tf.json
27 |
28 | # Include override files you do wish to add to version control using negated pattern
29 | #
30 | # !example_override.tf
31 |
32 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
33 | # example: *tfplan*
34 |
35 | # Ignore CLI configuration files
36 | .terraformrc
37 | terraform.rc
38 |
39 | # Examples
40 | debezium*
41 | *.tar.gz
42 |
43 | # Infra Cost extension auto generated directory
44 | .infracost
45 | part18-hetznercloud-provider/simple-vm/ansible_inventory.ini
46 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Terraform-tutorial
2 |
3 |
4 |
5 |
In this repository, you can find examples of Terraform providers which can help deepen your Terraform knowledge.
6 |
7 | ## Table of Contents
8 |
9 | 1. [Where to start](#how-and-where-to-start)
10 | 2. [Providers list](#providers-list)
11 | 3. [How to contribute](#how-to-contribute)
12 | 4. [Tasks](#tasks)
13 |
14 | ## How and where to start?
15 |
16 | You can find a list of providers below. They can be a guide for you to learn different providers for different services, for example Kubernetes, Docker, Cloudflare, and more.
17 | It's better to start from [Terraform Tutorial - Part 1 - Docker Introduction](./part01-docker-provider/) and move as the list goes.
18 |
19 | ## Providers List:
20 |
21 | | Icon | num | Terraform Tutorials | Status |
22 | | ---- | --- | ----------------------------------------------------------------- | -------------- |
23 | | 🐳 | 1 | [Docker Provider](./part01-docker-provider/README.md) | ✅ Done |
24 | | 🐙 | 2 | [GitHub Provider](./part02-github-provider/README.md) | ✅ Done |
25 | | ☸️ | 3 | [Kubernetes Provider](./part03-kubernetes-provider/README.md) | ✅ Done |
26 | | 🦊 | 4 | [GitLab Provider](./part04-gitlab-provider/README.md) | ✅ Done |
27 | | 🍀 | 5 | [HAProxy Provider](./part05-HA-proxy-provider/README.md) | ✅ Done |
28 | | 📈 | 6 | [Grafana Provider](./part06-grafana-provider/README.md) | ✅ Done |
29 | | 🤖 | 7 | [Cisco DevNet Provider](./part07-CiscoDevNet-provider/README.md) | ✅ Done |
30 | | 🖥️ | 8 | [Vsphere Provider](./part08-vsphere-provider/README.md) | ✅ Done |
31 | | 🎓 | 9 | [Helm Provider](./part09-helm-provider/README.md) | ✅ Done |
32 | | 🚧 | 10 | [MAAS Provider](./part10-maas-provider/README.md) | 🚧 In Progress |
33 | | 🌀 | 11 | [Consul Provider](./part11-consul-provider/README.md) | 🚧 In Progress |
34 | | 🔑 | 12 | [Vault Provider](./part12-vault-provider/README.md) | ✅ Done |
35 | | 🎓 | 13 | [Keycloak Provider](./part13-keycloak-provider/README.md) | 🚧 In Progress |
36 | | ☁️ | 14 | [Azure DevOps Provider](./part14-azure-devops-provider/README.md) | 🚧 In Progress |
37 | | 🌎 | 15 | [OpenStack Provider](./part15-openstack-provider/README.md) | ✅ Done |
38 | | 🦄 | 18 | [HetznerCloud Provider](./part18-hetznercloud-provider/README.md) | ✅ Done |
39 | | 🌩️ | 19 | [Cloudflare Provider](./part19-cloudflare-provider/README.md) | ✅ Done |
40 |
41 | ## How to contribute?
42 |
43 | We always appreciate and welcome contributions and here is a summarized guide for you:
44 |
45 | - Check the list and find providers with pending or in-progress status. Help to write and complete them.
46 | - Check for issues. Issues are a very important section in a repository and you can search for your first task (reported issue, requests) in the issues section.
47 | - Start exploring as you move along the course and find problems that you can fix.
48 |
--------------------------------------------------------------------------------
/image/Gitlab-provider-banner.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/devopshobbies/terraform-templates/0f13f06f8279ef8c183a2ca726d72ff7fc328b72/image/Gitlab-provider-banner.jpg
--------------------------------------------------------------------------------
/image/banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/devopshobbies/terraform-templates/0f13f06f8279ef8c183a2ca726d72ff7fc328b72/image/banner.png
--------------------------------------------------------------------------------
/image/docker-provider-banner.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/devopshobbies/terraform-templates/0f13f06f8279ef8c183a2ca726d72ff7fc328b72/image/docker-provider-banner.jpg
--------------------------------------------------------------------------------
/image/github-provider-banner.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/devopshobbies/terraform-templates/0f13f06f8279ef8c183a2ca726d72ff7fc328b72/image/github-provider-banner.jpg
--------------------------------------------------------------------------------
/image/kubernetes-provider-banner.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/devopshobbies/terraform-templates/0f13f06f8279ef8c183a2ca726d72ff7fc328b72/image/kubernetes-provider-banner.jpg
--------------------------------------------------------------------------------
/image/terraform.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/devopshobbies/terraform-templates/0f13f06f8279ef8c183a2ca726d72ff7fc328b72/image/terraform.png
--------------------------------------------------------------------------------
/part01-docker-provider/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # About The Template
4 |
5 | This Terraform configuration file describes infrastructure as code for deploying a Docker container running the Nginx web server.
6 |
7 | ## What is happening in template ?
8 |
9 | In the `main.tf` file three resources are defined: A docker `image` resource, A docker `container` resource and a Docker `network` resource.
10 |
11 | ### The Docker image resource:
12 |
13 | This resource pulls the latest version of the Nginx image specified in the `var.nginx_image_version` defined in `variables.tf` and allows the option to keep the image locally in the `var.keep_locally` and force to remove the image `var.force_remove`.
14 |
15 | ```hcl
16 | resource "docker_image" "nginx" {
17 | name = var.nginx_image_version
18 | keep_locally = var.keep_locally
19 | force_remove = var.force_remove
20 | }
21 | ```
22 |
23 | ### The Docker container resource:
24 |
25 | This resource creates a container based on pulled Nginx image by the `docker image resource`. The `docker-container` resource maps the container's port `80` to the host's port `8000`, sets the container hostname, domain name and restart policy, and mounts a host directory as a volume in the container.
26 | You can check the variables in `variables.tf` file to check what is passed.
27 |
28 | ```hcl
29 | resource "docker_container" "nginx" {
30 | # image_id replaces the deprecated "latest" attribute.
31 | image = docker_image.nginx.image_id
32 | hostname = var.container_host_name
33 | name = var.container_name
34 | domainname = var.container_host_name
35 | restart = var.container_restart
36 | ports {
37 | internal = var.internal_container_port
38 | external = var.external_container_port
39 | }
40 |
41 | volumes {
42 | container_path = var.container_path
43 | host_path = var.host_path
44 | read_only = var.read_only
45 | }
46 | }
47 | ```
48 |
49 | ### The Docker network resource.
50 |
51 | And the last resource creates a private network for the container as defined in the `var.container_network`. Finally, a simple note will be printed `Everything is ok`
52 |
53 | ```h
54 | resource "docker_network" "private_network" {
55 | name = var.container_network.name
56 | driver = var.container_network.driver
57 | }
58 |
59 |
60 | output "output_p" {
61 | value = "every thing is ok!"
62 | }
63 | ```
64 |
65 | ## How to run ?
66 |
67 | ```
68 | $ terraform init
69 | $ terraform validate
70 | $ terraform fmt
71 | $ terraform plan
72 | $ terraform apply
73 | ```
74 |
75 | To delete:
76 |
77 | ```
78 | $ terraform destroy
79 | ```
80 |
--------------------------------------------------------------------------------
/part01-docker-provider/main.tf:
--------------------------------------------------------------------------------
1 | # Create a docker image resource
2 | # -> docker pull nginx:latest
3 | resource "docker_image" "nginx" {
4 | name = var.nginx_image_version
5 | keep_locally = var.keep_locally
6 | force_remove = var.force_remove
7 | }
8 |
9 | # Create a docker container resource
10 | # -> same as 'docker run --name nginx -p 8000:80 -d nginx:latest' (ports come from variables.tf)
11 | resource "docker_container" "nginx" {
12 | # image_id replaces the deprecated "latest" attribute of docker_image.
13 | image = docker_image.nginx.image_id
14 | hostname = var.container_host_name
15 | name = var.container_name
16 | domainname = var.container_host_name
17 | restart = var.container_restart
18 | ports {
19 | internal = var.internal_container_port
20 | external = var.external_container_port
21 | }
22 |
23 | volumes {
24 | container_path = var.container_path
25 | host_path = var.host_path
26 | read_only = var.read_only
27 | }
28 | }
29 |
30 | # Private bridge network for the container (name/driver from var.container_network).
31 | resource "docker_network" "private_network" {
32 | name = var.container_network.name
33 | driver = var.container_network.driver
34 | }
35 |
36 |
37 | output "output_p" {
38 | value = "every thing is ok!"
39 | }
--------------------------------------------------------------------------------
/part01-docker-provider/providers.tf:
--------------------------------------------------------------------------------
1 | # Adding terraform version and terraform provider version on Terraform block
2 | # Set the required provider and versions
3 | terraform {
4 | required_providers {
5 | # We recommend pinning to the specific version of the Docker Provider you're using
6 | # since new versions are released frequently
7 | docker = {
8 | source = "kreuzwerker/docker"
9 | version = "~> 2.13.0"
10 | }
11 | }
12 | }
13 |
14 | provider "docker" {
15 | host = "tcp://localhost:2376" # NOTE(review): 2376 is conventionally Docker's TLS port but no TLS material is configured here — confirm the daemon setup or use unix:///var/run/docker.sock
16 | }
17 |
--------------------------------------------------------------------------------
/part01-docker-provider/variables.tf:
--------------------------------------------------------------------------------
1 | variable "nginx_version" {
2 | default = "latest" # was the literal string "docker_image.nginx.latest" — a resource reference mistakenly quoted; not referenced by main.tf
3 | type = string
4 | description = "nginx image tag (currently unused; main.tf reads nginx_image_version instead)"
5 | }
6 | variable "nginx_image_version" {
7 | default = "nginx:latest"
8 | type = string
9 | description = "nginx image name and tag pulled by docker_image.nginx"
10 | }
11 | variable "container_name" {
12 | default = "nginx"
13 | type = string
14 | description = "value of the container name"
15 | }
16 | variable "container_host_name" {
17 | default = "nginx"
18 | type = string
19 | description = "value of the hostname of the container"
20 | }
21 |
22 | variable "internal_container_port" {
23 | default = 80
24 | type = number
25 | description = "value of the internal container port"
26 | }
27 | variable "external_container_port" {
28 | default = 8000
29 | type = number
30 | description = "value of the external container port"
31 | }
32 | variable "keep_locally" {
33 | default = false
34 | type = bool
35 | description = "If true, then the Docker image won't be deleted on destroy operation. If this is false, it will delete the image from the docker local storage on destroy operation."
36 | }
37 |
38 | variable "force_remove" {
39 | default = true
40 | type = bool
41 | description = "If true, the image is removed forcibly (even if containers are still using it) when the resource is destroyed." # fixed: previous text was copy-pasted from keep_locally and described the wrong attribute
42 | }
43 |
44 | variable "container_path" {
45 | default = "/usr/share/nginx/html"
46 | type = string
47 | description = "value of the container volumes path"
48 | }
49 | variable "host_path" {
50 | default = "/data"
51 | type = string
52 | description = "value of the host_path path"
53 | }
54 | variable "read_only" {
55 | default = false
56 | type = bool
57 | description = "If true, this volume will be readonly. Defaults to false"
58 | }
59 | variable "container_restart" {
60 | default = "always"
61 | type = string
62 | description = "The restart policy for the container. Must be one of 'no', 'on-failure', 'always', 'unless-stopped'. Defaults to no."
63 | }
64 | variable "container_network" {
65 | type = object({
66 | name = string
67 | driver = string
68 | })
69 | default = {
70 | driver = "bridge"
71 | name = "nginx_network"
72 | }
73 | }
74 |
--------------------------------------------------------------------------------
/part02-github-provider/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # GitHub Repository Automation with Terraform
4 |
5 | This Terraform code automates the creation of a new GitHub repository, adds a file to it, and creates a new branch within it. The code is written in HashiCorp Configuration Language (HCL) and uses Terraform to manage the resources.
6 |
7 | ## `github_repository` Resource
8 |
9 | This resource creates a new GitHub repository with the name, description, and visibility specified in the `var.repository_details` variable.
10 |
11 | - `name`: The name of the new GitHub repository, taken from the `var.repository_details.name` variable.
12 | - `description`: The description of the new GitHub repository, taken from the `var.repository_details.description` variable.
13 | - `auto_init`: Whether to initialize the repository with a README.md file or not, taken from the `var.repository_details.auto_init` variable.
14 | - `visibility`: The visibility of the new GitHub repository, which can be "public", "private", or "internal", taken from the `var.repository_details.visibility` variable.
15 |
16 | ## `github_repository_file` Resource
17 |
18 | This resource creates a new file within the GitHub repository created by the previous resource.
19 |
20 | - `repository`: The name of the GitHub repository to add the file to, taken from the `github_repository.new-repo.name` output of the previous resource.
21 | - `branch`: The name of the branch to create or update the file in, taken from the `var.repository_file_details.branch` variable.
22 | - `file`: The path of the file to create or update within the branch, taken from the `var.repository_file_details.file` variable.
23 | - `content`: The content of the file to create or update, taken from the `var.repository_file_details.content` variable.
24 | - `commit_message`: The commit message to use when committing the file changes, taken from the `var.repository_file_details.commit_message` variable.
25 | - `commit_author`: The name of the commit author, taken from the `var.repository_file_details.commit_author` variable.
26 | - `commit_email`: The email of the commit author, taken from the `var.repository_file_details.commit_email` variable.
27 | - `overwrite_on_create`: Whether to overwrite the file if it already exists or not, taken from the `var.repository_file_details.overwrite_on_create` variable.
28 |
29 | ## `github_branch` Resource
30 |
31 | This resource creates a new branch within the GitHub repository created by the first resource.
32 |
33 | - `repository`: The name of the GitHub repository to create the branch in, taken from the `github_repository.new-repo.name` output of the first resource.
34 | - `branch`: The name of the new branch, taken from the `var.gitbranch_details` variable.
35 |
36 | run these commands:
37 |
38 | ```
39 | $ terraform init
40 | $ terraform validate
41 | $ terraform fmt
42 | $ terraform plan
43 | $ terraform apply
44 | ```
45 |
46 | To delete:
47 |
48 | ```
49 | $ terraform destroy
50 | ```
51 |
--------------------------------------------------------------------------------
/part02-github-provider/main.tf:
--------------------------------------------------------------------------------
1 | # Resources
2 |
3 | # This resource allows you to create and manage repositories within your GitHub organization or personal account.
4 | resource "github_repository" "new-repo" {
5 | name = var.repository_details.name
6 | description = var.repository_details.description
7 | auto_init = var.repository_details.auto_init # when true, the repo is seeded with an initial commit
8 |
9 | visibility = var.repository_details.visibility
10 | }
11 |
12 |
13 | # This resource allows you to create and manage files within a GitHub repository.
14 | resource "github_repository_file" "new-repo" {
15 | repository = github_repository.new-repo.name # implicit dependency: the repository is created first
16 | branch = var.repository_file_details.branch
17 | file = var.repository_file_details.file
18 | content = var.repository_file_details.content
19 | commit_message = var.repository_file_details.commit_message
20 | commit_author = var.repository_file_details.commit_author
21 | commit_email = var.repository_file_details.commit_email
22 | overwrite_on_create = var.repository_file_details.overwrite_on_create # overwrite the file if it already exists on the branch
23 | }
24 |
25 |
26 | # This resource allows you to create and manage branches within your repository.
27 | resource "github_branch" "development" {
28 | repository = github_repository.new-repo.name # implicit dependency on the repository above
29 | branch = var.gitbranch_details
30 | }
31 |
--------------------------------------------------------------------------------
/part02-github-provider/providers.tf:
--------------------------------------------------------------------------------
1 | # Adding terraform version and terraform provider version on Terraform block
2 | # GitHub Provider Docs: https://registry.terraform.io/providers/integrations/github/latest/docs
3 |
4 | terraform {
5 | required_providers {
6 | github = {
7 | source = "integrations/github"
8 | version = "4.23.0"
9 | }
10 | }
11 | }
12 |
13 |
14 | provider "github" {
15 | # for more authentication methods and other configuration options see https://registry.terraform.io/providers/integrations/github/latest/docs#authentication
16 | # for more information on generating a token see https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token
17 |
18 | token = var.access_token # var.token
19 | }
20 |
--------------------------------------------------------------------------------
/part02-github-provider/variables.tf:
--------------------------------------------------------------------------------
1 | variable "access_token" {
2 | # Placeholder only — never commit a real token; supply it via TF_VAR_access_token or a git-ignored *.tfvars file.
3 | default = "ghp_1MVg--------------------om3wiSKe"
4 | type = string
5 | sensitive = true # redact the token from plan/apply output
6 | description = "GitHub personal access token used to authenticate the provider"
7 | }
8 |
9 | variable "repository_details" {
10 | type = object({
11 | name = string
12 | description = string
13 | visibility = string
14 | auto_init = bool
15 | }
16 | )
17 | default = {
18 | auto_init = true
19 | description = "My new repo description"
20 | name = "new-repo"
21 | visibility = "public"
22 | }
23 | }
24 | #This resource allows you to create and manage files within a GitHub repository.
25 | variable "repository_file_details" {
26 | type = object({
27 | branch = string
28 | file = string
29 | content = string
30 | commit_message = string
31 | commit_author = string
32 | commit_email = string
33 | overwrite_on_create = bool
34 | }
35 | )
36 | default = {
37 | branch = "main"
38 | commit_author = "Terraform User"
39 | commit_email = "terraform@example.com"
40 | commit_message = "Managed by Terraform"
41 | content = "**/*.tfstate"
42 | file = ".gitignore"
43 | overwrite_on_create = true
44 | }
45 | }
46 |
47 | variable "gitbranch_details" {
48 | default = "develop_new_branch"
49 | type = string
50 | description = "value of the branch name"
51 | }
52 |
--------------------------------------------------------------------------------
/part03-kubernetes-provider/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 |
4 | # .tfstate files
5 | *.tfstate
6 | *.tfstate.*
7 |
8 | # Crash log files
9 | crash.log
10 | crash.*.log
11 |
12 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as
13 | # password, private keys, and other secrets. These should not be part of version
14 | # control as they are data points which are potentially sensitive and subject
15 | # to change depending on the environment.
16 | *.tfvars
17 | *.tfvars.json
18 |
19 | # Ignore override files as they are usually used to override resources locally and so
20 | # are not checked in
21 | override.tf
22 | override.tf.json
23 | *_override.tf
24 | *_override.tf.json
25 |
26 | # Include override files you do wish to add to version control using negated pattern
27 | # !example_override.tf
28 |
29 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
30 | # example: *tfplan*
31 |
32 | # Ignore CLI configuration files
33 | .terraformrc
34 | terraform.rc
--------------------------------------------------------------------------------
/part03-kubernetes-provider/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Terraform for your kubernetes
3 |
4 | This directory contains Terraform codes to deploy different services and applications on your Kubernetes cluster.
5 |
6 | ## What I can find here?
7 |
8 | You can find examples of:
9 |
10 | - ### [MetalLB](./metallb/)
11 |
12 | A load balancer implementation for bare metal Kubernetes clusters. It provides a network load balancer implementation that can handle external traffic to Kubernetes services, in the absence of an external load balancer, such as a cloud provider load balancer or a hardware load balancer.
13 |
14 | - ## [Jenkins](./jenkins/)
15 | Jenkins is an open-source automation server that is used to build, test, and deploy software applications.
16 | - ## [Grafana & Prometheus](./grafana-prometheus)
17 |
18 | This is a great solution to use Grafana and Prometheus together on Kubernetes because they provide very powerful monitoring tools.
19 |
20 | Prometheus can be used to collect data from Kubernetes while Grafana can be used to create custom dashboards that display this data in real time.
21 |
22 | - ## [Kubernetes Dashboard](./kubernetes-dashboard/)
23 |
24 | You can understand from the name that the `Kubernetes dashboard` is a GUI for managing Kubernetes cluster by providing an easy way to monitor and manage the resources.
25 |
26 | - ## [Nginx Ingress Controller](./nginx-ingress-controller/)
27 | The NGINX Ingress Controller is a software application that runs on a Kubernetes cluster and provides reverse proxy and load-balancing capabilities for incoming traffic to Kubernetes services.
28 |
29 | ## How to run the project
30 |
31 | If you need guidance, here is a brief guide on how to start and run the samples.
32 |
33 | ### Step 1 Clone the repository.
34 |
35 | First, you should clone the repository using the below command:
36 |
37 | ```bash
38 | git clone https://github.com/devopshobbies/terraform-templates
39 | ```
40 |
41 | ### step2: RUN
42 |
43 | ```bash
44 | terraform init
45 | terraform fmt
46 | terraform validate
47 | terraform apply
48 | ```
49 |
--------------------------------------------------------------------------------
/part03-kubernetes-provider/grafana-prometheus/README.md:
--------------------------------------------------------------------------------
1 | # Terraform for your kubernetes
2 | This repo contains Terraform code to deploy different services and applications on your kubernetes cluster.
3 |
4 | ## What am I deploying?
5 | We are going to install Grafana using Helm with Terraform on k8s and then customize our setup with the Grafana Terraform provider. Sounds cool, huh? Let's get started!
6 |
7 | - deploy prometheus and grafana in k8s
8 | - customizing grafana
9 | - create new organization
10 | - import prometheus dashboards from a json file
11 |
12 |
13 |
14 | ### step1: clone this repo
15 |
16 | ### step2: RUN
17 | ```
18 | $ terraform init
19 | $ terraform fmt
20 | $ terraform validate
21 | $ terraform apply
22 | ```
23 |
24 |
--------------------------------------------------------------------------------
/part03-kubernetes-provider/grafana-prometheus/main.tf:
--------------------------------------------------------------------------------
1 | terraform { # providers: helm installs the charts, grafana configures the running Grafana instance
2 | required_providers {
3 | helm = {
4 | source = "hashicorp/helm"
5 | version = "2.5.1" # exact version pins for reproducible terraform init
6 | }
7 | grafana = {
8 | source = "grafana/grafana"
9 | version = "1.23.0"
10 | }
11 | }
12 | }
13 |
14 |
15 |
--------------------------------------------------------------------------------
/part03-kubernetes-provider/grafana-prometheus/providers.tf:
--------------------------------------------------------------------------------
1 | provider "helm" {
2 | kubernetes {
3 | config_path = "~/.kube/config" # assumes the local kubeconfig's current context points at the target cluster — TODO confirm
4 | }
5 | }
6 | provider "grafana" {
7 | alias = "base" # aliased: grafana_* resources opt in explicitly with provider = grafana.base
8 | url = var.grafana_url
9 | auth = var.grafana_auth
10 | }
11 |
--------------------------------------------------------------------------------
/part03-kubernetes-provider/grafana-prometheus/resources.tf:
--------------------------------------------------------------------------------
1 | #### HELM RESOURCES ####
2 | resource "helm_release" "grafana" { # Grafana chart from the official grafana helm repo
3 | name = "grafana"
4 | repository = "https://grafana.github.io/helm-charts"
5 | chart = "grafana"
6 |
7 | set {
8 | name = "service.type"
9 | value = var.service_type
10 | }
11 |
12 | set {
13 | name = "service.nodePort"
14 | value = var.service_port
15 | }
16 |
17 | set {
18 | name = "persistence.enabled"
19 | value = true # persist Grafana state across pod restarts (storage class below)
20 | }
21 |
22 | set {
23 | name = "persistence.storageClassName"
24 | value = var.storage_class
25 | }
26 |
27 | set {
28 | name = "adminUser"
29 | value = var.admin_user
30 | }
31 | set {
32 | name = "adminPassword"
33 | value = var.admin_password
34 | }
35 |
36 | }
37 |
38 | resource "helm_release" "prometheus" {
39 | name = "prometheus-community"
40 | repository = "https://prometheus-community.github.io/helm-charts"
41 | chart = "prometheus"
42 |
43 | set {
44 | name = "server.service.type"
45 | value = "NodePort"
46 | }
47 |
48 | set {
49 | name = "server.service.nodePort"
50 | value = var.prometheus_nodeport
51 | }
52 | set {
53 | name = "server.persistentVolume.storageClass"
54 | value = var.storage_class
55 | }
56 |
57 | set {
58 | name = "alertmanager.persistentVolume.storageClass"
59 | value = var.storage_class
60 | }
61 | }
62 | #### GRAFANA ####
63 | resource "grafana_organization" "my_org" {
64 | provider = grafana.base # aliased provider declared in providers.tf
65 | name = "my_org"
66 | }
67 |
68 | resource "grafana_dashboard" "metrics" {
69 | provider = grafana.base
70 | config_json = file("prometheus-dashboard.json") # dashboard definition shipped alongside this module
71 | }
72 |
73 | resource "grafana_data_source" "prometheus" {
74 | provider = grafana.base
75 | type = "prometheus"
76 | name = "k8s"
77 | url = "http://10.132.132.102:32324" # NOTE(review): hard-coded node IP; port matches var.prometheus_nodeport — consider building this from variables
78 | access_mode = "proxy"
79 | }
--------------------------------------------------------------------------------
/part03-kubernetes-provider/grafana-prometheus/variables.tf:
--------------------------------------------------------------------------------
1 | variable "service_type" {
2 | description = "Service type"
3 | default = "NodePort" # expose Grafana via a node port rather than a LoadBalancer
4 | }
5 |
6 | variable "service_port" {
7 | description = "Node port"
8 | default = 32323
9 | }
10 |
11 | variable "storage_class" {
12 | default = "managed-nfs-storage" # must name an existing StorageClass in the cluster — TODO confirm
13 | }
14 |
15 | variable "admin_user" {
16 | description = "admin username for grafana"
17 | default = "admin"
18 | }
19 |
20 | variable "admin_password" {
21 | description = "admin password for admin user"
22 | default = "admin" # NOTE(review): default credentials — override in any non-lab environment
23 | }
24 |
25 | variable "grafana_auth" {
26 | default = "admin:admin" # "user:password" basic auth for the grafana provider; must match admin_user/admin_password
27 | }
28 |
29 | variable "grafana_url" {
30 | default = "http://10.132.132.102:32323" # node IP + service_port — update for your cluster
31 | }
32 |
33 | variable "prometheus_nodeport" {
34 | default = 32324
35 | }
--------------------------------------------------------------------------------
/part03-kubernetes-provider/jenkins/README.md:
--------------------------------------------------------------------------------
# Terraform for your kubernetes
This repo contains Terraform code to deploy different services and applications on your kubernetes cluster.
3 |
4 | ### step1: clone this repo
5 |
6 | ### step2: RUN
7 | ```
8 | $ terraform init
9 | $ terraform fmt
10 | $ terraform validate
11 | $ terraform apply
12 | ```
13 | **Note: Make sure to change values in variables.tf file**
--------------------------------------------------------------------------------
/part03-kubernetes-provider/jenkins/jenkins.tf:
--------------------------------------------------------------------------------
# Namespace that holds every Jenkins resource below.
resource "kubernetes_namespace" "jenkins_namespace" {

  metadata {
    annotations = {
      name = "jenkins"
    }

    labels = {
      managedby = "terraform"
    }

    # Namespace name comes from var.namespace (default "jenkins").
    name = var.namespace
  }
}
15 |
# Persistent volume claim backing the Jenkins home directory
# (mounted at /var/jenkins_home by the deployment below).
resource "kubernetes_persistent_volume_claim" "claim" {
  metadata {
    name      = "${var.name}-claim"
    namespace = var.namespace
    labels = {
      managedby = "terraform"
    }
  }
  spec {
    access_modes = [var.accessmode]
    resources {
      requests = {
        storage = var.request_storage
      }
    }
    storage_class_name = var.storageclass
  }
  # The claim lives inside the namespace created above.
  depends_on = [
    kubernetes_namespace.jenkins_namespace
  ]
}
37 |
# Jenkins controller deployment; mounts the PVC at the Jenkins home.
resource "kubernetes_deployment" "jenkins" {
  depends_on = [
    kubernetes_namespace.jenkins_namespace
  ]

  metadata {
    name = "${var.name}-deployment"
    labels = {
      managedby = "terraform"
    }
    namespace = var.namespace
  }

  spec {
    replicas = var.replicas

    selector {
      match_labels = {
        app = var.name
      }
    }

    template {
      metadata {
        labels = {
          app = var.name
        }
      }

      spec {
        container {
          image = var.jenkins_image
          name  = var.name

          # Web UI port (number, not a quoted string).
          port {
            container_port = 8080
          }

          # Inbound agent (tunnel) port — the Service in this file
          # exposes 50000, so declare it on the container as well.
          port {
            container_port = 50000
          }

          volume_mount {
            name       = "${var.name}-persistent-storage"
            mount_path = "/var/jenkins_home"
          }
          # TODO: liveness probe (e.g. HTTP GET /login on port 8080)
        }
        security_context {
          # fs_group makes the mounted volume group-writable; 1000 is
          # presumably the jenkins image's default UID/GID — TODO confirm.
          fs_group = "1000"
        }
        volume {
          name = "${var.name}-persistent-storage"
          persistent_volume_claim {
            claim_name = "${var.name}-claim"
          }
        }
      }
    }
  }
}
93 |
# ClusterIP service exposing the Jenkins web UI (8080) and the inbound
# agent / tunnel port (50000) inside the cluster.
resource "kubernetes_service" "jenkins-service" {
  depends_on = [
    kubernetes_deployment.jenkins,
    kubernetes_namespace.jenkins_namespace
  ]
  metadata {
    name      = var.name
    namespace = var.namespace
    labels = {
      managedby = "terraform"
      service   = var.name
    }
  }
  spec {
    # Route to pods labelled app=<name> created by the deployment above.
    selector = {
      app = var.name
    }
    port {
      port = 8080
      name = "http"
    }
    port {
      port = 50000
      name = "tunnel"
    }

    type = "ClusterIP"
  }
}
123 |
124 | # resource "kubernetes_secret" "this" {
125 | # metadata {
126 | # name = "${var.name}-admin"
127 | # annotations = {
128 | # "kubernetes.io/service-account.name" = "${var.name}-admin"
129 | # }
130 | # }
131 |
132 | # type = "kubernetes.io/service-account-token"
133 | # }
134 |
# Service account used by Jenkins for in-cluster API access.
# (Resource label fixed from the typo "thia" to "this"; this changes only
# the Terraform address, not the Kubernetes object name.)
resource "kubernetes_service_account" "this" {
  metadata {
    name      = "${var.name}-admin"
    namespace = var.namespace
  }
}
141 |
# Bind the Jenkins service account to the built-in cluster-admin role.
# NOTE(review): cluster-admin grants unrestricted access to the entire
# cluster; consider a narrower role if Jenkins only needs to deploy into
# specific namespaces.
resource "kubernetes_cluster_role_binding" "this" {
  metadata {
    name = "${var.name}-rbac"
  }
  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "ClusterRole"
    name      = "cluster-admin"
  }
  subject {
    kind      = "ServiceAccount"
    name      = "${var.name}-admin"
    namespace = var.namespace
  }
}
--------------------------------------------------------------------------------
/part03-kubernetes-provider/jenkins/main.tf:
--------------------------------------------------------------------------------
terraform {
  required_providers {
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "2.10.0"
    }
    kubectl = {
      source  = "gavinbunney/kubectl"
      version = ">= 1.7.0"
    }
    # Explicit source/version object; the bare version-string shorthand
    # (helm = ">= 2.1.0") is legacy pre-0.13 syntax and resolves the
    # source implicitly.
    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.1.0"
    }
  }
}
14 |
15 |
16 |
--------------------------------------------------------------------------------
/part03-kubernetes-provider/jenkins/modules.tf:
--------------------------------------------------------------------------------
# NOTE(review): this module deploys its own Jenkins instance in addition
# to the hand-written resources in jenkins.tf — presumably only one of
# the two is intended; verify before applying.
module "jenkins" {
  source           = "poush/jenkins/kubernetes"
  version          = "0.2.1"
  create_namespace = true
}
--------------------------------------------------------------------------------
/part03-kubernetes-provider/jenkins/providers.tf:
--------------------------------------------------------------------------------
# All providers read the same local kubeconfig.
provider "kubernetes" {
  config_path = "~/.kube/config"
}

# main.tf also requires the helm and kubectl providers; configure them
# against the same kubeconfig instead of relying on implicit defaults.
provider "helm" {
  kubernetes {
    config_path = "~/.kube/config"
  }
}

provider "kubectl" {
  config_path = "~/.kube/config"
}
4 |
--------------------------------------------------------------------------------
/part03-kubernetes-provider/jenkins/variables.tf:
--------------------------------------------------------------------------------
############# Jenkins ###############
variable "request_storage" {
  description = "storage for your jenkins installation"
  default     = "5Gi"
}

variable "accessmode" {
  description = "access mode for jenkins persistent volume claim"
  default     = "ReadWriteOnce"
}

variable "name" {
  description = "name of your jenkins application, will be used as prefix for all manifests"
  default     = "jenkins"
}

variable "namespace" {
  description = "namespace where all the jenkins resources will be created"
  default     = "jenkins"
}

variable "storageclass" {
  # The previous description claimed the default was AWS gp2, but the
  # actual default is the local NFS storage class.
  description = "storageclass to use for creating persistent volume claim, defaults to managed-nfs-storage"
  default     = "managed-nfs-storage"
}

variable "jenkins_image" {
  # NOTE(review): consider pinning an LTS tag instead of "latest" for
  # reproducible deployments.
  description = "docker image with the tag"
  default     = "jenkins/jenkins:latest"
}

variable "replicas" {
  description = "no. of replicas you want"
  default     = "1"
}
--------------------------------------------------------------------------------
/part03-kubernetes-provider/kubernetes-dashboard/README.md:
--------------------------------------------------------------------------------
# Terraform for your kubernetes
This repo contains Terraform code to deploy different services and applications on your kubernetes cluster.
3 |
4 | ### step1: clone this repo
5 |
6 | ### step2: RUN
7 | ```
8 | $ terraform init
9 | $ terraform fmt
10 | $ terraform validate
11 | $ terraform apply
12 | ```
13 |
--------------------------------------------------------------------------------
/part03-kubernetes-provider/kubernetes-dashboard/kube-dashboard.tf:
--------------------------------------------------------------------------------
# Deploy the Kubernetes Dashboard via its official Helm chart.
resource "helm_release" "my-kubernetes-dashboard" {

  name = "my-kubernetes-dashboard"

  repository = "https://kubernetes.github.io/dashboard/"
  chart      = "kubernetes-dashboard"
  namespace  = "default"

  # Expose the dashboard through a LoadBalancer service.
  set {
    name  = "service.type"
    value = "LoadBalancer"
  }

  # Serve plain HTTP instead of the chart's default self-signed HTTPS.
  set {
    name  = "protocolHttp"
    value = "true"
  }

  set {
    name  = "service.externalPort"
    value = 80
  }

  set {
    name  = "replicaCount"
    value = 2
  }

  # Grant the dashboard a read-only cluster role.
  set {
    name  = "rbac.clusterReadOnlyRole"
    value = "true"
  }
}
--------------------------------------------------------------------------------
/part03-kubernetes-provider/kubernetes-dashboard/main.tf:
--------------------------------------------------------------------------------
terraform {
  required_providers {
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "2.10.0"
    }
    # Explicit source/version object; the bare version-string shorthand
    # (helm = ">= 2.1.0") is legacy pre-0.13 syntax.
    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.1.0"
    }
  }
}
10 |
11 |
12 |
--------------------------------------------------------------------------------
/part03-kubernetes-provider/kubernetes-dashboard/providers.tf:
--------------------------------------------------------------------------------
# Both providers read the same local kubeconfig.
provider "kubernetes" {
  config_path = "~/.kube/config"
}
provider "helm" {
  kubernetes {
    config_path = "~/.kube/config"
  }
}
--------------------------------------------------------------------------------
/part03-kubernetes-provider/metallb/README.md:
--------------------------------------------------------------------------------
# Terraform for your kubernetes
This repo contains Terraform code to deploy different services and applications on your kubernetes cluster.
3 |
4 | ### step1: clone this repo
5 |
6 | ### step2: RUN
7 | ```
8 | $ terraform init
9 | $ terraform fmt
10 | $ terraform validate
11 | $ terraform apply
12 | ```
13 |
--------------------------------------------------------------------------------
/part03-kubernetes-provider/metallb/configmap.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | namespace: metallb-system
5 | name: config
6 | data:
7 | config: |
8 | address-pools:
9 | - name: default
10 | protocol: layer2
11 | addresses:
12 | - 10.132.132.5-10.132.132.10
--------------------------------------------------------------------------------
/part03-kubernetes-provider/metallb/main.tf:
--------------------------------------------------------------------------------
terraform {
  required_providers {
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "2.10.0"
    }
    kubectl = {
      source  = "gavinbunney/kubectl"
      version = ">= 1.7.0"
    }
    # Explicit source/version object; the bare version-string shorthand
    # is legacy pre-0.13 syntax.
    # NOTE(review): no helm_release resources exist in this directory —
    # this requirement may be removable.
    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.1.0"
    }
  }
}
14 |
15 |
16 |
--------------------------------------------------------------------------------
/part03-kubernetes-provider/metallb/metallb.tf:
--------------------------------------------------------------------------------
# Create Namespace
# All MetalLB components in this file are created inside this namespace.
resource "kubernetes_namespace" "metallb_system" {
  metadata {
    name = "metallb-system"

    labels = {
      app = "metallb"
    }
  }
}
11 | # Create Controller Cluster Role
12 | resource "kubernetes_cluster_role" "controller" {
13 | metadata {
14 | labels = {
15 | app = "metallb"
16 | }
17 | name = "metallb-system:controller"
18 | }
19 |
20 | rule {
21 | api_groups = [""]
22 | resources = ["services"]
23 | verbs = ["get", "list", "watch"]
24 |
25 | }
26 |
27 | rule {
28 | api_groups = [""]
29 | resources = ["services/status"]
30 | verbs = ["update"]
31 |
32 | }
33 |
34 | rule {
35 | api_groups = [""]
36 | resources = ["events"]
37 | verbs = ["create", "patch"]
38 |
39 | }
40 |
41 | rule {
42 | api_groups = ["policy"]
43 | resource_names = ["controller"]
44 | resources = ["podsecuritypolicies"]
45 | verbs = ["use"]
46 |
47 | }
48 | }
49 |
50 | # Create Speaker Cluster Role
51 | resource "kubernetes_cluster_role" "speaker" {
52 | metadata {
53 | labels = {
54 | app = "metallb"
55 | }
56 | name = "metallb-system:speaker"
57 | }
58 |
59 | rule {
60 | api_groups = [""]
61 | resources = ["services", "endpoints", "nodes"]
62 | verbs = ["get", "list", "watch"]
63 |
64 | }
65 |
66 | rule {
67 | api_groups = ["discovery.k8s.io"]
68 | resources = ["endpointslices"]
69 | verbs = ["get", "list", "watch"]
70 |
71 | }
72 |
73 | rule {
74 | api_groups = [""]
75 | resources = ["events"]
76 | verbs = ["create", "patch"]
77 |
78 | }
79 |
80 | rule {
81 | api_groups = ["policy"]
82 | resource_names = ["speaker"]
83 | resources = ["podsecuritypolicies"]
84 | verbs = ["use"]
85 |
86 | }
87 | }
88 | # Create Controller Cluster Role Binding Role
89 | resource "kubernetes_cluster_role_binding" "controller" {
90 | metadata {
91 | labels = {
92 | app = "metallb"
93 | }
94 | name = "metallb-system:controller"
95 | }
96 |
97 | role_ref {
98 | api_group = "rbac.authorization.k8s.io"
99 | kind = "ClusterRole"
100 | name = "metallb-system:controller"
101 | }
102 |
103 | subject {
104 | kind = "ServiceAccount"
105 | name = "controller"
106 | namespace = kubernetes_namespace.metallb_system.metadata.0.name
107 | }
108 | }
109 |
110 | # Create Speaker Cluster Role Binding Role
111 | resource "kubernetes_cluster_role_binding" "speaker" {
112 | metadata {
113 | labels = {
114 | app = "metallb"
115 | }
116 | name = "metallb-system:speaker"
117 | }
118 |
119 | role_ref {
120 | api_group = "rbac.authorization.k8s.io"
121 | kind = "ClusterRole"
122 | name = "metallb-system:speaker"
123 | }
124 |
125 | subject {
126 | kind = "ServiceAccount"
127 | name = "speaker"
128 | namespace = kubernetes_namespace.metallb_system.metadata.0.name
129 | }
130 | }
131 | # Create Config Watcher Role Binding
132 | resource "kubernetes_role_binding" "config_watcher" {
133 | metadata {
134 | labels = {
135 | app = "metallb"
136 | }
137 | name = "config-watcher"
138 | namespace = kubernetes_namespace.metallb_system.metadata.0.name
139 | }
140 |
141 | role_ref {
142 | api_group = "rbac.authorization.k8s.io"
143 | kind = "Role"
144 | name = "config-watcher"
145 | }
146 | subject {
147 | kind = "ServiceAccount"
148 | name = kubernetes_service_account.controller.metadata.0.name
149 | namespace = kubernetes_namespace.metallb_system.metadata.0.name
150 | }
151 | subject {
152 | kind = "ServiceAccount"
153 | name = kubernetes_service_account.speaker.metadata.0.name
154 | namespace = kubernetes_namespace.metallb_system.metadata.0.name
155 | }
156 | }
157 |
158 | # Create Pod Lister Role Binding
159 | resource "kubernetes_role_binding" "pod_lister" {
160 | metadata {
161 | labels = {
162 | app = "metallb"
163 | }
164 | name = "pod-lister"
165 | namespace = kubernetes_namespace.metallb_system.metadata.0.name
166 | }
167 |
168 | role_ref {
169 | api_group = "rbac.authorization.k8s.io"
170 | kind = "Role"
171 | name = "pod-lister"
172 | }
173 |
174 | subject {
175 | kind = "ServiceAccount"
176 | name = kubernetes_service_account.speaker.metadata.0.name
177 | namespace = kubernetes_namespace.metallb_system.metadata.0.name
178 | }
179 | }
180 |
181 | # Create Controller Role Binding
182 | resource "kubernetes_role_binding" "controller" {
183 | metadata {
184 | labels = {
185 | app = "metallb"
186 | }
187 | name = "controller"
188 | namespace = kubernetes_namespace.metallb_system.metadata.0.name
189 | }
190 |
191 | role_ref {
192 | api_group = "rbac.authorization.k8s.io"
193 | kind = "Role"
194 | name = "controller"
195 | }
196 |
197 | subject {
198 | kind = "ServiceAccount"
199 | name = kubernetes_service_account.controller.metadata.0.name
200 | namespace = kubernetes_namespace.metallb_system.metadata.0.name
201 | }
202 | }
203 |
204 | # Create Config Watcher Role
205 | resource "kubernetes_role" "config_watcher" {
206 | metadata {
207 | labels = {
208 | app = "metallb"
209 | }
210 | name = "config-watcher"
211 | namespace = kubernetes_namespace.metallb_system.metadata.0.name
212 | }
213 |
214 | rule {
215 | api_groups = [""]
216 | resources = ["configmaps"]
217 | verbs = ["get", "list", "watch"]
218 | }
219 | }
220 |
221 | # Create Pod Lister Role
222 | resource "kubernetes_role" "pod_lister" {
223 | metadata {
224 | labels = {
225 | app = "metallb"
226 | }
227 | name = "pod-lister"
228 | namespace = kubernetes_namespace.metallb_system.metadata.0.name
229 | }
230 |
231 | rule {
232 | api_groups = [""]
233 | resources = ["pods"]
234 | verbs = ["list"]
235 | }
236 | }
237 |
238 | resource "kubernetes_role" "controller" {
239 | metadata {
240 | labels = {
241 | app = "metallb"
242 | }
243 | name = "controller"
244 | namespace = kubernetes_namespace.metallb_system.metadata.0.name
245 | }
246 |
247 | rule {
248 | api_groups = [""]
249 | resources = ["secrets"]
250 | verbs = ["create"]
251 | }
252 |
253 | rule {
254 | api_groups = [""]
255 | resources = ["secrets"]
256 | resource_names = ["memberlist"]
257 | verbs = ["list"]
258 | }
259 |
260 | rule {
261 | api_groups = ["apps"]
262 | resources = ["deployments"]
263 | resource_names = ["controller"]
264 | verbs = ["get"]
265 | }
266 | }
267 |
268 | # Create Controller Deployment
269 | resource "kubernetes_deployment" "controller" {
270 | metadata {
271 | labels = {
272 | app = "metallb"
273 | component = "controller"
274 | }
275 | name = "controller"
276 | namespace = kubernetes_namespace.metallb_system.metadata.0.name
277 | }
278 |
279 | spec {
280 | revision_history_limit = 3
281 |
282 | selector {
283 | match_labels = {
284 | app = "metallb"
285 | component = "controller"
286 | }
287 | }
288 |
289 | template {
290 | metadata {
291 | annotations = {
292 | "prometheus.io/port" = "7472"
293 | "prometheus.io/scrape" = "true"
294 | }
295 | labels = {
296 | app = "metallb"
297 | component = "controller"
298 | }
299 | }
300 |
301 | spec {
302 |
303 | automount_service_account_token = true # override Terraform's default false - https://github.com/kubernetes/kubernetes/issues/27973#issuecomment-462185284
304 | service_account_name = "controller"
305 | termination_grace_period_seconds = 0
306 | node_selector = merge(
307 | { "kubernetes.io/os" = "linux" },
308 | var.controller_node_selector
309 | )
310 | security_context {
311 | run_as_non_root = true
312 | run_as_user = 65534
313 | }
314 |
315 | container {
316 | name = "controller"
317 | image = "quay.io/metallb/controller:v${var.metallb_version}"
318 |
319 | args = [
320 | "--port=7472",
321 | "--config=config",
322 | ]
323 |
324 | env {
325 | name = "METALLB_ML_SECRET_NAME"
326 | value = "memberlist"
327 | }
328 |
329 | env {
330 | name = "METALLB_DEPLOYMENT"
331 | value = "controller"
332 | }
333 |
334 | port {
335 | name = "monitoring"
336 | container_port = 7472
337 | }
338 |
339 | security_context {
340 | allow_privilege_escalation = false
341 | capabilities {
342 | drop = ["ALL"]
343 | }
344 | read_only_root_filesystem = true
345 | }
346 | }
347 |
348 | dynamic "toleration" {
349 | for_each = var.controller_toleration
350 | content {
351 | key = toleration.value["key"]
352 | effect = toleration.value["effect"]
353 | operator = lookup(toleration.value, "operator", null)
354 | value = lookup(toleration.value, "value", null)
355 | toleration_seconds = lookup(toleration.value, "toleration_seconds", null)
356 | }
357 | }
358 | }
359 | }
360 | }
361 | }
362 |
363 | # Create Controller Service Account
364 | resource "kubernetes_service_account" "controller" {
365 | metadata {
366 | labels = {
367 | app = "metallb"
368 | }
369 | name = "controller"
370 | namespace = kubernetes_namespace.metallb_system.metadata.0.name
371 | }
372 | }
373 |
374 | # Create Speaker Service Account
375 | resource "kubernetes_service_account" "speaker" {
376 | metadata {
377 | labels = {
378 | app = "metallb"
379 | }
380 | name = "speaker"
381 | namespace = kubernetes_namespace.metallb_system.metadata.0.name
382 | }
383 | }
384 |
385 | # Create Speaker DaemonSet
386 | resource "kubernetes_daemonset" "speaker" {
387 | metadata {
388 | labels = {
389 | app = "metallb"
390 | component = "speaker"
391 | }
392 | name = "speaker"
393 | namespace = kubernetes_namespace.metallb_system.metadata.0.name
394 | }
395 |
396 | spec {
397 | selector {
398 | match_labels = {
399 | app = "metallb"
400 | component = "speaker"
401 | }
402 | }
403 |
404 | template {
405 | metadata {
406 | annotations = {
407 | "prometheus.io/port" = "7472"
408 | "prometheus.io/scrape" = "true"
409 | }
410 | labels = {
411 | app = "metallb"
412 | component = "speaker"
413 | }
414 | }
415 |
416 | spec {
417 |
418 | automount_service_account_token = true # override Terraform's default false - https://github.com/kubernetes/kubernetes/issues/27973#issuecomment-462185284
419 | service_account_name = "speaker"
420 | termination_grace_period_seconds = 2
421 | host_network = true
422 | node_selector = {
423 | "kubernetes.io/os" = "linux"
424 | }
425 |
426 | toleration {
427 | key = "node-role.kubernetes.io/master"
428 | effect = "NoSchedule"
429 | operator = "Exists"
430 | }
431 |
432 | container {
433 | name = "speaker"
434 | image = "quay.io/metallb/speaker:v${var.metallb_version}"
435 |
436 | args = [
437 | "--port=7472",
438 | "--config=config",
439 | ]
440 |
441 | env {
442 | name = "METALLB_NODE_NAME"
443 | value_from {
444 | field_ref {
445 | field_path = "spec.nodeName"
446 | }
447 | }
448 | }
449 |
450 | env {
451 | name = "METALLB_HOST"
452 | value_from {
453 | field_ref {
454 | field_path = "status.hostIP"
455 | }
456 | }
457 | }
458 |
459 | env {
460 | name = "METALLB_ML_BIND_ADDR"
461 | value_from {
462 | field_ref {
463 | field_path = "status.podIP"
464 | }
465 | }
466 | }
467 |
468 | env {
469 | name = "METALLB_ML_LABELS"
470 | value = "app=metallb,component=speaker"
471 | }
472 |
473 | env {
474 | name = "METALLB_ML_SECRET_KEY"
475 | value_from {
476 | secret_key_ref {
477 | name = "memberlist"
478 | key = "secretkey"
479 | }
480 | }
481 | }
482 |
483 | port {
484 | name = "monitoring"
485 | container_port = 7472
486 | host_port = 7472
487 | }
488 |
489 | port {
490 | name = "memberlist-tcp"
491 | container_port = 7946
492 | # host_port = 7946
493 | }
494 |
495 | port {
496 | name = "memberlist-udp"
497 | protocol = "UDP"
498 | container_port = 7946
499 | # host_port = 7946
500 | }
501 |
502 | security_context {
503 | allow_privilege_escalation = false
504 | capabilities {
505 | add = ["NET_RAW"]
506 | drop = ["ALL"]
507 | }
508 | read_only_root_filesystem = true
509 | }
510 |
511 | }
512 | }
513 | }
514 | }
515 | }
516 |
# Create Controller Pod Security Policy
# NOTE(review): PodSecurityPolicy was deprecated in Kubernetes 1.21 and
# removed in 1.25 — on newer clusters this resource will fail to apply;
# migrate to Pod Security Admission or a policy engine instead.
resource "kubernetes_pod_security_policy" "controller" {
  metadata {
    labels = {
      app = "metallb"
    }
    name = "controller"
    #namespace = "metallb-system"
  }
  spec {
    allow_privilege_escalation = false
    allowed_capabilities       = []
    #allowed_host_paths {}
    default_add_capabilities           = []
    default_allow_privilege_escalation = false

    fs_group {
      range {
        max = 65535
        min = 1
      }
      rule = "MustRunAs"
    }

    host_ipc                   = false
    host_network               = false
    host_pid                   = false
    privileged                 = false
    read_only_root_filesystem  = true
    required_drop_capabilities = ["ALL"]

    # Force the controller to run as a non-root UID.
    run_as_user {
      range {
        max = 65535
        min = 1
      }
      rule = "MustRunAs"
    }

    se_linux {
      rule = "RunAsAny"
    }

    supplemental_groups {
      range {
        max = 65535
        min = 1
      }
      rule = "MustRunAs"
    }

    volumes = [
      "configMap",
      "secret",
      "emptyDir",
    ]
  }
}
575 |
576 | # Create Speaker Pod Security Policy
577 | resource "kubernetes_pod_security_policy" "speaker" {
578 | metadata {
579 | labels = {
580 | app = "metallb"
581 | }
582 | name = "speaker"
583 | #namespace = "metallb-system"
584 | }
585 | spec {
586 | allow_privilege_escalation = false
587 | allowed_capabilities = ["NET_RAW"]
588 | #allowed_host_paths {}
589 | default_add_capabilities = []
590 | default_allow_privilege_escalation = false
591 |
592 | fs_group {
593 | rule = "RunAsAny"
594 | }
595 |
596 | host_ipc = false
597 | host_network = true
598 | host_pid = false
599 |
600 | host_ports {
601 | max = 7472
602 | min = 7472
603 | }
604 |
605 | host_ports {
606 | max = 7946
607 | min = 7946
608 | }
609 |
610 | privileged = true
611 | read_only_root_filesystem = true
612 | required_drop_capabilities = ["ALL"]
613 |
614 | run_as_user {
615 | rule = "RunAsAny"
616 | }
617 |
618 | se_linux {
619 | rule = "RunAsAny"
620 | }
621 |
622 | supplemental_groups {
623 | rule = "RunAsAny"
624 | }
625 |
626 | volumes = [
627 | "configMap",
628 | "secret",
629 | "emptyDir",
630 | ]
631 | }
632 | }
# Apply the MetalLB address-pool ConfigMap.
# The manifest targets the metallb-system namespace, so make the
# dependency on the namespace resource explicit — otherwise Terraform
# may try to apply it before the namespace exists.
resource "kubectl_manifest" "metallb_configmap" {
  depends_on = [kubernetes_namespace.metallb_system]
  yaml_body  = file("./configmap.yml")
}
--------------------------------------------------------------------------------
/part03-kubernetes-provider/metallb/providers.tf:
--------------------------------------------------------------------------------
1 | provider "kubernetes" {
2 | config_path = "~/.kube/config"
3 | }
--------------------------------------------------------------------------------
/part03-kubernetes-provider/metallb/variables.tf:
--------------------------------------------------------------------------------
############## metallb ##############
variable "metallb_version" {
  default     = "0.10.2"
  type        = string
  description = "MetalLB Version e.g. 0.10.2"
}

# Extra tolerations for the controller Deployment; each map supports
# key/effect plus optional operator/value/toleration_seconds.
variable "controller_toleration" {
  default = []
  type    = list(map(any))
}

# Additional node-selector labels merged with kubernetes.io/os=linux.
variable "controller_node_selector" {
  default = {}
  type    = map(any)
}
17 |
--------------------------------------------------------------------------------
/part03-kubernetes-provider/nginx-ingress-controller/README.md:
--------------------------------------------------------------------------------
# Terraform for your kubernetes
This repo contains Terraform code to deploy different services and applications on your kubernetes cluster.
3 |
4 | ### step1: clone this repo
5 |
6 | ### step2: RUN
7 | ```
8 | $ terraform init
9 | $ terraform fmt
10 | $ terraform validate
11 | $ terraform apply
12 | ```
13 |
--------------------------------------------------------------------------------
/part03-kubernetes-provider/nginx-ingress-controller/main.tf:
--------------------------------------------------------------------------------
terraform {
  required_providers {
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "2.10.0"
    }
    # Explicit source/version object; the bare version-string shorthand
    # (helm = ">= 2.1.0") is legacy pre-0.13 syntax.
    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.1.0"
    }
  }
}
10 |
11 |
12 |
--------------------------------------------------------------------------------
/part03-kubernetes-provider/nginx-ingress-controller/nginx-ingress.tf:
--------------------------------------------------------------------------------
# Deploy the ingress-nginx controller as a DaemonSet behind a
# LoadBalancer service.
resource "helm_release" "nginx_ingress" {
  name             = "ingress-nginx"
  namespace        = "ingress-nginx"
  create_namespace = true # bool attributes, not the string "true"
  atomic           = true
  repository       = "https://kubernetes.github.io/ingress-nginx"
  chart            = "ingress-nginx"

  # ingress-nginx chart values live under the "controller." prefix;
  # top-level "service.type" / "kind" are not recognized by the chart
  # and would be silently ignored.
  set {
    name  = "controller.service.type"
    value = "LoadBalancer"
  }
  set {
    name  = "controller.kind"
    value = "DaemonSet"
  }
}
--------------------------------------------------------------------------------
/part03-kubernetes-provider/nginx-ingress-controller/providers.tf:
--------------------------------------------------------------------------------
# Both providers read the same local kubeconfig.
provider "kubernetes" {
  config_path = "~/.kube/config"
}
provider "helm" {
  kubernetes {
    config_path = "~/.kube/config"
  }
}
--------------------------------------------------------------------------------
/part04-gitlab-provider/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 |
4 | # .tfstate files
5 | *.tfstate
6 | *.tfstate.*
7 |
8 | # Crash log files
9 | crash.log
10 |
11 | # Ignore any .tfvars files that are generated automatically for each Terraform run. Most
12 | # .tfvars files are managed as part of configuration and so should be included in
13 | # version control.
14 | #
15 | # example.tfvars
16 |
17 | # Ignore override files as they are usually used to override resources locally and so
18 | # are not checked in
19 | override.tf
20 | override.tf.json
21 | *_override.tf
22 | *_override.tf.json
23 |
24 | # Include override files you do wish to add to version control using negated pattern
25 | #
26 | # !example_override.tf
27 |
28 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
29 | # example: *tfplan*
30 |
--------------------------------------------------------------------------------
/part04-gitlab-provider/README.md:
--------------------------------------------------------------------------------
1 | # Terraform for your Gitlab
2 |
3 |
4 |
5 | ## Contents
6 |
7 | - [Overview](#overview)
8 | - [How to use?](#how-to-use)
9 | - [Roadmap](#roadmap)
10 | - [Contribute](#contribute)
11 |
12 | ## Overview
13 |
14 | This repository contains terraform codes to interact with GitLab resources, like users, groups, projects and more. Take a tour and check the files so you can have a good understanding of what is happening and how the provider is defined.
15 | It's better to implement the example once after you read and check the code so you can learn more about it.
16 |
17 | ## How to setup?
18 |
Copy the `terraform.tfvars.example` file and rename it to `terraform.tfvars`:
20 |
21 | ```bash
22 | cp terraform.tfvars.example terraform.tfvars
23 | ```
24 |
25 | Then you can use below commands to test the provider.
26 |
27 | ```bash
28 | terraform init
29 | ```
30 |
Now it's time to apply the configuration.
32 |
33 | ```bash
34 | terraform apply
35 | ```
36 |
37 | Deletes and removes Terraform-managed infrastructure
38 |
39 | ```bash
40 | terraform destroy
41 | ```
42 |
43 | ## Roadmap
44 |
45 | - [x] Create a project
46 | - [x] Manage a file within a repository
47 | - [x] Manage license of a project
48 | - [x] Create a new branch
49 | - [x] Create a tag
50 | - [x] Manage labels of a project
51 | - [x] Manage scheduled pipeline
52 | - [x] Manage pipelines
53 |
54 | ## Contribute
55 |
56 | Contributions are always welcome!
57 |
--------------------------------------------------------------------------------
/part04-gitlab-provider/providers.tf:
--------------------------------------------------------------------------------
# Configure the GitLab Provider
# The token is supplied via terraform.tfvars; set base_url here as well
# if targeting a self-hosted GitLab instance instead of gitlab.com.
provider "gitlab" {
  token = var.gitlab_token
}
--------------------------------------------------------------------------------
/part04-gitlab-provider/resources.tf:
--------------------------------------------------------------------------------
# Create project
# initialize_with_readme creates an initial commit, which presumably
# establishes the default branch the resources below commit to.
resource "gitlab_project" "project" {
  name                   = var.project.name
  description            = var.project.description
  visibility_level       = var.project.visibility_level
  initialize_with_readme = var.project.initialize_with_readme
}
8 |
9 |
# Create repository file
resource "gitlab_repository_file" "text" {
  project        = gitlab_project.project.id
  file_path      = var.gitlab_repository_file_details.file_path_text
  branch         = var.gitlab_repository_file_details.branch
  content        = var.gitlab_repository_file_details.content_text
  author_email   = var.gitlab_repository_file_details.author_email
  author_name    = var.gitlab_repository_file_details.author_name
  commit_message = var.gitlab_repository_file_details.commit_message
}

resource "gitlab_repository_file" "git" {
  project        = gitlab_project.project.id
  file_path      = var.gitlab_repository_file_details.file_path_gitignore
  branch         = var.gitlab_repository_file_details.branch
  content        = var.gitlab_repository_file_details.content_git
  author_email   = var.gitlab_repository_file_details.author_email
  author_name    = var.gitlab_repository_file_details.author_name
  commit_message = var.gitlab_repository_file_details.commit_message

  # Serialize the two commits: concurrent commits to the same branch can
  # race on the GitLab API and fail with a 409 conflict.
  depends_on = [gitlab_repository_file.text]
}
30 |
31 | # Manage license
32 | # This resource requires a GitLab Enterprise instance.
33 | # resource "gitlab_managed_license" "license" {
34 | # project = gitlab_project.project.id
35 | # name = "MIT license"
36 | # approval_status = "allowed"
37 | # }
38 |
# Create branch from var.ref.
resource "gitlab_branch" "branch" {
  name    = var.branch_name
  ref     = var.ref
  project = gitlab_project.project.id
}

# Create tag pointing at var.ref.
resource "gitlab_project_tag" "tag" {
  name    = var.tag_name
  ref     = var.ref
  project = gitlab_project.project.id
}

# Add a bug label
resource "gitlab_label" "bug" {
  name        = "bug"
  color       = "#000" // Set the color
  description = "This label determines that the issue is reporting a bug"

  project = gitlab_project.project.id
}
61 |
# Schedule a nightly pipeline at 01:00.
# The project is initialized with a README, which creates GitLab's default
# branch ("main" on modern GitLab) — scheduling against the hard-coded
# "master" would target a branch that does not exist, so reuse var.ref
# (default "main") to stay consistent with the branch/tag resources above.
resource "gitlab_pipeline_schedule" "example" {
  project     = gitlab_project.project.id
  description = "Used to schedule builds"
  ref         = var.ref
  cron        = "0 1 * * *"
}
69 |
70 |
# Attach a CI/CD variable to the schedule above; jobs started by that
# schedule see it as EXAMPLE_KEY.
resource "gitlab_pipeline_schedule_variable" "example" {
  project              = gitlab_project.project.id
  pipeline_schedule_id = gitlab_pipeline_schedule.example.id
  key                  = "EXAMPLE_KEY"
  value                = "example"
}

# Create a pipeline trigger token that external systems can use to start
# pipelines through the GitLab API.
resource "gitlab_pipeline_trigger" "example" {
  project     = gitlab_project.project.id
  description = "Used to trigger builds"
}
83 |
--------------------------------------------------------------------------------
/part04-gitlab-provider/terraform.tfvars.example:
--------------------------------------------------------------------------------
# Example variable values — copy to terraform.tfvars and fill in.
# A .tfvars file may only ASSIGN values; variable declarations (variable "x"
# { ... } blocks) belong in variables.tf and are a syntax error here, so the
# stray `variable "ref"` block has been removed.
gitlab_token = ""
branch_name  = ""
tag_name     = "bug"
ref          = "main"

project = {
  "name"                   = "my-gitlab-project"
  "description"            = "My GitLab project description"
  "visibility_level"       = "public"
  "initialize_with_readme" = true
}

gitlab_repository_file_details = {
  author_email        = "terraform@example.com"
  author_name         = "Terraform"
  branch              = "main"
  commit_message      = "feature: add test file"
  content_text        = "lorem ipsum"
  content_git         = "**/*.tfstate"
  file_path_text      = "test.txt"
  file_path_gitignore = ".gitignore"
}


gitlab_managed_license_details = {
  name            = "MIT license"
  approval_status = "allowed"
}
35 |
--------------------------------------------------------------------------------
/part04-gitlab-provider/variables.tf:
--------------------------------------------------------------------------------
# GitLab personal access token used by the provider; marked sensitive so it
# is redacted from plan/apply output.
variable "gitlab_token" {
  type      = string
  default   = ""
  sensitive = true
}

# Name of the branch created by gitlab_branch.branch.
variable "branch_name" {
  type    = string
  default = "test-terraform"
}

# Project settings. An explicit object type replaces map(any): a map requires
# all elements to share a single type, so the bool initialize_with_readme was
# being silently unified with the string fields. The object keeps each field's
# real type while remaining compatible with existing tfvars assignments.
variable "project" {
  type = object({
    name                   = string
    description            = string
    visibility_level       = string
    initialize_with_readme = bool
  })
  description = "Your Gitlab project configuration"
  default = {
    name                   = "terraform-tutorial-project"
    description            = "Creating a project using Terraform"
    visibility_level       = "public"
    initialize_with_readme = true
  }
}
21 |
# Inputs for the two gitlab_repository_file resources in resources.tf: one
# text file and one .gitignore, committed to the same branch with shared
# author/commit metadata.
variable "gitlab_repository_file_details" {
  type = object({
    file_path_text      = string
    file_path_gitignore = string
    branch              = string
    content_text        = string
    content_git         = string
    author_email        = string
    author_name         = string
    commit_message      = string
  })
  default = {
    author_email        = "terraform@example.com"
    author_name         = "Terraform"
    branch              = "main"
    commit_message      = "feature: add test file"
    content_text        = "lorem ipsum"
    content_git         = "**/*.tfstate"
    file_path_text      = "test.txt"
    file_path_gitignore = ".gitignore"
  }
}
44 |
# Settings for gitlab_managed_license; currently only consumed by the
# commented-out resource in resources.tf (managed licenses require a GitLab
# Enterprise instance).
variable "gitlab_managed_license_details" {
  type = object({
    name            = string
    approval_status = string
  })
  default = {
    approval_status = "allowed"
    name            = "MIT license"
  }
}

# Ref used by both the branch and the tag resources.
variable "ref" {
  type        = string
  default     = "main"
  description = "The ref which the branch is created from."
}

variable "tag_name" {
  type        = string
  default     = "my-terraform-tag"
  description = "terraform tag name"
}
67 |
--------------------------------------------------------------------------------
/part04-gitlab-provider/versions.tf:
--------------------------------------------------------------------------------
# Provider requirements for the whole part04 configuration.
terraform {
  required_providers {
    gitlab = {
      source  = "gitlabhq/gitlab"
      version = ">= 3.13.0"
    }
  }
}
--------------------------------------------------------------------------------
/part05-HA-proxy-provider/README.md:
--------------------------------------------------------------------------------
1 | # Configure HA Proxy with Terraform
2 |
3 | run these commands:
4 |
5 | ```
6 | $ terraform init
7 | $ terraform validate
8 | $ terraform fmt
9 | $ terraform plan
10 | $ terraform apply
11 | ```
To delete the provisioned resources:
13 | ```
14 | $ terraform destroy
15 | ```
16 |
--------------------------------------------------------------------------------
/part05-HA-proxy-provider/main.tf:
--------------------------------------------------------------------------------
# Resources

# Declare an HAProxy frontend named "my-frontend" with default settings.
resource "haproxy_frontend" "my-frontend" {
  name = "my-frontend"

  # for more options go to https://registry.terraform.io/providers/matthisholleville/haproxy/latest/docs/resources/frontend#optional
}

# Add the entry "/metrics" -> "50" to the runtime map named "ratelimit"
# (e.g. a per-path rate-limit lookup table).
resource "haproxy_maps" "my-key" {
  map   = "ratelimit"
  key   = "/metrics"
  value = "50"

  # for more options go to https://registry.terraform.io/providers/matthisholleville/haproxy/latest/docs/resources/maps#optional
}
16 |
--------------------------------------------------------------------------------
/part05-HA-proxy-provider/providers.tf:
--------------------------------------------------------------------------------
# Adding terraform version and terraform provider version on Terraform block
terraform {
  required_providers {
    haproxy = {
      source  = "matthisholleville/haproxy"
      version = "0.2.3"
    }
  }
}

# Connects to the HAProxy Data Plane API.
# NOTE(review): credentials are hard-coded for this demo — move them into
# sensitive variables or environment configuration for real deployments.
provider "haproxy" {
  server_addr = "localhost:5555"
  username    = "admin"
  password    = "adminpwd"

  # you may need to allow insecure TLS communications unless you have configured
  # certificates for your server
  insecure = true
}
20 |
--------------------------------------------------------------------------------
/part06-grafana-provider/README.md:
--------------------------------------------------------------------------------
1 | # Configure Grafana with Terraform
2 |
3 | run these commands:
4 |
5 | ```
6 | $ terraform init
7 | $ terraform validate
8 | $ terraform fmt
9 | $ terraform plan
10 | $ terraform apply
11 | ```
To delete the provisioned resources:
13 | ```
14 | $ terraform destroy
15 | ```
16 |
--------------------------------------------------------------------------------
/part06-grafana-provider/assets/grafana.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/devopshobbies/terraform-templates/0f13f06f8279ef8c183a2ca726d72ff7fc328b72/part06-grafana-provider/assets/grafana.png
--------------------------------------------------------------------------------
/part06-grafana-provider/main.tf:
--------------------------------------------------------------------------------
# Resources
# Sample scenario: new user -> new team -> new folder -> new dashboard -> set folder permissions


# Create new user.
# This one does not currently work with API Tokens. You must use basic auth.
# NOTE(review): password is hard-coded for the demo; use a sensitive variable
# for anything beyond local experiments.
resource "grafana_user" "new_user" {
  email    = "devops.user@example.com"
  password = "test"
}


# Create new team and add the user above (referenced by email).
resource "grafana_team" "new_team" {
  name = "DevOps Team"
  members = [
    grafana_user.new_user.email
  ]
}


# Create new folder with an explicit UID so other resources can reference it
# stably across recreations.
resource "grafana_folder" "new_folder_with_uid" {
  uid   = "test-folder-uid"
  title = "Terraform Test Folder With UID"
}
27 |
28 |
29 | # Create new dashboard in a specific folder.
30 | resource "grafana_dashboard" "new_dashboard" {
31 | folder = grafana_folder.new_folder_with_uid.id
32 | config_json = < username = OS-Distro-name
40 |
41 | variable "user_name" {
42 | type = string
43 | default = "ubuntu"
44 | }
45 | ```
46 |
47 |
48 | How To Use
49 | ----------
50 | ````
51 | terraform init
52 | terraform plan
53 | #Use `-parallelism=1` for preventing 429 error for create multi VM at the sametime
54 | terraform apply -parallelism=1
55 | ````
56 |
--------------------------------------------------------------------------------
/part17-arvancloud-minio-cluster/inventory.tmpl:
--------------------------------------------------------------------------------
1 | [new_cloud_server]
2 | %{ for index,v in ansible_hostname ~}
3 | ${ansible_hostname[index]} ansible_host=${ansible_ip[index]} ansible_user=${username} ansible_ssh_private_key_file=${key_path} ansible_become=true
4 | %{ endfor ~}
--------------------------------------------------------------------------------
/part17-arvancloud-minio-cluster/main.tf:
--------------------------------------------------------------------------------
1 |
# Upload the local SSH public key ("${var.key_path}.pub") to Arvancloud so
# the abraks below can reference it by name.
module "abrak-sshkey-module" {
  source         = "./modules/ssh-key"
  sshkey-region  = var.region
  ssh-public_key = file("${var.key_path}.pub")
}

# Create var.server-num Ubuntu 22.04 abraks (minio-0 .. minio-N), each booting
# with the uploaded SSH key.
module "abrak-module" {
  count        = var.server-num
  abrak-region = var.region
  source       = "./modules/abrak/ubuntu22"
  depends_on = [
    module.abrak-sshkey-module
  ]
  abrak-name  = "minio-${count.index}"
  ssh-keyname = module.abrak-sshkey-module.get-ssh-key.name
}
18 |
19 | /* # add extra public ip
20 | module "abrak-public-ip-module" {
21 | source = "./modules/public-ip"
22 | depends_on = [
23 | module.abrak-module
24 | ]
25 | region-publicip = var.region
26 | myabrak_uuid = module.abrak-module.details-myabrak-id.id
27 | }
28 | */
29 |
30 |
# Private /24 network shared by all MinIO nodes; static IPs are assigned by
# the network-attach resource below.
module "abrak-subnet-module" {
  network-region = var.region
  source         = "./modules/private-network"
  subnet_name    = "minio"
  ip_range       = "192.168.10.0/24"
}
37 |
38 |
# Attach each abrak to the private subnet with a deterministic IP.
# cidrhost offsets start at 2 so the network address (.0) and conventional
# gateway address (.1) are skipped.
resource "arvan_iaas_network_attach" "private-network-attach" {
  count = var.server-num
  depends_on = [
    module.abrak-module,
    module.abrak-subnet-module
  ]
  region = var.region
  # count.index is already a number: indexing the module and the cidrhost()
  # host-number argument take it directly — wrapping it in "${...}" string
  # interpolation was redundant and relied on implicit string->number
  # conversion.
  abrak_uuid   = module.abrak-module[count.index].details-myabrak-id.id
  network_uuid = module.abrak-subnet-module.subnet-details.network_uuid
  ip           = cidrhost(module.abrak-subnet-module.mynet-ip-range, count.index + 2)
}
51 |
52 |
# Render an Ansible inventory (from inventory.tmpl) listing every node's
# hostname and private IP, then immediately run the MinIO playbook against it.
resource "local_file" "ansible_inventory" {
  depends_on = [
    arvan_iaas_network_attach.private-network-attach
  ]
  filename = "inventory"
  content = templatefile("inventory.tmpl",
    {
      ansible_ip       = arvan_iaas_network_attach.private-network-attach.*.ip,
      ansible_hostname = module.abrak-module.*.details-myabrak-id.name,
      key_path         = var.key_path
      username         = var.user_name
    }
  )
  # Creation-time provisioner: runs once whenever the inventory file is
  # (re)created.
  provisioner "local-exec" {
    command = "ansible-playbook -i inventory setup-minio.yml "
  }
}
--------------------------------------------------------------------------------
/part17-arvancloud-minio-cluster/modules/abrak/ubuntu22/data.tf:
--------------------------------------------------------------------------------
# Look the created abrak back up by name so its full details (id, name, ...)
# can be exposed via outputs; depends_on forces the read after creation.
data "arvan_iaas_abrak" "get_abrak_id" {
  depends_on = [
    arvan_iaas_abrak.myabrak
  ]

  region = var.abrak-region
  name   = var.abrak-name
}
--------------------------------------------------------------------------------
/part17-arvancloud-minio-cluster/modules/abrak/ubuntu22/main.tf:
--------------------------------------------------------------------------------
# Reusable module: creates a single Ubuntu 22.04 abrak (Arvancloud VM).
terraform {
  required_providers {
    arvan = {
      source  = "arvancloud/arvan"
      version = "0.6.3" # put the version here
    }
  }
}


# Name shown in the Arvancloud console; also used by data.tf to look the
# abrak back up after creation.
variable "abrak-name" {
  type = string
}
variable "abrak-region" {
  type = string
}
# Distro and version are combined into the image name "<distro>/<version>".
variable "os-distro" {
  type    = string
  default = "ubuntu"
}
variable "os-version" {
  type    = string
  default = "22.04"
}
# Root disk size in GB.
variable "disksize" {
  type    = number
  default = 25
}
# Plan/flavor ID for the VM size.
variable "abrak-plan" {
  type    = string
  default = "g1-1-1-0"
}
# Name of an SSH key already registered in the same region.
variable "ssh-keyname" {
  type = string
}


resource "arvan_iaas_abrak" "myabrak" {
  region = var.abrak-region
  flavor = var.abrak-plan
  name   = var.abrak-name
  image {
    type = "distributions"
    name = "${var.os-distro}/${var.os-version}"
  }
  disk_size = var.disksize
  ssh_key   = true
  key_name  = var.ssh-keyname
}
50 |
51 |
--------------------------------------------------------------------------------
/part17-arvancloud-minio-cluster/modules/abrak/ubuntu22/output.tf:
--------------------------------------------------------------------------------
# Full data-source object for the abrak (root module uses .id and .name).
output "details-myabrak-id" {
  value = data.arvan_iaas_abrak.get_abrak_id
}

# arvan_iaas_abrak.myabrak has no count, so the legacy splat (.*.) is invalid
# on it — splat expressions only apply to lists. Reference the attribute
# directly. (The output name keeps the original "adresses" spelling so any
# existing references continue to work.)
output "adresses" {
  value = arvan_iaas_abrak.myabrak.addresses
}
--------------------------------------------------------------------------------
/part17-arvancloud-minio-cluster/modules/private-network/main.tf:
--------------------------------------------------------------------------------
# Reusable module: a private subnet with neither gateway nor DHCP — nodes get
# static IPs assigned by the caller via arvan_iaas_network_attach.
terraform {
  required_providers {
    arvan = {
      source  = "arvancloud/arvan"
      version = "0.6.3" # put the version here
    }
  }
}
variable "network-region" {
  type = string
}
# Base name; "-subnet" is appended below.
variable "subnet_name" {
  type = string
}
variable "ip_range" {
  type    = string
  default = "192.168.0.0/24"
}
resource "arvan_iaas_subnet" "mysubnet" {
  region         = var.network-region
  name           = "${var.subnet_name}-subnet"
  subnet_ip      = var.ip_range
  enable_gateway = false
  #gateway = cidrhost(var.ip_range,1)
  dns_servers = [
    "8.8.8.8",
    "4.2.2.4"
  ]
  enable_dhcp = false
}
--------------------------------------------------------------------------------
/part17-arvancloud-minio-cluster/modules/private-network/output.tf:
--------------------------------------------------------------------------------
# Echo back the CIDR so callers can compute host addresses with cidrhost()
# without re-declaring the range.
output "mynet-ip-range" {
  value = var.ip_range
}

# Full subnet object (the caller uses .network_uuid for attachments).
output "subnet-details" {
  value = arvan_iaas_subnet.mysubnet
}
--------------------------------------------------------------------------------
/part17-arvancloud-minio-cluster/modules/public-ip/main.tf:
--------------------------------------------------------------------------------
# Reusable module: request an extra public IP for an existing abrak.
# (Currently unused — see the commented-out module block in the root main.tf.)
terraform {
  required_providers {
    arvan = {
      source  = "arvancloud/arvan"
      version = "0.6.3" # put the version here
    }
  }
}

variable "region-publicip" {
  type = string
}
variable "myabrak_uuid" {
  type = string
}

# Imperative "action" resource: triggers the add-public-ip operation on the
# given abrak.
resource "arvan_iaas_abrak_action" "myabrak-publicip" {
  action     = "add-public-ip"
  region     = var.region-publicip
  abrak_uuid = var.myabrak_uuid
}
--------------------------------------------------------------------------------
/part17-arvancloud-minio-cluster/modules/public-ip/output.tf:
--------------------------------------------------------------------------------
# The action resource itself (result of the add-public-ip call).
output "myabrak-publicip" {
  value = arvan_iaas_abrak_action.myabrak-publicip
}
--------------------------------------------------------------------------------
/part17-arvancloud-minio-cluster/modules/ssh-key/main.tf:
--------------------------------------------------------------------------------
# Reusable module: register an SSH public key with Arvancloud.
terraform {
  required_providers {
    arvan = {
      source  = "arvancloud/arvan"
      version = "0.6.3" # put the version here
    }
  }
}

variable "sshkey-region" {
  type = string
}
# Contents of the public key file (not a path).
variable "ssh-public_key" {
  type = string
}

# Registered under a fixed name; abraks reference it via key_name.
resource "arvan_iaas_sshkey" "myabrak-sshkey" {
  region     = var.sshkey-region
  name       = "ssh-ubuntu-user"
  public_key = var.ssh-public_key
}
--------------------------------------------------------------------------------
/part17-arvancloud-minio-cluster/modules/ssh-key/output.tf:
--------------------------------------------------------------------------------
# Full sshkey resource; the root module uses .name as the abrak key_name.
output "get-ssh-key" {
  value = arvan_iaas_sshkey.myabrak-sshkey
}
--------------------------------------------------------------------------------
/part17-arvancloud-minio-cluster/output.tf:
--------------------------------------------------------------------------------
# One { server-ip, server-name } object per MinIO node, pairing each private
# network attachment with its matching abrak module instance.
output "hostnames-ips" {
  value = [
    for idx, attachment in arvan_iaas_network_attach.private-network-attach : {
      "server-ip"   = attachment.ip
      "server-name" = module.abrak-module[idx].details-myabrak-id.name
    }
  ]
}
11 |
12 |
--------------------------------------------------------------------------------
/part17-arvancloud-minio-cluster/provider.tf:
--------------------------------------------------------------------------------
# Provider requirements and configuration for the MinIO cluster root module.
terraform {
  required_providers {
    arvan = {
      source  = "arvancloud/arvan"
      version = "0.6.3" # put the version here
    }
  }
}

# Authenticates against the Arvancloud API with the key from variable.tf.
provider "arvan" {
  api_key = var.ApiKey
}
--------------------------------------------------------------------------------
/part17-arvancloud-minio-cluster/variable.tf:
--------------------------------------------------------------------------------
# Arvancloud API key.
# NOTE(review): the default is a masked placeholder and will never
# authenticate — supply a real key via TF_VAR_ApiKey or a .tfvars file.
variable "ApiKey" {
  type      = string
  default   = "Apikey ************************"
  sensitive = true
}
# Region used for every resource in this configuration.
variable "region" {
  type    = string
  default = "ir-thr-c2"
}
# Path to the SSH private key; "<key_path>.pub" must exist and is uploaded
# as the abrak SSH key.
variable "key_path" {
  type    = string
  default = "/root/.ssh/arvan_rsa"
}
# Number of MinIO nodes to create.
variable "server-num" {
  type    = number
  default = 4
}
# SSH user that Ansible connects as.
variable "user_name" {
  type    = string
  default = "ubuntu"
}
--------------------------------------------------------------------------------
/part18-hetznercloud-provider/data_sources/locations.txt:
--------------------------------------------------------------------------------
1 | Locations Data:
2 | Descriptions:
3 |
4 | - Falkenstein DC Park 1
5 |
6 | - Nuremberg DC Park 1
7 |
8 | - Helsinki DC Park 1
9 |
10 | - Ashburn, VA
11 |
12 | - Hillsboro, OR
13 |
14 | - Singapore
15 |
16 |
17 | Location IDs:
18 |
19 | - 1
20 |
21 | - 2
22 |
23 | - 3
24 |
25 | - 4
26 |
27 | - 5
28 |
29 | - 6
30 |
31 |
32 | Locations Details:
33 |
34 | - Name: fsn1
35 | City: Falkenstein
36 | Country: DE
37 | Description: Falkenstein DC Park 1
38 | Latitude: 50.47612
39 | Longitude: 12.370071
40 | Network Zone: eu-central
41 |
42 | - Name: nbg1
43 | City: Nuremberg
44 | Country: DE
45 | Description: Nuremberg DC Park 1
46 | Latitude: 49.452102
47 | Longitude: 11.076665
48 | Network Zone: eu-central
49 |
50 | - Name: hel1
51 | City: Helsinki
52 | Country: FI
53 | Description: Helsinki DC Park 1
54 | Latitude: 60.169855
55 | Longitude: 24.938379
56 | Network Zone: eu-central
57 |
58 | - Name: ash
59 | City: Ashburn, VA
60 | Country: US
61 | Description: Ashburn, VA
62 | Latitude: 39.045821
63 | Longitude: -77.487073
64 | Network Zone: us-east
65 |
66 | - Name: hil
67 | City: Hillsboro, OR
68 | Country: US
69 | Description: Hillsboro, OR
70 | Latitude: 45.54222
71 | Longitude: -122.951924
72 | Network Zone: us-west
73 |
74 | - Name: sin
75 | City: Singapore
76 | Country: SG
77 | Description: Singapore
78 | Latitude: 1.283333
79 | Longitude: 103.833333
80 | Network Zone: ap-southeast
81 |
82 |
83 | Names:
84 |
85 | - fsn1
86 |
87 | - nbg1
88 |
89 | - hel1
90 |
91 | - ash
92 |
93 | - hil
94 |
95 | - sin
96 |
97 |
--------------------------------------------------------------------------------
/part18-hetznercloud-provider/data_sources/main.tf:
--------------------------------------------------------------------------------
terraform {
  required_providers {
    hcloud = {
      source  = "hetznercloud/hcloud"
      version = "1.49.1"
    }
  }
}

# Fetch every Hetzner Cloud location (no filter arguments).
# NOTE(review): no provider "hcloud" block is given here, so the token is
# presumably taken from the HCLOUD_TOKEN environment variable — confirm.
data "hcloud_locations" "all" {
}

output "locations" {
  value = data.hcloud_locations.all
}

# Render the location data into a human-readable locations.txt next to this
# module (local_file comes from the hashicorp/local provider, which Terraform
# resolves automatically).
resource "local_file" "locations_output" {
  content = <<-EOT
  Locations Data:
  Descriptions:
  %{ for desc in data.hcloud_locations.all.descriptions }
  - ${desc}
  %{ endfor }

  Location IDs:
  %{ for id in data.hcloud_locations.all.location_ids }
  - ${id}
  %{ endfor }

  Locations Details:
  %{ for loc in data.hcloud_locations.all.locations }
  - Name: ${loc.name}
    City: ${loc.city}
    Country: ${loc.country}
    Description: ${loc.description}
    Latitude: ${loc.latitude}
    Longitude: ${loc.longitude}
    Network Zone: ${loc.network_zone}
  %{ endfor }

  Names:
  %{ for name in data.hcloud_locations.all.names }
  - ${name}
  %{ endfor }
  EOT

  filename = "${path.module}/locations.txt"
}
49 |
--------------------------------------------------------------------------------
/part18-hetznercloud-provider/simple-vm/README.md:
--------------------------------------------------------------------------------
1 | # Simple VM
2 |
3 | here we aimed to demonstrate how to create a simple VM in `hetzner cloud` with terraform
4 |
5 | ## Setup
6 |
7 | ```bash
8 | # add your hetznercloud api-token to values.tfvars file
9 | echo 'token = ""' > ./values.tfvars
10 |
# or: export TF_VAR_token=<your-token>
12 |
13 | terraform apply -var-file="./values.tfvars"
14 | ```
15 |
16 | ## Destroy
17 |
18 | ```bash
19 | terraform destroy -var-file="./values.tfvars"
20 | ```
21 |
--------------------------------------------------------------------------------
/part18-hetznercloud-provider/simple-vm/main.tf:
--------------------------------------------------------------------------------
# To generate the ssh key: ssh-keygen -t ed25519 -f ~/.ssh/hetzner_ed25519
# Upload the local public key so the server below is reachable over SSH.
resource "hcloud_ssh_key" "default" {
  name       = "hetzner-cloud ssh key"
  public_key = file("~/.ssh/hetzner_ed25519.pub")
}

# A single VM with a public IPv4 only (IPv6 disabled).
resource "hcloud_server" "node01" {
  name        = "node01"
  image       = var.os_type
  server_type = var.server_type
  location    = var.location
  ssh_keys    = [hcloud_ssh_key.default.id]
  labels = {
    "Server" : "node01"
  }
  public_net {
    ipv4_enabled = true
    ipv6_enabled = false
  }
}

# Private network plus one subnet covering the same range.
# NOTE(review): network_zone "eu-central" must match var.location's zone —
# revisit if the default location changes to a US/AP region.
resource "hcloud_network" "hcloud_network" {
  name     = "hetzner-cloud network"
  ip_range = var.ip_range
}

resource "hcloud_network_subnet" "hcloud_subnet" {
  network_id   = hcloud_network.hcloud_network.id
  type         = "cloud"
  network_zone = "eu-central"
  ip_range     = var.ip_range
}

# Attach the server to the subnet (private IP auto-assigned).
resource "hcloud_server_network" "node01_network" {
  server_id = hcloud_server.node01.id
  subnet_id = hcloud_network_subnet.hcloud_subnet.id
}
38 |
--------------------------------------------------------------------------------
/part18-hetznercloud-provider/simple-vm/output.tf:
--------------------------------------------------------------------------------
output "server_ip" {
  description = "The IP address of the created server"
  value       = hcloud_server.node01.ipv4_address
}

output "server_id" {
  description = "The ID of the created server"
  value       = hcloud_server.node01.id
}

output "server_name" {
  description = "The name of the created server"
  value       = hcloud_server.node01.name
}

output "server_status" {
  description = "The status of the created server"
  value       = hcloud_server.node01.status
}

# Write a ready-to-use Ansible inventory for the new server next to this
# configuration.
resource "local_file" "ansible_inventory" {
  content = <<-EOT
  [all]
  ${hcloud_server.node01.name} ansible_host=${hcloud_server.node01.ipv4_address}

  [all:vars]
  ansible_become=true
  ansible_become_method=sudo
  ansible_python_interpreter=/usr/bin/python3
  EOT

  filename = "./ansible_inventory.ini"
}
34 |
--------------------------------------------------------------------------------
/part18-hetznercloud-provider/simple-vm/variables.tf:
--------------------------------------------------------------------------------
1 |

# Hetzner Cloud API token (supplied via values.tfvars or TF_VAR_token).
variable "token" {
  type      = string
  sensitive = true
}

# Location name, e.g. nbg1 (see ../data_sources for the full list).
variable "location" {
  type    = string
  default = "nbg1"
}

variable "server_type" {
  type    = string
  default = "cx32"
}

# Image name passed to the server's `image` argument.
variable "os_type" {
  type    = string
  default = "ubuntu-20.04"
}
# CIDR used for both the network and its subnet.
variable "ip_range" {
  type    = string
  default = "10.0.1.0/24"
}
--------------------------------------------------------------------------------
/part18-hetznercloud-provider/simple-vm/versions.tf:
--------------------------------------------------------------------------------
# Version pins and provider configuration for the simple-vm example.
terraform {
  required_version = ">= 1.2"

  required_providers {
    hcloud = {
      source  = "hetznercloud/hcloud"
      version = "~> 1.49.1"
    }
  }
}

provider "hcloud" {
  token = var.token
}
15 |
--------------------------------------------------------------------------------
/part19-cloudflare-provider/create-record/README.md:
--------------------------------------------------------------------------------
1 | # Create Record
2 |
3 | here we aimed to demonstrate how to create a record in `Cloudflare` with terraform
4 |
5 | ## Setup
6 |
7 | ```bash
8 | # add your cloudflare api-token and zone-id to values.tfvars file
9 | printf '%s\n%s' 'token = ""' 'zone_id = ""' > ./values.tfvars
10 |
11 | terraform apply -var-file="./values.tfvars"
12 | ```
13 |
14 | ## Destroy
15 |
16 | ```bash
17 | terraform destroy -var-file="./values.tfvars"
18 | ```
19 |
--------------------------------------------------------------------------------
/part19-cloudflare-provider/create-record/main.tf:
--------------------------------------------------------------------------------
# A record "subdomain.<zone>" pointing at the server, proxied through
# Cloudflare (orange cloud).
# NOTE(review): `value` is correct for the ~> 3.0 provider pinned in
# versions.tf; newer major versions rename this argument to `content`.
resource "cloudflare_record" "vpn" {
  zone_id = var.zone_id
  name    = "subdomain"
  value   = var.server_ip
  type    = "A"
  proxied = true
}
8 |
--------------------------------------------------------------------------------
/part19-cloudflare-provider/create-record/outputs.tf:
--------------------------------------------------------------------------------
# Fully-qualified hostname of the created record.
output "record" {
  value = cloudflare_record.vpn.hostname
}
4 |
--------------------------------------------------------------------------------
/part19-cloudflare-provider/create-record/variables.tf:
--------------------------------------------------------------------------------
# Cloudflare API token with DNS-edit permission for the zone.
variable "token" {
  type      = string
  sensitive = true
}

# IPv4 address the A record points at.
variable "server_ip" {
  type = string
}

variable "zone_id" {
  type = string
}
13 |
--------------------------------------------------------------------------------
/part19-cloudflare-provider/create-record/versions.tf:
--------------------------------------------------------------------------------
# Version pins and provider configuration for the create-record example.
terraform {
  required_version = ">= 1.2"

  required_providers {
    cloudflare = {
      source  = "cloudflare/cloudflare"
      version = "~> 3.0"
    }
  }
}

provider "cloudflare" {
  api_token = var.token
}
15 |
--------------------------------------------------------------------------------
/part20-arvancloud-abrak/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 |
4 | # Terraform lockfile
5 | .terraform.lock.hcl
6 |
7 | # .tfstate files
8 | *.tfstate
9 | *.tfstate.*
10 |
11 | # Crash log files
12 | crash.log
13 |
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# passwords, private keys, and other secrets. These should not be part of version
16 | # control as they are data points which are potentially sensitive and subject
17 | # to change depending on the environment.
18 | *.tfvars
19 |
20 | # Ignore override files as they are usually used to override resources locally and so
21 | # are not checked in
22 | override.tf
23 | override.tf.json
24 | *_override.tf
25 | *_override.tf.json
26 |
27 | # Ignore CLI configuration files
28 | .terraformrc
29 | terraform.rc
30 |
--------------------------------------------------------------------------------
/part20-arvancloud-abrak/main.tf:
--------------------------------------------------------------------------------
terraform {
  required_providers {
    arvan = {
      source  = "arvancloud/arvan"
      version = "0.6.4" # put the version here
    }
  }
}

# Single Debian 11 abrak on the smallest plan; an SSH keypair is generated
# and registered automatically when var.abrak_sshkey_enabled is true.
module "abrak" {
  source               = "./modules/abrak"
  abrak_name           = var.abrak_name
  region               = var.region
  abrak_sshkey_enabled = var.abrak_sshkey_enabled
  abrak_sshkey_name    = "devopshobies"
  abrak_flavor         = "g1-1-1-0"
  abrak_image = {
    type = "distributions"
    name = "debian/11"
  }
  abrak_disk_size = 25
}
--------------------------------------------------------------------------------
/part20-arvancloud-abrak/modules/abrak/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 |
4 | # Terraform lockfile
5 | .terraform.lock.hcl
6 |
7 | # .tfstate files
8 | *.tfstate
9 | *.tfstate.*
10 |
11 | # Crash log files
12 | crash.log
13 |
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# passwords, private keys, and other secrets. These should not be part of version
16 | # control as they are data points which are potentially sensitive and subject
17 | # to change depending on the environment.
18 | *.tfvars
19 |
20 | # Ignore override files as they are usually used to override resources locally and so
21 | # are not checked in
22 | override.tf
23 | override.tf.json
24 | *_override.tf
25 | *_override.tf.json
26 |
27 | # Ignore CLI configuration files
28 | .terraformrc
29 | terraform.rc
30 |
--------------------------------------------------------------------------------
/part20-arvancloud-abrak/modules/abrak/README.md:
--------------------------------------------------------------------------------
1 | ## Requirements
2 |
3 | | Name | Version |
4 | |------|---------|
5 | | [arvan](#requirement\_arvan) | >=0.6.4 |
6 |
7 | ## Providers
8 |
9 | | Name | Version |
10 | |------|---------|
11 | | [arvan](#provider\_arvan) | >=0.6.4 |
12 |
13 | ## Modules
14 |
15 | No modules.
16 |
17 | ## Resources
18 |
19 | | Name | Type |
20 | |------|------|
21 | | [arvan_iaas_abrak.abrak](https://registry.terraform.io/providers/arvancloud/arvan/latest/docs/resources/iaas_abrak) | resource |
22 |
23 | ## Inputs
24 |
25 | | Name | Description | Type | Default | Required |
26 | |------|-------------|------|---------|:--------:|
27 | | [abrak\_disk\_size](#input\_abrak\_disk\_size) | Abrak disk size in GB | `number` | n/a | yes |
28 | | [abrak\_flavor](#input\_abrak\_flavor) | Abrak plan ID, you can get list of plan IDs of each region from sizes api | `string` | n/a | yes |
| [abrak\_ha\_enabled](#input\_abrak\_ha\_enabled) | HA feature in abrak. This feature is experimental and may not work yet. | `bool` | `false` | no |
30 | | [abrak\_image](#input\_abrak\_image) | Abrak image type and name | object({
type = string
name = string
})
| n/a | yes |
31 | | [abrak\_name](#input\_abrak\_name) | Abrak name in Arvancloud web console | `string` | n/a | yes |
32 | | [region](#input\_region) | Arvancloud region name. | `string` | n/a | yes |
33 |
34 | ## Outputs
35 |
36 | | Name | Description |
37 | |------|-------------|
38 | | [id](#output\_id) | n/a |
39 | | [ip\_addresses](#output\_ip\_addresses) | n/a |
40 |
--------------------------------------------------------------------------------
/part20-arvancloud-abrak/modules/abrak/main.tf:
--------------------------------------------------------------------------------
# Generate a local 4096-bit RSA key pair for Abrak SSH access.
# Created only when SSH-key support is enabled for this module.
resource "tls_private_key" "sshkey" {
  count     = var.abrak_sshkey_enabled ? 1 : 0
  algorithm = "RSA"
  rsa_bits  = 4096
}
6 |
# Register the generated public key with Arvancloud so Abrak
# instances in this region can reference it by name.
resource "arvan_iaas_sshkey" "sshkey_arvan" {
  count      = var.abrak_sshkey_enabled ? 1 : 0
  name       = var.abrak_sshkey_name
  public_key = tls_private_key.sshkey[0].public_key_openssh
  region     = var.region
}
13 |
# The Abrak (VM) instance itself. The SSH key is attached only when
# key support is enabled; otherwise key_name is left unset (null).
resource "arvan_iaas_abrak" "abrak" {
  name       = var.abrak_name
  region     = var.region
  flavor     = var.abrak_flavor
  key_name   = var.abrak_sshkey_enabled ? arvan_iaas_sshkey.sshkey_arvan[0].name : null
  disk_size  = var.abrak_disk_size
  ha_enabled = var.abrak_ha_enabled # Experimental feature; may not work yet.
  image {
    type = var.abrak_image.type
    name = var.abrak_image.name
  }
}
--------------------------------------------------------------------------------
/part20-arvancloud-abrak/modules/abrak/outputs.tf:
--------------------------------------------------------------------------------
# All IP addresses attached to the Abrak instance.
output "ip_addresses" {
  description = "IP addresses attached to the Abrak instance."
  value       = arvan_iaas_abrak.abrak.addresses
}

# Provider-assigned ID of the created Abrak instance.
output "id" {
  description = "ID of the created Abrak instance."
  value       = arvan_iaas_abrak.abrak.id
}

# Private half of the generated key pair; null when SSH-key support
# is disabled. Marked sensitive so it is redacted from CLI output.
output "ssh_key" {
  description = "Generated private key in OpenSSH format, or null when SSH-key support is disabled."
  value       = var.abrak_sshkey_enabled ? tls_private_key.sshkey[0].private_key_openssh : null
  sensitive   = true
}
--------------------------------------------------------------------------------
/part20-arvancloud-abrak/modules/abrak/variables.tf:
--------------------------------------------------------------------------------
# Region the Abrak and its SSH key are created in. Restricted to the
# Arvancloud datacenters known to support the IaaS API.
variable "region" {
  description = "Arvancloud region name."
  type        = string
  validation {
    condition = contains(
      [
        "ir-thr-c2",  # Forogh
        "ir-tbz-dc1", # Shahriar
        "ir-thr-w1",  # Bamdad
        "ir-thr-c1"   # Simin
      ],
      var.region
    )
    # Stray quote lines removed: they were part of the heredoc and
    # leaked into the user-facing error message.
    error_message = <<-EOF
      Specify a valid region name. Use one of the following available regions:
        Forogh   ==> ir-thr-c2
        Shahriar ==> ir-tbz-dc1
        Bamdad   ==> ir-thr-w1
        Simin    ==> ir-thr-c1
    EOF
  }
}
25 |
# Display name of the Abrak instance as shown in the Arvancloud web console.
variable "abrak_name" {
  description = "Abrak name in Arvancloud web console"
  type        = string
}
30 |
# Plan (size) of the Abrak; valid IDs differ per region.
variable "abrak_flavor" {
  description = "Abrak plan ID, you can get list of plan IDs of each region from sizes api"
  # Check "https://napi.arvancloud.ir/ecc/v1/regions/<region>/sizes" to find the best flavor size.
  type = string
}
36 |
# Root disk size of the Abrak, in gigabytes.
variable "abrak_disk_size" {
  description = "Abrak disk size in GB"
  type        = number

  # Reject zero/negative sizes early, at plan time, instead of
  # failing later in the provider API call.
  validation {
    condition     = var.abrak_disk_size > 0
    error_message = "Abrak disk size must be a positive number of gigabytes."
  }
}
41 |
# Toggles Arvancloud's HA feature on the Abrak. Off by default
# because the feature is experimental.
variable "abrak_ha_enabled" {
  description = "Enable the HA feature on the Abrak. This feature is experimental and may not work yet."
  type        = bool
  default     = false
}
47 |
# Image used to provision the Abrak, given as a type/name pair.
# NOTE(review): the accepted type/name values depend on the Arvan
# images API — confirm against the provider documentation.
variable "abrak_image" {
  description = "Abrak image type and name"
  type = object({
    type = string
    name = string
  })
}
55 |
# When true, a local RSA key pair is generated and its public half is
# registered in Arvancloud and attached to the Abrak.
variable "abrak_sshkey_enabled" {
  description = "Enable generation and registration of an Arvan SSH public key."
  type        = bool
  default     = false
}
61 |
# Name under which the generated public key is registered in Arvancloud.
# Only used when abrak_sshkey_enabled is true.
variable "abrak_sshkey_name" {
  description = "Arvan ssh public key name"
  type        = string
  default     = "devopshobies"
}
--------------------------------------------------------------------------------
/part20-arvancloud-abrak/modules/abrak/versions.tf:
--------------------------------------------------------------------------------
# Provider requirements for the abrak module.
terraform {
  required_providers {
    # Arvancloud IaaS provider (Abrak instances and SSH keys).
    arvan = {
      source  = "arvancloud/arvan"
      version = ">=0.6.4"
    }
    # Used to generate the optional local RSA key pair.
    tls = {
      source  = "hashicorp/tls"
      version = ">=4.0.4"
    }
  }
}
--------------------------------------------------------------------------------
/part20-arvancloud-abrak/output.tf:
--------------------------------------------------------------------------------
# ID of the Abrak instance created by the abrak module.
output "abrak_id" {
  description = "ID of the Abrak instance created by the abrak module."
  value       = module.abrak.id
}

# IP addresses attached to the Abrak instance.
output "abrak_ip_addresses" {
  description = "IP addresses attached to the Abrak instance."
  value       = module.abrak.ip_addresses
}

# Generated private key; null when SSH-key support is disabled
# (the module already returns null in that case, the conditional is
# kept for explicitness). Marked sensitive to redact CLI output.
output "abrak_sshkey" {
  description = "Generated SSH private key, or null when SSH-key support is disabled."
  value       = var.abrak_sshkey_enabled ? module.abrak.ssh_key : null
  sensitive   = true
}
--------------------------------------------------------------------------------
/part20-arvancloud-abrak/providers.tf:
--------------------------------------------------------------------------------
# Configure the Arvancloud provider. The API key comes from the
# sensitive ApiKey variable — never hard-code it here.
provider "arvan" {
  api_key = var.ApiKey
}
--------------------------------------------------------------------------------
/part20-arvancloud-abrak/variables.tf:
--------------------------------------------------------------------------------
# NOTE(review): non-idiomatic name (snake_case `api_key` preferred);
# kept unchanged for backward compatibility with existing tfvars/TF_VAR usage.
variable "ApiKey" {
  description = "Arvancloud API key used to authenticate the arvan provider. Supply via TF_VAR_ApiKey or a tfvars file; never commit it to version control."
  type        = string
  sensitive   = true
}
5 |
# Display name passed through to the abrak module.
variable "abrak_name" {
  description = "Abrak name in the Arvancloud web console."
  type        = string
  default     = "terraform-abrak-example"
}
10 |
# Region passed through to the abrak module, which validates it
# against the list of supported datacenters.
variable "region" {
  description = "Arvancloud region name (validated inside the abrak module)."
  type        = string
  default     = "ir-thr-c2" # Forogh datacenter
}
15 |
# When true, the abrak module generates and registers an SSH key pair.
variable "abrak_sshkey_enabled" {
  description = "Enable generation and registration of an Arvan SSH key for the Abrak."
  type        = bool
  default     = true
}
20 |
--------------------------------------------------------------------------------
/part21-proxmox-provider/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Terraform Proxmox Example
3 | This code is based on the Proxmox provider. For more information refer to the [official documentation](https://registry.terraform.io/providers/Telmate/proxmox/latest/docs/resources/vm_qemu).
4 |
5 |
6 | # Requirements:
7 |
8 | ## VM Template
9 | You need a cloud-init template on Proxmox. For this purpose [this youtube tutorial](https://www.youtube.com/watch?v=shiIi38cJe4) would help.
10 |
11 | Commands for creating **Ubuntu Cloud-Init** template in a nutshell:
12 |
13 | ```
14 | # ssh to proxmox and download ubuntu cloud init img file
15 | wget https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img
16 | # create vm
17 | qm create 8000 --memory 2048 --name ubuntu-cloud --net0 virtio,bridge=vmbr1
18 | # create disk from img
19 | qm importdisk 8000 jammy-server-cloudimg-amd64.img local-lvm
20 | # connect disk to vm
21 | qm set 8000 --scsihw virtio-scsi-pci --scsi0 local-lvm:vm-8000-disk-0
22 | # connect cloudinit disk to vm
23 | qm set 8000 --ide2 local-lvm:cloudinit
24 | qm set 8000 --boot c --bootdisk scsi0
25 | # connect proxmox console(novnc) to vm
26 | qm set 8000 --serial0 socket --vga serial0
27 |
28 | # From Proxmox UI, convert ubuntu-cloud to template.
29 | ```
30 |
31 | ## Proxmox API_TOKEN
32 | 1. Create an API token: **Login to Proxmox > Datacenter > Permissions > API Tokens**
33 | 2. Set access for API_TOKEN: In **Permissions**, Add API Token permission with '/' path and Administrator Role for the created API Token.
34 |
35 | # More Examples:
36 | On the official GitHub page of the Proxmox library you can see more examples:
37 | *https://github.com/Telmate/terraform-provider-proxmox/tree/master/examples*
--------------------------------------------------------------------------------
/part21-proxmox-provider/main.tf:
--------------------------------------------------------------------------------
# Pin the Terraform CLI and the Telmate Proxmox provider versions.
terraform {
  required_version = ">= 1.1.0"
  required_providers {
    proxmox = {
      source  = "telmate/proxmox"
      version = ">= 2.9.14"
    }
  }
}
10 |
# Proxmox API connection. Authenticates with an API token
# (ID + secret) supplied as variables. pm_tls_insecure controls
# whether TLS verification is skipped (useful for self-signed
# Proxmox certificates) — keep it false where possible.
provider "proxmox" {
  pm_tls_insecure     = var.pm_tls_insecure
  pm_api_url          = var.pm_api_url
  pm_api_token_id     = var.pm_api_token_id
  pm_api_token_secret = var.pm_api_token_secret
}
17 |
18 | resource "proxmox_vm_qemu" "pxe-example" {
19 | name = var.vm_name
20 | boot = "order=scsi0;net0"
21 | cores = var.vm_vcpu_cores
22 | vcpus = var.vm_vcpus
23 | cpu = "host"
24 | memory = var.vm_memory
25 | qemu_os = "other"
26 | scsihw = "virtio-scsi-pci"
27 | target_node = var.vm_target_node
28 |
29 | disk {
30 | discard = "on"
31 | iothread = 1
32 | size = var.vm_disk_size
33 | ssd = 1
34 | storage = var.vm_disk_storage
35 | type = "scsi"
36 | }
37 |
38 | network {
39 | bridge = var.vm_network_bridge
40 | firewall = false
41 | model = "e1000"
42 | }
43 |
44 | clone = var.clone_name
45 | os_type = "cloud-init"
46 | # Setup the ip address using cloud-init.
47 | # Keep in mind to use the CIDR notation for the ip.
48 | ipconfig0 = "ip=${var.vm_cloudinit_network.ip}/${var.vm_cloudinit_network.netmask},gw=${var.vm_cloudinit_network.gateway}"
49 |
50 | ciuser = "ubuntu"
51 | sshkeys = < This is module