├── .editorconfig
├── .gitattributes
├── .gitignore
├── .tflint.hcl
├── CHANGELOG.md
├── LICENSE
├── README.md
├── clickhouse-cluster
│   ├── README.md
│   ├── helm
│   │   └── clickhouse-cluster.yaml.tpl
│   ├── load-balancer.tf
│   ├── main.tf
│   ├── outputs.tf
│   ├── variables.tf
│   └── versions.tf
├── clickhouse-operator
│   ├── README.md
│   ├── main.tf
│   ├── variables.tf
│   └── versions.tf
├── docs
│   ├── README.md
│   ├── architecture.png
│   ├── autoscaler.md
│   ├── aws_eks_blueprint_architecture_diagram.svg
│   ├── blueprint.md
│   ├── clickhouse.md
│   ├── ebs.md
│   ├── eks.md
│   ├── prod-ready.md
│   └── vpc.md
├── eks
│   ├── README.md
│   ├── addons.tf
│   ├── helm
│   │   └── cluster-autoscaler.yaml.tpl
│   ├── iam.tf
│   ├── main.tf
│   ├── outputs.tf
│   ├── variables.tf
│   ├── versions.tf
│   └── vpc.tf
├── examples
│   ├── default
│   │   └── main.tf
│   ├── eks-cluster-only
│   │   └── main.tf
│   ├── public-loadbalancer
│   │   └── main.tf
│   └── public-subnets-only
│       └── main.tf
├── main.tf
├── outputs.tf
├── variables.tf
└── versions.tf
/.editorconfig:
--------------------------------------------------------------------------------
1 | # http://editorconfig.org
2 | root = true
3 |
4 | [*]
5 | indent_style = space
6 | indent_size = 2
7 | charset = utf-8
8 | trim_trailing_whitespace = true
9 | insert_final_newline = true
10 |
11 | [*.md]
12 | trim_trailing_whitespace = false
13 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # end-of-line normalization (https://git-scm.com/docs/gitattributes#_text)
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | terraform.tfstate
2 | terraform.tfstate.backup
3 | .terraform
4 | .terraform.lock.hcl
5 | .terraform.tfstate.lock.info
6 | local.tf
7 | *.backup
8 | *.tfvars
--------------------------------------------------------------------------------
/.tflint.hcl:
--------------------------------------------------------------------------------
1 | plugin "terraform" {
2 | enabled = true
3 | preset = "recommended"
4 | }
5 |
6 | plugin "aws" {
7 | enabled = true
8 | version = "0.29.0"
9 | source = "github.com/terraform-linters/tflint-ruleset-aws"
10 | }
11 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | All notable changes to this project will be documented in this file.
4 |
5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
7 |
8 | ## [0.1.1](https://github.com/Altinity/terraform-aws-eks-clickhouse/compare/v0.1.0...v0.1.1)
9 | ### Added
10 | - New `outputs.tf` file with `eks_node_groups` and `eks_cluster` outputs.
11 |
12 | ## [0.1.0](https://github.com/Altinity/terraform-aws-eks-clickhouse/releases/tag/v0.1.0)
13 | ### Added
14 | - EKS cluster optimized for ClickHouse® with EBS driver and autoscaling.
15 | - VPC, subnets, and security groups.
16 | - Node Pools for each combination of instance type and subnet.
17 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2024 Altinity, Inc
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # terraform-aws-eks-clickhouse
2 |
3 | [](http://www.apache.org/licenses/LICENSE-2.0.html)
4 | [](https://github.com/altinity/terraform-aws-eks-clickhouse/issues)
5 |
6 |
7 |
8 |
9 | Terraform module for creating EKS clusters optimized for ClickHouse® with EBS and autoscaling.
10 | It includes the Altinity Kubernetes Operator for ClickHouse and a fully working ClickHouse cluster.
11 |
12 | ## Prerequisites
13 |
14 | - [terraform](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli) (recommended `>= v1.5`)
15 | - [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
16 | - [aws-cli](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html)
17 |
18 | ## Usage
19 | ### Create an EKS Cluster with the Altinity Kubernetes Operator for ClickHouse and ClickHouse Cluster
20 |
21 | Paste the following Terraform sample module into a tf file (`main.tf`) in a new directory. Adjust properties as desired.
22 | The sample module will create a Node Pool for each combination of instance type and subnet. For example, if you have 3 subnets and 2 instance types, this module will create 6 different Node Pools.
23 |
24 | ```hcl
25 | locals {
26 | region = "us-east-1"
27 | }
28 |
29 | provider "aws" {
30 | # https://registry.terraform.io/providers/hashicorp/aws/latest/docs
31 | region = local.region
32 | }
33 |
34 | module "eks_clickhouse" {
35 | source = "github.com/Altinity/terraform-aws-eks-clickhouse"
36 |
37 | install_clickhouse_operator = true
38 | install_clickhouse_cluster = true
39 |
40 | # Set to true if you want to use a public load balancer (and expose ports to the public Internet)
41 | clickhouse_cluster_enable_loadbalancer = false
42 |
43 | eks_cluster_name = "clickhouse-cluster"
44 | eks_region = local.region
45 | eks_cidr = "10.0.0.0/16"
46 |
47 | eks_availability_zones = [
48 | "${local.region}a",
49 | "${local.region}b",
50 | "${local.region}c"
51 | ]
52 | eks_private_cidr = [
53 | "10.0.1.0/24",
54 | "10.0.2.0/24",
55 | "10.0.3.0/24"
56 | ]
57 | eks_public_cidr = [
58 | "10.0.101.0/24",
59 | "10.0.102.0/24",
60 | "10.0.103.0/24"
61 | ]
62 |
63 | eks_node_pools = [
64 | {
65 | name = "clickhouse"
66 | instance_type = "m6i.large"
67 | desired_size = 0
68 | max_size = 10
69 | min_size = 0
70 | zones = ["${local.region}a", "${local.region}b", "${local.region}c"]
71 | },
72 | {
73 | name = "system"
74 | instance_type = "t3.large"
75 | desired_size = 1
76 | max_size = 10
77 | min_size = 0
78 | zones = ["${local.region}a"]
79 | }
80 | ]
81 |
82 | eks_tags = {
83 | CreatedBy = "mr-robot"
84 | }
85 | }
86 | ```
87 |
88 | > ⚠️ The instance type of `eks_node_pools` at index `0` will be used to set up the ClickHouse cluster replicas.
89 |
90 | ### Run Terraform to create the cluster
91 |
92 | Execute commands to initialize and apply the Terraform module. It will create an EKS cluster and install a ClickHouse sample database.
93 |
94 | ```sh
95 | terraform init
96 | terraform apply
97 | ```
98 |
99 | > Setting up the EKS cluster and sample database takes from 10 to 30 minutes depending on the load in your cluster and availability of resources.
100 |
101 | ### Access your ClickHouse database
102 | Update your kubeconfig with the credentials of your new EKS Kubernetes cluster.
103 | ```sh
104 | aws eks update-kubeconfig --region us-east-1 --name clickhouse-cluster
105 | ```
106 |
107 | Connect to your ClickHouse server using `kubectl exec`.
108 | ```sh
109 | kubectl exec -it chi-eks-dev-0-0-0 -n clickhouse -- clickhouse-client
110 | ```
111 |
112 | ### Run Terraform to remove the cluster
113 | After use, you can destroy the EKS cluster. First, delete any ClickHouse clusters you have created.
114 | ```sh
115 | kubectl delete chi --all --all-namespaces
116 | ```
117 |
118 | Then, run `terraform destroy` to remove the EKS cluster and any cloud resources.
119 | ```sh
120 | terraform destroy
121 | ```
122 |
123 | ## Docs
124 | - [Terraform Registry](https://registry.terraform.io/modules/Altinity/eks-clickhouse/aws/latest)
125 | - [Architecture](https://github.com/Altinity/terraform-aws-eks-clickhouse/tree/master/docs)
126 |
127 | ## Issues
128 | If a Terraform operation does not complete, try running it again. If the problem persists, please [file an issue](https://github.com/Altinity/terraform-aws-eks-clickhouse/issues).
129 |
130 | ## More Information and Commercial Support
131 | Altinity is the maintainer of this project. Altinity offers a range of
132 | services related to ClickHouse and analytic applications on Kubernetes.
133 |
134 | - [Official website](https://altinity.com/) - Get a high level overview of Altinity and our offerings.
135 | - [Altinity.Cloud](https://altinity.com/cloud-database/) - Run ClickHouse in our cloud or yours.
136 | - [Altinity Support](https://altinity.com/support/) - Get Enterprise-class support for ClickHouse.
137 | - [Slack](https://altinitydbworkspace.slack.com/join/shared_invite/zt-w6mpotc1-fTz9oYp0VM719DNye9UvrQ) - Talk directly with ClickHouse users and Altinity devs.
138 | - [Contact us](https://hubs.la/Q020sH3Z0) - Contact Altinity with your questions or issues.
139 |
140 | ## Legal
141 | All code, unless specified otherwise, is licensed under the [Apache-2.0](LICENSE) license.
142 | Copyright (c) 2024 Altinity, Inc.
143 |
--------------------------------------------------------------------------------
/clickhouse-cluster/README.md:
--------------------------------------------------------------------------------
1 | # clickhouse-cluster sub-module
2 |
3 | > TBA
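4 | 
5 | In the meantime, here is a minimal, hypothetical sketch of how this sub-module could be called from a root module. The relative `source` path and the example values are assumptions; only the two variables without defaults in `variables.tf` are strictly required, and the `kubernetes` and `helm` providers must already be configured against the target cluster.
6 | 
7 | ```hcl
8 | # Hypothetical standalone usage of the clickhouse-cluster sub-module.
9 | module "clickhouse_cluster" {
10 |   source = "./clickhouse-cluster" # assumed relative path
11 | 
12 |   # Required (no defaults in variables.tf)
13 |   clickhouse_cluster_instance_type = "m6i.large"
14 |   k8s_availability_zones           = ["us-east-1a", "us-east-1b", "us-east-1c"]
15 | 
16 |   # Common optional overrides (defaults shown in variables.tf)
17 |   clickhouse_cluster_namespace           = "clickhouse"
18 |   clickhouse_cluster_user                = "test"
19 |   clickhouse_cluster_enable_loadbalancer = false
20 | }
21 | ```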
--------------------------------------------------------------------------------
/clickhouse-cluster/helm/clickhouse-cluster.yaml.tpl:
--------------------------------------------------------------------------------
1 | all:
2 | metadata:
3 | labels:
4 | application_group: ${name}
5 |
6 | clickhouse:
7 | name: ${name}
8 | cluster: ${cluster_name}
9 | zones:
10 | %{ for zone in zones ~}
11 | - ${zone}
12 | %{ endfor ~}
13 | node_selector: "${instance_type}"
14 | service_type: "${service_type}"
15 | storage_class_name: gp3-encrypted
16 | password: ${password}
17 | user: ${user}
18 | keeper_name: clickhouse-keeper-sts
19 |
--------------------------------------------------------------------------------
/clickhouse-cluster/load-balancer.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | kubeconfig = < $KUBECONFIG_PATH
42 | NAMESPACE=${var.clickhouse_cluster_namespace}
43 | SECONDS=0
44 | SLEEP_TIME=10
45 | TIMEOUT=600
46 |
47 | end=$((SECONDS+TIMEOUT))
48 | echo "Waiting for cluster in the namespace $NAMESPACE to receive a hostname..."
49 |
50 | while [ $SECONDS -lt $end ]; do
51 | HOSTNAME=$(kubectl --kubeconfig $KUBECONFIG_PATH get service --namespace=$NAMESPACE -o jsonpath='{.items[?(@.spec.type=="LoadBalancer")].status.loadBalancer.ingress[0].hostname}' | awk '{print $1}')
52 | if [ -n "$HOSTNAME" ]; then
53 | echo "Cluster has received a hostname: $HOSTNAME"
54 | exit 0
55 | fi
56 | echo "Cluster does not have a hostname yet. Rechecking in $SLEEP_TIME seconds..."
57 | sleep $SLEEP_TIME
58 | done
59 |
60 | echo "Timed out waiting for cluster to receive a hostname in namespace $NAMESPACE."
61 | exit 1
62 | EOT
63 | }
64 | }
65 |
66 | data "kubernetes_service" "clickhouse_load_balancer" {
67 | depends_on = [null_resource.wait_for_clickhouse]
68 | count = var.clickhouse_cluster_enable_loadbalancer ? 1 : 0
69 |
70 | metadata {
71 | name = "clickhouse-eks"
72 | namespace = var.clickhouse_cluster_namespace
73 | }
74 | }
75 |
--------------------------------------------------------------------------------
/clickhouse-cluster/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | clickhouse_cluster_chart_name = "clickhouse-eks"
3 | clickhouse_keeper_chart_name = "clickhouse-keeper-sts"
4 | clickhouse_helm_charts_repository = "https://helm.altinity.com"
5 | clickhouse_password = var.clickhouse_cluster_password == null ? join("", random_password.this[*].result) : var.clickhouse_cluster_password
6 | }
7 |
8 | # Generates a random password without special characters if no password is provided
9 | resource "random_password" "this" {
10 | count = var.clickhouse_cluster_password == null ? 1 : 0
11 | length = 22
12 | special = false
13 |
14 | lifecycle {
15 | # ensures the password isn't regenerated on subsequent applies,
16 | # preserving the initial password.
17 | ignore_changes = all
18 | }
19 | }
20 |
21 | # Namespace for all ClickHouse-related Kubernetes resources,
22 | # providing logical isolation within the cluster.
23 | resource "kubernetes_namespace" "clickhouse" {
24 | metadata {
25 | name = var.clickhouse_cluster_namespace
26 | }
27 | }
28 |
29 |
30 | resource "helm_release" "clickhouse_keeper" {
31 | name = local.clickhouse_keeper_chart_name
32 | chart = local.clickhouse_keeper_chart_name
33 | namespace = kubernetes_namespace.clickhouse.metadata[0].name
34 | repository = local.clickhouse_helm_charts_repository
35 | version = var.clickhouse_keeper_chart_version
36 | }
37 |
38 |
39 | resource "helm_release" "clickhouse_cluster" {
40 | name = local.clickhouse_cluster_chart_name
41 | chart = local.clickhouse_cluster_chart_name
42 | namespace = kubernetes_namespace.clickhouse.metadata[0].name
43 | repository = local.clickhouse_helm_charts_repository
44 | version = var.clickhouse_cluster_chart_version
45 |
46 | values = [templatefile("${path.module}/helm/clickhouse-cluster.yaml.tpl", {
47 | zones = var.k8s_availability_zones
48 | instance_type = var.clickhouse_cluster_instance_type
49 | name = var.clickhouse_name
50 | cluster_name = var.clickhouse_cluster_name
51 | service_type = var.clickhouse_cluster_enable_loadbalancer ? "loadbalancer-external" : "cluster-ip"
52 | user = var.clickhouse_cluster_user
53 | password = local.clickhouse_password
54 | })]
55 | }
56 |
--------------------------------------------------------------------------------
/clickhouse-cluster/outputs.tf:
--------------------------------------------------------------------------------
1 | output "clickhouse_cluster_password" {
2 | value = local.clickhouse_password
3 | description = "The generated password for the ClickHouse cluster"
4 | sensitive = true
5 | }
6 |
7 | output "clickhouse_cluster_url" {
8 | value = var.clickhouse_cluster_enable_loadbalancer && length(data.kubernetes_service.clickhouse_load_balancer) > 0 && length(data.kubernetes_service.clickhouse_load_balancer[*].status[*].load_balancer[*].ingress) > 0 ? data.kubernetes_service.clickhouse_load_balancer[0].status[0].load_balancer[0].ingress[0].hostname : "N/A"
9 | description = "The public URL for the ClickHouse cluster"
10 | }
11 |
--------------------------------------------------------------------------------
/clickhouse-cluster/variables.tf:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # ClickHouse Cluster
3 | ################################################################################
4 | variable "clickhouse_name" {
5 | description = "Name of the ClickHouse release"
6 | default = "eks"
7 | type = string
8 | }
9 |
10 | variable "clickhouse_cluster_name" {
11 | description = "Name of the ClickHouse cluster"
12 | default = "dev"
13 | type = string
14 | }
15 |
16 | variable "clickhouse_cluster_chart_version" {
17 | description = "Version of the ClickHouse cluster Helm chart"
18 | default = "0.1.8"
19 | type = string
20 | }
21 |
22 | variable "clickhouse_keeper_chart_version" {
23 | description = "Version of the ClickHouse Keeper Helm chart"
24 | default = "0.1.4"
25 | type = string
26 | }
27 |
28 | variable "clickhouse_cluster_namespace" {
29 | description = "Namespace for the ClickHouse cluster"
30 | default = "clickhouse"
31 | type = string
32 | }
33 |
34 | variable "clickhouse_cluster_user" {
35 | description = "ClickHouse user"
36 | default = "test"
37 | type = string
38 | }
39 |
40 | variable "clickhouse_cluster_password" {
41 | description = "ClickHouse password"
42 | type = string
43 | default = null
44 | }
45 |
46 | variable "clickhouse_cluster_instance_type" {
47 | description = "Instance type for node selection"
48 | type = string
49 | }
50 |
51 | variable "clickhouse_cluster_enable_loadbalancer" {
52 | description = "Enable waiting for the ClickHouse LoadBalancer to receive a hostname"
53 | type = bool
54 | default = false
55 | }
56 |
57 | ################################################################################
58 | # K8S
59 | ################################################################################
60 | variable "k8s_availability_zones" {
61 | description = "The availability zones to deploy the ClickHouse cluster"
62 | type = list(string)
63 | }
64 |
65 | variable "k8s_cluster_endpoint" {
66 | description = "The endpoint for the Kubernetes cluster"
67 | type = string
68 | default = ""
69 | }
70 |
71 | variable "k8s_cluster_name" {
72 | description = "The name of the Kubernetes cluster"
73 | type = string
74 | default = ""
75 | }
76 |
77 | variable "k8s_cluster_region" {
78 | description = "The region of the Kubernetes cluster"
79 | type = string
80 | default = ""
81 | }
82 |
83 | variable "k8s_cluster_certificate_authority" {
84 | description = "The certificate authority data for the Kubernetes cluster"
85 | type = string
86 | default = ""
87 | }
88 |
--------------------------------------------------------------------------------
/clickhouse-cluster/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0"
3 | required_providers {
4 | kubernetes = {
5 | source = "hashicorp/kubernetes"
6 | version = ">= 2.25.2"
7 | }
8 | helm = {
9 | source = "hashicorp/helm"
10 | version = ">= 2.12.1"
11 | }
12 | random = {
13 | source = "hashicorp/random"
14 | version = ">= 3.6.0"
15 | }
16 | null = {
17 | source = "hashicorp/null"
18 | version = ">= 3.2.2"
19 | }
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/clickhouse-operator/README.md:
--------------------------------------------------------------------------------
1 | # clickhouse-operator sub-module
2 |
3 | > TBA
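4 | 
5 | In the meantime, here is a minimal, hypothetical sketch of how this sub-module could be called from a root module. The relative `source` path is an assumption, both variables have defaults in `variables.tf`, and a configured `helm` provider pointing at the target cluster is assumed.
6 | 
7 | ```hcl
8 | # Hypothetical standalone usage of the clickhouse-operator sub-module.
9 | module "clickhouse_operator" {
10 |   source = "./clickhouse-operator" # assumed relative path
11 | 
12 |   # Optional overrides; these match the defaults in variables.tf.
13 |   clickhouse_operator_namespace = "kube-system"
14 |   clickhouse_operator_version   = "0.24.4"
15 | }
16 | ```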
--------------------------------------------------------------------------------
/clickhouse-operator/main.tf:
--------------------------------------------------------------------------------
1 | resource "helm_release" "altinity_clickhouse_operator" {
2 | name = "altinity-clickhouse-operator"
3 | chart = "altinity-clickhouse-operator"
4 | repository = "https://altinity.github.io/clickhouse-operator"
5 |
6 | version = var.clickhouse_operator_version
7 | namespace = var.clickhouse_operator_namespace
8 | }
9 |
--------------------------------------------------------------------------------
/clickhouse-operator/variables.tf:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # ClickHouse Operator
3 | ################################################################################
4 | variable "clickhouse_operator_namespace" {
5 | description = "Namespace to install the Altinity Kubernetes operator for ClickHouse"
6 | default = "kube-system"
7 | type = string
8 | }
9 |
10 | variable "clickhouse_operator_version" {
11 | description = "Version of the Altinity Kubernetes operator for ClickHouse"
12 | default = "0.24.4"
13 | type = string
14 | }
15 |
--------------------------------------------------------------------------------
/clickhouse-operator/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0"
3 | required_providers {
4 | helm = {
5 | source = "hashicorp/helm"
6 | version = ">= 2.12.1"
7 | }
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # Terraform Module for EKS ClickHouse® Cluster
2 |
3 | This Terraform module automates the deployment of a [ClickHouse®](https://clickhouse.com) database cluster on [Amazon EKS](https://aws.amazon.com/eks/) (Elastic Kubernetes Service). It is designed to create and configure the necessary resources for a robust and scalable ClickHouse deployment.
4 |
5 | The code is separated into different modules: one for the EKS cluster, one for the Altinity Kubernetes operator for ClickHouse, and one for the ClickHouse (and ClickHouse Keeper) cluster. Variables are used to customize the deployment, including AWS region, cluster name, node configurations, and networking settings.
6 |
7 | ## Components
8 |
9 | This architecture provides a scalable, secure, and efficient environment for running a ClickHouse database on Kubernetes within AWS EKS. The focus on autoscaling, storage management, and proper IAM configuration ensures its suitability for enterprise-level deployments, using the following resources:
10 |
11 | - **EKS Cluster**: Utilizes AWS Elastic Kubernetes Service to manage Kubernetes clusters. Configuration specifies version, node groups, and IAM roles for cluster operations.
12 |
13 | - **VPC and Networking**: Sets up a VPC with public and private subnets, an internet gateway, and route tables for network isolation and internet access. By default, the module creates a NAT gateway and places the EKS cluster in the private subnets. If the NAT gateway is disabled, the cluster's nodes are automatically moved to the public subnets and the private subnets are omitted or destroyed.
14 |
15 | - **IAM Roles and Policies**: Defines roles and policies for EKS cluster, node groups, and service accounts, facilitating secure interaction with AWS services.
16 |
17 | - **ClickHouse Deployment**:
18 | - **Operator**: Deploys ClickHouse and its operator using the Altinity Helm charts, with configurations for namespace, user, and password (among others).
19 | - **ClickHouse Keeper**: Configures a ClickHouse Keeper cluster for ClickHouse coordination (deployed in the same ClickHouse namespace).
20 |
21 | - **Storage**:
22 | - **EBS CSI Driver**: Implements the Container Storage Interface (CSI) for EBS, enabling dynamic provisioning of block storage for stateful applications.
23 | - **Storage Classes**: Defines storage classes for gp3 encrypted EBS volumes, supporting dynamic volume provisioning.
24 |
25 | - **Cluster Autoscaler**: Implements autoscaling for EKS node groups based on workload demands, ensuring efficient resource utilization.
26 |
27 | - **Security**: Configures different service accounts with IAM roles for fine-grained access control to AWS services.
28 |
29 | ## Architecture:
30 |
31 | 
32 |
33 | - [VPC & Subnets](./vpc.md)
34 | - [EKS Cluster & Node Groups](./eks.md)
35 | - [K8S Autoscaler](./autoscaler.md)
36 | - [EBS & CSI Driver](./ebs.md)
37 | - [ClickHouse](./clickhouse.md)
38 |
39 | ## Prerequisites
40 |
41 | - AWS Account with appropriate permissions
42 | - Terraform installed (recommended `>= v1.5`)
43 | - Basic knowledge of Kubernetes and AWS services
44 |
45 | ## Usage
46 |
47 | ```hcl
48 | locals {
49 | region = "us-east-1"
50 | }
51 |
52 | provider "aws" {
53 | # https://registry.terraform.io/providers/hashicorp/aws/latest/docs
54 | region = local.region
55 | }
56 |
57 | module "eks_clickhouse" {
58 | source = "github.com/Altinity/terraform-aws-eks-clickhouse"
59 |
60 | install_clickhouse_operator = true
61 | install_clickhouse_cluster = true
62 |
63 | # Set to true if you want to use a public load balancer (and expose ports to the public Internet)
64 | clickhouse_cluster_enable_loadbalancer = false
65 |
66 | eks_cluster_name = "clickhouse-cluster"
67 | eks_region = local.region
68 | eks_cidr = "10.0.0.0/16"
69 |
70 | eks_availability_zones = [
71 | "${local.region}a",
72 | "${local.region}b",
73 | "${local.region}c"
74 | ]
75 | eks_private_cidr = [
76 | "10.0.1.0/24",
77 | "10.0.2.0/24",
78 | "10.0.3.0/24"
79 | ]
80 | eks_public_cidr = [
81 | "10.0.101.0/24",
82 | "10.0.102.0/24",
83 | "10.0.103.0/24"
84 | ]
85 |
86 | # ⚠️ The instance type of `eks_node_pools` at index `0` will be used to set up the ClickHouse cluster replicas.
87 | eks_node_pools = [
88 | {
89 | name = "clickhouse"
90 | instance_type = "m6i.large"
91 | desired_size = 0
92 | max_size = 10
93 | min_size = 0
94 | zones = ["us-east-1a", "us-east-1b", "us-east-1c"]
95 | },
96 | {
97 | name = "system"
98 | instance_type = "t3.large"
99 | desired_size = 1
100 | max_size = 10
101 | min_size = 0
102 | zones = ["us-east-1a"]
103 | }
104 | ]
105 |
106 | eks_tags = {
107 | CreatedBy = "mr-robot"
108 | }
109 | }
110 | ```
111 |
112 | > ⚠️ The module will create a Node Pool for each combination of instance type and availability zones. For example, if you have 3 azs and 2 instance types, this module will create 6 different Node Pools.
113 |
114 | 👉 Check the [Terraform registry](https://registry.terraform.io/modules/Altinity/eks-clickhouse/aws/latest) for a complete Terraform specification for this module.
115 |
116 | ## AWS Labs Blueprint
117 |
118 | This module is the result of a collaboration between [Altinity](https://altinity.com) and [AWS Labs](https://awslabs.github.io/data-on-eks/). It is part of a series of tutorials that aim to help people do interesting things with data on AWS EKS (using different technologies).
119 |
120 | You can find the complete blueprint [here](#), which uses most of the code provided in this repo as a Terraform module.
121 |
--------------------------------------------------------------------------------
/docs/architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Altinity/terraform-aws-eks-clickhouse/780fd7b8ed3211011468c12aaceedf27142cd193/docs/architecture.png
--------------------------------------------------------------------------------
/docs/autoscaler.md:
--------------------------------------------------------------------------------
1 | # Kubernetes Cluster Autoscaler
2 |
3 | > 💡 TLDR; This setup configures the Cluster Autoscaler to dynamically manage the number of nodes in an AWS EKS cluster based on workload demands, ensuring optimal resource utilization and cost-efficiency.
4 |
5 | This Terraform module leverages the [`aws-ia/eks-blueprints-addons/aws`](https://registry.terraform.io/modules/aws-ia/eks-blueprints-addon/aws/latest) module to set up the Cluster Autoscaler for an AWS EKS cluster. The setup includes the necessary IAM roles and policies, along with Helm for deployment, ensuring that the autoscaler can adjust the number of nodes efficiently. Below is a breakdown of the key components:
6 |
7 | ### IAM Policy for Cluster Autoscaler
8 | - `aws_iam_policy.cluster_autoscaler`: creates an IAM policy with permissions necessary for the Cluster Autoscaler to interact with AWS services, particularly the Auto Scaling groups and EC2 instances.
9 |
10 | ### IAM Role for Cluster Autoscaler
11 | - `aws_iam_role.cluster_autoscaler`: defines an IAM role with a trust relationship that allows entities assuming this role via Web Identity (in this case, Kubernetes service accounts) to perform actions as defined in the IAM policy.
12 |
13 | ### AWS Identity and Access Management
14 | - **`module.eks_aws.module.eks_blueprints_addons.module.cluster_autoscaler.data.aws_caller_identity.current`** and **`module.eks_aws.module.eks_blueprints_addons.module.cluster_autoscaler.data.aws_partition.current`**: Retrieve AWS account details and the partition in which the resources are being created, ensuring that the setup aligns with the AWS environment where the EKS cluster resides.
15 |
16 | ### Deployment via Helm
17 | - **`module.eks_aws.module.eks_blueprints_addons.module.cluster_autoscaler.helm_release.this`**: Deploys the Cluster Autoscaler using a Helm chart. The configuration is provided through a template file that includes necessary parameters such as the AWS region, cluster ID, autoscaler version, and the role ARN.
18 |
19 |
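20 | ### Example (illustrative)
21 | 
22 | For reference, the sketch below shows the general shape of an IAM policy for the Cluster Autoscaler. The action list, naming, and resource scoping are illustrative assumptions and may differ from what the module's `iam.tf` actually creates.
23 | 
24 | ```hcl
25 | # Illustrative Cluster Autoscaler policy; not a copy of the module's iam.tf.
26 | data "aws_iam_policy_document" "cluster_autoscaler" {
27 |   # Read-only discovery of Auto Scaling groups and launch templates.
28 |   statement {
29 |     effect = "Allow"
30 |     actions = [
31 |       "autoscaling:DescribeAutoScalingGroups",
32 |       "autoscaling:DescribeAutoScalingInstances",
33 |       "autoscaling:DescribeLaunchConfigurations",
34 |       "autoscaling:DescribeTags",
35 |       "ec2:DescribeLaunchTemplateVersions",
36 |     ]
37 |     resources = ["*"]
38 |   }
39 | 
40 |   # Scaling actions against the node groups' Auto Scaling groups.
41 |   statement {
42 |     effect = "Allow"
43 |     actions = [
44 |       "autoscaling:SetDesiredCapacity",
45 |       "autoscaling:TerminateInstanceInAutoScalingGroup",
46 |     ]
47 |     resources = ["*"]
48 |   }
49 | }
50 | 
51 | resource "aws_iam_policy" "cluster_autoscaler" {
52 |   name   = "cluster-autoscaler" # illustrative name
53 |   policy = data.aws_iam_policy_document.cluster_autoscaler.json
54 | }
55 | ```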
--------------------------------------------------------------------------------
/docs/blueprint.md:
--------------------------------------------------------------------------------
1 | # ClickHouse® Cluster on EKS with Terraform
2 |
3 | With this pattern, you can deploy a ClickHouse® cluster on AWS EKS (Elastic Kubernetes Service) with a single Terraform file. This module sets up the EKS cluster and node groups with all the tooling required to run ClickHouse clusters on Kubernetes.
4 |
5 | 
6 |
7 | The module uses opinionated defaults for the EKS cluster and node groups, including the EBS CSI driver, Kubernetes autoscaler, and IAM roles and policies. It also includes configurations for VPCs, public subnets, route tables, and internet gateways, which are essential for the network infrastructure of the EKS cluster.
8 |
9 | The deployment experience is simple but flexible. You can customize several settings about the EKS cluster and node groups, such as scaling configurations, disk size, and instance types.
10 |
11 | We recommend keeping the defaults if you are new to EKS and ClickHouse. However, if you are familiar with EKS and ClickHouse, feel free to use this template as a starting point and customize it to your needs.
12 |
13 | > ⚠️ There are some configurations or resources that may not be considered "production-ready" depending on your security guidelines. Use these examples with caution and as a starting point for your learning and development process.
14 |
15 | ## Components
16 |
17 | This architecture provides a scalable, secure, and efficient environment for running a ClickHouse database on Kubernetes within AWS EKS. The focus on autoscaling, storage management, and proper IAM configurations ensures its suitability for enterprise-level deployments using the following resources:
18 |
19 | - **EKS Cluster**: Utilizes AWS Elastic Kubernetes Service to manage Kubernetes clusters. Configuration specifies version, node groups, and IAM roles for cluster operations.
20 |
21 | - **VPC and Networking**: Sets up a VPC with public and private subnets, NAT gateway, Internet gateway, and route tables for network isolation and internet access. Public subnets and an S3 VPC endpoint are created for external and internal communications, respectively.
22 |
23 | > The VPC configuration we've chosen is suitable for a real production environment. It strikes a good balance between simplicity and user-friendliness for getting started with ClickHouse, while still preserving fundamental security aspects. The main idea behind this setup is to create a ClickHouse cluster with an optional public URL to which you can easily connect once the provisioning is complete.
24 |
25 | - **IAM Roles and Policies**: Defines roles and policies for EKS cluster, node groups, and service accounts, facilitating secure interaction with AWS services.
26 |
27 | - **ClickHouse Deployment**: This ClickHouse cluster is designed for flexibility and high availability. It integrates with **ClickHouse Keeper** for cluster management and coordination, and allows external access with enhanced security. The cluster's architecture supports high availability with a replica structure across multiple zones, ensuring fault tolerance. Storage is secured and performant, utilizing an encrypted gp3 class. The setup is performed using 3 different Helm charts:
28 | - **Operator**: The operator facilitates the lifecycle of ClickHouse clusters, including scaling, backup, and recovery.
29 | - **Cluster**: Creates a ClickHouse cluster using the Altinity Kubernetes operator for ClickHouse, with configurations for namespace, user, and password.
30 | - **ClickHouseKeeper**: Sets up a ClickHouse Keeper cluster for ClickHouse coordination, enhancing ClickHouse clusters by managing configuration and ensuring consistency.
31 |
32 | > For more information about the Helm Charts, you can check the [kubernetes-blueprints-for-clickhouse](https://github.com/Altinity/kubernetes-blueprints-for-clickhouse) repository.
33 |
34 |
35 | - **Storage**: We opted for Amazon EBS (Elastic Block Store) for our cluster's storage due to its cost-effectiveness compared to other AWS storage options. EBS provides high performance, durability, and the flexibility to scale, making it ideal for database workloads like ClickHouse. It offers a cost-efficient solution for maintaining data integrity and availability.
36 |
37 | - **EBS CSI Driver**: Implements the Container Storage Interface (CSI) for EBS, enabling dynamic provisioning of block storage for stateful applications.
38 | - **Storage Classes**: Defines storage classes for gp3 encrypted EBS volumes, supporting dynamic volume provisioning.
39 |
40 | - **Cluster Autoscaler**: Implements autoscaling for EKS node groups based on workload demands, ensuring efficient resource utilization.
41 |
42 | > Autoscaling is a critical feature for managing the resources of your EKS cluster. It automatically adjusts the number of nodes in a node group based on the resource requests and limits of the pods running on the nodes. This ensures that the cluster is right-sized for the current workload, optimizing costs and performance.
43 |
44 | ## Deploying the Solution
45 |
46 | ### Pre-requisites
47 |
48 | - [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
49 | - [aws cli](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html)
50 | - [terraform](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli)
51 | - [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)
52 | - [clickhouse client](https://clickhouse.com/docs/en/integrations/sql-clients/clickhouse-client-local)
53 |
54 | ### Setup
55 |
56 | 1. **Clone the repository**
57 |
58 | ```bash
59 | git clone https://github.com/awslabs/data-on-eks.git
60 | ```
61 |
62 | 2. Navigate into the example directory and use the `terraform` command to initialize Terraform and apply the changes.
63 |
64 | ```bash
65 | cd data-on-eks/analytics/terraform/clickhouse-eks
66 |
67 | # If you already have an AWS profile setup, you can skip this step
68 | export AWS_ACCESS_KEY_ID=""
69 | export AWS_SECRET_ACCESS_KEY=""
70 | export AWS_SESSION_TOKEN=""
71 |
72 | terraform init
73 | terraform apply
74 | ```
75 |
76 | > This will take a few minutes to complete. Once it's done, you will see the output of the `terraform apply` command, including the `kubeconfig` setup for the EKS cluster.
77 |
78 | ### Verify
79 |
80 | Let's verify that the EKS cluster and ClickHouse deployment are running as expected.
81 | ```bash
82 | aws eks describe-cluster --name clickhouse-cluster --region us-east-1
83 | ```
84 |
85 | Verify that the EKS cluster is active and the nodes are ready.
86 |
87 | ```bash
88 | # Use this command to setup the `kubeconfig` for the EKS cluster.
89 | eval $(terraform output eks_configure_kubectl | tr -d '"')
90 |
91 | # Get aws, autoscaler, ebs-csi, clickhouse operator and other k8s pods
92 | kubectl get pods -n kube-system
93 |
94 | # Get clickhouse and zookeeper pods
95 | kubectl get pods -n clickhouse
96 | ```
97 |
98 | ## Create your first ClickHouse table
99 |
100 | ClickHouse uses a SQL-like language to interact with the database. You can use the `clickhouse-client` to connect to the database and create your first table.
101 |
102 | ### Connect to the ClickHouse cluster
103 | Retrieve the ClickHouse cluster credentials and connect using the `clickhouse-client`.
104 |
105 | ```bash
106 | # Get password and host from the terraform output
107 | password=$(terraform output clickhouse_cluster_password | tr -d '"')
108 | host=$(terraform output clickhouse_cluster_url | tr -d '"')
109 |
110 | # Connect to the ClickHouse cluster using clickhouse-client
111 | clickhouse client --host=$host --user=test --password=$password
112 | ```
113 |
114 | ### Create a database
115 | Create a new database named `helloworld` if it doesn't already exist.
116 |
117 | ```sql
118 | CREATE DATABASE IF NOT EXISTS helloworld
119 | ```
120 |
121 | ### Create a table
122 | Define a new table `my_first_table` in the `helloworld` database, specifying its schema.
123 | ```sql
124 | CREATE TABLE helloworld.my_first_table
125 | (
126 | user_id UInt32,
127 | message String,
128 | timestamp DateTime,
129 | metric Float32
130 | )
131 | ENGINE = MergeTree()
132 | PRIMARY KEY (user_id, timestamp)
133 | ```
134 |
135 | ### Add some data
136 | Insert sample data into `my_first_table` demonstrating basic usage.
137 |
138 | ```sql
139 | INSERT INTO helloworld.my_first_table (user_id, message, timestamp, metric) VALUES
140 | (101, 'Hello, ClickHouse!', now(), -1.0 ),
141 | (102, 'Insert a lot of rows per batch', yesterday(), 1.41421 ),
142 | (102, 'Sort your data based on your commonly-used queries', today(), 2.718 ),
143 | (101, 'Granules are the smallest chunks of data read', now() + 5, 3.14159 )
144 | ```
145 |
146 | ### Query the data
147 | Retrieve and display all records from `my_first_table`, ordered by `timestamp`.
148 |
149 | ```sql
150 | SELECT *
151 | FROM helloworld.my_first_table
152 | ORDER BY timestamp
153 | ```
154 |
155 | ## Next Steps
156 | - Implement Monitoring & Observability solutions for in-depth performance and health insights.
157 | - Consider additional security measures, backup strategies, and disaster recovery plans.
158 | - Investigate advanced networking configurations, focusing on the use of private subnets and NAT gateways to enhance security and control traffic flow within your EKS environment.
159 |
160 | ## Cleanup
161 |
162 | When you are done with the ClickHouse cluster, you can remove it by running the `destroy` command. This will delete the EKS cluster and all the resources created by the Terraform script.
163 |
164 | ```bash
165 | cd data-on-eks/analytics/terraform/clickhouse-eks && terraform destroy
166 | ```
167 |
168 | ## Altinity
169 |
170 | If you are looking for a managed ClickHouse service, [Altinity](https://altinity.cloud) offers enterprise-grade support for ClickHouse, including optimized builds and consultancy services.
171 |
172 | - [Altinity.Cloud](https://altinity.com/cloud-database/) - Run ClickHouse in our cloud or yours.
173 | - [Altinity Support](https://altinity.com/support/) - Get Enterprise-class support for ClickHouse.
174 | - [Slack](https://altinitydbworkspace.slack.com/join/shared_invite/zt-w6mpotc1-fTz9oYp0VM719DNye9UvrQ) - Talk directly with ClickHouse users and Altinity devs.
175 | - [Contact us](https://hubs.la/Q020sH3Z0) - Contact Altinity with your questions or issues.
176 |
--------------------------------------------------------------------------------
/docs/clickhouse.md:
--------------------------------------------------------------------------------
1 | # Altinity Kubernetes Operator for ClickHouse® & ClickHouse Cluster Deployment with Zookeeper Integration on AWS EKS
2 |
3 | > 💡 **TL;DR**: This Terraform module automates the deployment of the Altinity Kubernetes operator for ClickHouse® and a ClickHouse cluster with Zookeeper on K8S. It meticulously manages dependencies, streamlines password generation, and applies necessary Kubernetes manifests, culminating in a robust, maintainable, and secure setup for cloud-native database management. The configuration leverages local values for parsing YAML manifests of both the ClickHouse operator and the Zookeeper cluster, ensuring a modular and dynamic deployment process. By integrating Zookeeper, the module supports high-availability and distributed ClickHouse configurations, enhancing the resilience and scalability of the database infrastructure.
4 |
5 | This Terraform module orchestrates the deployment of the [Altinity Kubernetes operator for ClickHouse](https://github.com/Altinity/clickhouse-operator) on an AWS EKS cluster and sets up a ClickHouse cluster with Zookeeper integration. It is designed to streamline the process of managing ClickHouse databases within a Kubernetes environment, emphasizing automation and ease of use on AWS EKS.
6 |
7 | ### Random Password Generation
8 | - `resource "random_password" "this"`: Generates a random password for the ClickHouse cluster if a predefined one is not supplied. The password has 22 characters, excluding special characters, and includes a lifecycle policy to disregard changes, preserving the password across Terraform `apply` operations.
9 |
10 | ### Altinity Kubernetes operator for ClickHouse
11 | - **Operator Deployment**: Utilizes `resource "kubectl_manifest" "clickhouse_operator"` to apply the necessary manifests (CRD, Service, ConfigMap, Deployment) for the ClickHouse operator. It iterates over `local.clickhouse_operator_manifests`, applying each manifest individually.
12 |
13 | ### Zookeeper Cluster Deployment
14 | - **Zookeeper Cluster**: The `resource "kubectl_manifest" "zookeeper_cluster"` deploys the Zookeeper cluster necessary for ClickHouse, iterating over `local.zookeeper_cluster_manifests` to apply each manifest. This setup is critical for enabling distributed ClickHouse configurations.
15 |
16 | ### Namespace Creation
17 | - **ClickHouse Namespace**: `resource "kubernetes_namespace" "clickhouse"` creates a Kubernetes namespace dedicated to the ClickHouse cluster, ensuring isolation and organization.
18 | - **Zookeeper Namespace**: Similarly, `resource "kubernetes_namespace" "zookeeper"` establishes a separate namespace for the Zookeeper cluster, maintaining a clear separation of concerns and operational clarity.
19 |
20 | ### ClickHouse Cluster Creation
21 | - `resource "kubectl_manifest" "clickhouse_cluster"`: Deploys the ClickHouse cluster by provisioning a new `ClickHouseInstallation` custom resource, incorporating variables such as cluster name, namespace, user, and either a generated or provided password. This resource incorporates the Zookeeper namespace for proper cluster coordination.
22 |
23 | ### Service Data Retrieval
24 | - `data "kubernetes_service" "clickhouse_load_balancer"`: Fetches details about the ClickHouse service, focusing on the load balancer setup, to facilitate external access. This data source is contingent on the successful rollout of the ClickHouse cluster.
25 |
26 |
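27 | ### Example: password generation (sketch)
28 | 
29 | As an illustration of the password handling described above, the resource looks roughly like the following (mirroring `clickhouse-cluster/main.tf`); the surrounding variable and local belong to that sub-module.
30 | 
31 | ```hcl
32 | # Generate a 22-character password without special characters when none is supplied.
33 | resource "random_password" "this" {
34 |   count   = var.clickhouse_cluster_password == null ? 1 : 0
35 |   length  = 22
36 |   special = false
37 | 
38 |   lifecycle {
39 |     # Keep the initially generated password across subsequent applies.
40 |     ignore_changes = all
41 |   }
42 | }
43 | 
44 | locals {
45 |   # Prefer the user-supplied password, otherwise fall back to the generated one.
46 |   clickhouse_password = var.clickhouse_cluster_password == null ? join("", random_password.this[*].result) : var.clickhouse_cluster_password
47 | }
48 | ```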
--------------------------------------------------------------------------------
/docs/ebs.md:
--------------------------------------------------------------------------------
1 | # EBS & CSI Driver
2 |
3 | > 💡 TL;DR This configuration sets up the necessary IAM roles and policies, Kubernetes roles, service accounts, and deployments to enable the AWS EBS CSI driver in an EKS cluster. This setup allows the Kubernetes cluster to dynamically provision EBS volumes as persistent storage for pods, leveraging the capabilities of AWS EBS.
4 |
5 | This Terraform module configures the AWS Elastic Block Store (EBS) Container Storage Interface (CSI) driver in a Kubernetes cluster managed by AWS EKS. The EBS CSI driver allows Kubernetes to provision, mount, and manage EBS volumes. Here are the key resources and their roles in this setup:
6 |
7 | ### IAM Policy and Role for EBS CSI Driver
8 | - `aws_iam_policy_document.ebs_csi_driver_assume_role_policy` and `aws_iam_role.ebs_csi_driver_role`: These define an IAM role that the EBS CSI driver will assume. This role grants the driver permissions to interact with AWS resources like EBS volumes.
9 |
10 | ### IAM Role Policy Attachment
11 | - `aws_iam_role_policy_attachment.ebs_csi_driver_policy_attachment`: Attaches the `AmazonEBSCSIDriverPolicy` to the IAM role, granting necessary permissions for the CSI driver to manage EBS volumes.
12 |
13 | ### Kubernetes Service Accounts
14 | - `kubernetes_service_account.ebs_csi_controller_sa` and `kubernetes_service_account.ebs_csi_node_sa`: These service accounts are used by the EBS CSI driver's controller and node components, respectively. The `eks.amazonaws.com/role-arn` annotation links these accounts to the IAM role created earlier.
15 |
16 | ### Kubernetes Cluster Roles and Role Bindings
17 | - Resources like `kubernetes_cluster_role.ebs_external_attacher_role` and related `kubernetes_cluster_role_binding.ebs_csi_attacher_binding`: These define the permissions required by the EBS CSI driver within the Kubernetes cluster, following the principle of least privilege.
18 |
19 | ### Kubernetes DaemonSet and Deployment
20 | - `kubernetes_daemonset.ebs_csi_node`: Deploys the EBS CSI driver on each node in the cluster. This daemonset is responsible for operations like mounting and unmounting EBS volumes on the nodes.
21 | - `kubernetes_deployment.ebs_csi_controller`: Deploys the controller component of the EBS CSI driver, which is responsible for provisioning and managing the lifecycle of EBS volumes.
22 |
23 | ### CSI Driver and Storage Class
24 | - `kubernetes_csi_driver_v1.ebs_csi_aws_com`: Registers the `ebs.csi.aws.com` CSI driver in the Kubernetes cluster.
25 | - `kubernetes_storage_class.gp3-encrypted`: Defines a storage class for provisioning EBS volumes. This particular storage class is set to use the `gp3` volume type and encrypt the volumes (which is what we recommend for ClickHouse®).
26 |
27 |
28 |
29 | # AWS EBS CSI Driver Setup
30 |
31 | > 💡 TLDR; This configuration sets up the AWS EBS CSI driver within an EKS cluster, enabling dynamic provisioning of EBS volumes for persistent storage. The setup includes the necessary IAM roles, Kubernetes roles, service accounts, and driver deployments to integrate AWS EBS efficiently with the Kubernetes environment.
32 |
33 | This Terraform module configures the AWS Elastic Block Store (EBS) Container Storage Interface (CSI) driver using the [`aws-ia/eks-blueprints-addons/aws`](https://registry.terraform.io/modules/aws-ia/eks-blueprints-addon/aws/latest) module for a Kubernetes cluster managed by AWS EKS. The AWS EBS CSI driver facilitates the provisioning, mounting, and management of AWS EBS volumes directly via Kubernetes. Below is a detailed breakdown of the components involved in this setup:
34 |
35 | ### IAM Setup for EBS CSI Driver
36 | - **`module.eks_aws.aws_iam_role.ebs_csi_driver_role`**: Defines an IAM role that the EBS CSI driver will assume. This role grants the driver permissions to interact with AWS resources necessary for managing EBS volumes.
37 | - **`module.eks_aws.aws_iam_role_policy_attachment.ebs_csi_driver_policy_attachment`**: Attaches the necessary IAM policies to the IAM role, specifically the `AmazonEBSCSIDriverPolicy`, empowering the CSI driver to perform operations on EBS volumes.
38 |
39 | ### EKS Addon Configuration
40 | - **`module.eks_aws.module.eks_blueprints_addons.aws_eks_addon.this["aws-ebs-csi-driver"]`**: Configures the AWS EBS CSI driver as an EKS addon, simplifying management and ensuring it is kept up-to-date with the latest releases and security patches.
41 |
42 | ### Kubernetes Storage Class
43 | - **`kubernetes_storage_class.gp3-encrypted`**: Defines a storage class named `gp3-encrypted`, which is set as the default class for dynamic volume provisioning. It uses the `gp3` volume type with encryption enabled, suitable for applications requiring secure and performant storage solutions.
44 | - **Parameters**: Specifies encryption, filesystem type (`ext4`), and the volume type (`gp3`).
45 | - **Reclaim Policy**: Set to `Delete`, meaning volumes will be automatically deleted when the corresponding Kubernetes persistent volume is deleted.
46 |   - **Volume Binding Mode**: Set to `WaitForFirstConsumer`, which delays the binding and provisioning of a volume until a pod using it is scheduled (see the example claim below).
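
As an illustration only (this is not a resource created by the module), a workload could request a volume from this class with a persistent volume claim along these lines; the claim name, namespace, and size are placeholders:

```hcl
resource "kubernetes_persistent_volume_claim" "clickhouse_data_example" {
  metadata {
    name      = "clickhouse-data-example" # placeholder name
    namespace = "clickhouse"              # placeholder namespace
  }

  spec {
    access_modes       = ["ReadWriteOnce"]
    storage_class_name = "gp3-encrypted" # the class defined by this module

    resources {
      requests = {
        storage = "50Gi" # placeholder size
      }
    }
  }

  # With WaitForFirstConsumer, the EBS volume is only provisioned once a pod
  # using this claim is scheduled, so don't block waiting for the bind here.
  wait_until_bound = false
}
```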
47 |
48 | ### Integration and Dependency Management
49 | - **Depends on**: Ensures that the EBS CSI driver setup only begins after the necessary EKS cluster components (such as the cluster itself and related IAM roles) are fully provisioned and operational.
50 |
--------------------------------------------------------------------------------
/docs/eks.md:
--------------------------------------------------------------------------------
1 | # EKS Cluster & Node Groups
2 |
3 | > 💡 TL;DR: This Terraform configuration leverages the [`terraform-aws-modules/eks/aws`](https://registry.terraform.io/modules/terraform-aws-modules/eks/aws/latest) module to establish a robust EKS cluster setup. It ensures flexible node group scaling and proper security practices with comprehensive IAM roles and policies. The integration of the Kubernetes Cluster Autoscaler enables dynamic node group scaling based on workload demands.
4 |
5 | This Terraform module orchestrates an AWS EKS (Elastic Kubernetes Service) deployment, handling everything from IAM roles to node group configurations using the [`terraform-aws-modules/eks/aws`](https://registry.terraform.io/modules/terraform-aws-modules/eks/aws/latest) module. Below is an overview of the key components involved:
6 |
7 | ### IAM Roles & Policy Attachments
8 | - `aws_iam_role.eks_cluster_role`: An IAM role for the EKS cluster with permissions to make AWS API calls on your behalf.
9 | - `aws_iam_role_policy_attachment` resources: Attach AWS-managed policies to the EKS cluster role. These include policies for EKS cluster management, service roles, and VPC resource controllers.
10 | - `aws_iam_policy.eks_admin_policy` and `aws_iam_role.eks_admin_role`: Define an administrative policy and role for EKS. This setup includes permissions for creating and managing EKS clusters and associated IAM roles.
11 | - `aws_iam_role.eks_node_role`: An IAM role for EKS worker nodes to allow them to make AWS API calls.
12 | - `aws_iam_role_policy_attachment` resources for the node role: Attach necessary policies for EKS worker nodes, including EKS worker node policy, CNI plugin policy, and read-only access to ECR (Elastic Container Registry).
13 |
14 | ### EKS Cluster
15 | - `module.eks_aws.module.eks.aws_eks_cluster.this`: Creates an EKS cluster with the specified settings, such as version, role ARN, and subnet configuration.
16 |
17 | ### EKS Node Group
18 | - `module.eks_aws.module.eks.module.eks_managed_node_group[1-N]`: creates EKS managed node groups for each subnet and instance type combination from the `local.node_pool_combinations`. These node groups are where your Kubernetes pods will run.
19 | - Includes scaling configurations, such as desired, minimum, and maximum size of each node group.
20 | - Tags node groups for integration with the Kubernetes Cluster Autoscaler.
21 | - Specifies disk size and instance types for the node groups, which are essential for defining the resources available to your Kubernetes pods.
22 |   - Sets the auto-scaling bounds within which the Cluster Autoscaler can scale each node group.
23 |   - The instance type of `eks_node_pools` at index `0` will be used for the ClickHouse cluster replicas (see the example below).
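
For illustration, a minimal `eks_node_pools` value following this convention is sketched below; the instance types and sizes are placeholders, and the pool at index `0` is the one whose instance type is reused for the ClickHouse replicas:

```hcl
eks_node_pools = [
  {
    # Index 0: its instance type is reused for the ClickHouse replicas.
    name          = "clickhouse"
    instance_type = "m6i.large"
    desired_size  = 0
    max_size      = 10
    min_size      = 0
  },
  {
    # System pool for the operator, autoscaler, and other cluster services.
    name          = "system"
    instance_type = "t3.large"
    desired_size  = 1
    max_size      = 10
    min_size      = 0
  }
]
```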
24 |
25 |
26 |
27 |
28 |
--------------------------------------------------------------------------------
/docs/prod-ready.md:
--------------------------------------------------------------------------------
1 | # Getting Production Ready
2 |
3 | Deploying applications in a production environment necessitates stringent security measures to mitigate risks such as unauthorized access and potential data breaches. A secure Kubernetes deployment minimizes these risks by restricting public internet exposure of critical components and enforcing data encryption. This guideline outlines essential steps to achieve a secure and robust production environment.
4 |
5 | ## Restrict Kubernetes API Access
6 |
7 | You can restrict access to the Kubernetes API by setting the module variable `eks_endpoint_public_access` to `false`.
8 | This restricts access to the Kubernetes API to the VPC only, **which means the module has to be run from within the VPC**.
9 |
10 | If you don't want to run the module from within the VPC, a possible workaround is to manually change this property using the AWS CLI after the cluster is created:
11 |
12 | ```sh
13 | aws eks update-cluster-config \
14 |   --region <region> \
15 |   --name <cluster-name> \
16 | --resources-vpc-config endpointPublicAccess=false
17 | ```
18 |
19 | > If for some reason you still need to access the Kubernetes API from the public Internet, consider restricting access to specific IP addresses using the `eks_public_access_cidrs` variable.
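
For example, here is a minimal sketch restricting the public endpoint to a single allowed range; the CIDR below is a placeholder for your office or VPN range:

```hcl
module "eks_clickhouse" {
  source = "github.com/Altinity/terraform-aws-eks-clickhouse"

  eks_cluster_name = "clickhouse-cluster"
  eks_region       = "us-east-1"

  # Only this range can reach the public Kubernetes API endpoint.
  eks_public_access_cidrs = ["203.0.113.0/24"] # placeholder CIDR
}
```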
20 |
21 | ## Remove Public Load Balancer
22 |
23 | Utilizing public load balancers, especially for database clusters like ClickHouse®, poses a significant security risk by exposing your services to the public internet. This can lead to unauthorized access and potential data exploitation.
24 |
25 | Remove the public load balancer by setting `clickhouse_cluster_enable_loadbalancer` to `false`. The load balancer is created or removed dynamically based on this variable, in line with security best practices.
26 |
27 | ## Change Default Passwords (and Kubernetes Secrets)
28 | When setting up the cluster, you can configure the ClickHouse default credentials with the `clickhouse_cluster_user` and `clickhouse_cluster_password` variables. If you don't provide a password, the module will generate a random one for you. The credentials will be stored in the Terraform state and also in a Kubernetes secret named `clickhouse-credentials`.
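
A minimal sketch of setting explicit credentials is shown below; the user name is a placeholder, and `var.clickhouse_password` is a hypothetical input you would feed from a secret store rather than hard-coding:

```hcl
module "eks_clickhouse" {
  source = "github.com/Altinity/terraform-aws-eks-clickhouse"

  install_clickhouse_cluster = true

  clickhouse_cluster_user     = "admin"                  # placeholder user
  clickhouse_cluster_password = var.clickhouse_password  # hypothetical input, e.g. from a secrets manager
}
```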
29 |
30 | Consider rotating these credentials in the Kubernetes secret to enhance security. Even if you set random/strong passwords, the initial values will be part of state files, logs, or other artifacts, which could lead to unauthorized access.
31 |
32 | ## Cluster Monitoring and Logging
33 | > TBA
34 |
35 | ## ClickHouse Cluster Sharding
36 | > TBA
37 |
38 | ## ClickHouse Keeper High Availability
39 | > TBA
40 |
41 |
42 |
--------------------------------------------------------------------------------
/docs/vpc.md:
--------------------------------------------------------------------------------
1 | # VPC & Subnets
2 |
3 | > 💡 TL;DR: This setup, utilizing the `terraform-aws-modules/vpc`, is a typical pattern for establishing a network infrastructure in AWS. It includes creating a VPC as an isolated network environment, segmenting it further with subnets, enabling internet access via an internet gateway, defining network routing rules through route tables, and establishing secure, private connections to AWS services with VPC endpoints.
4 |
5 | This Terraform module configures essential networking components within AWS, specifically within a VPC (Virtual Private Cloud). It internally uses the [`terraform-aws-modules/vpc/aws`](https://registry.terraform.io/modules/terraform-aws-modules/vpc/aws) to set up all networking-related configurations.
6 |
7 | ### AWS VPC
8 | - **`module.eks_aws.module.vpc.aws_vpc.this`**: Creates a new VPC with a specified CIDR block, enabling DNS support and DNS hostnames, which are crucial for domain name resolution within the VPC and effective communication across AWS services.
9 | - Tags are applied from the `var.tags` variable for easier identification and management.
10 |
11 | ### Internet Gateway
12 | - **`module.eks_aws.module.vpc.aws_internet_gateway.this`**: Attaches an internet gateway to the VPC, facilitating communication between the VPC and the internet. This is essential for allowing internet access to and from resources within the VPC.
13 |
14 | ### NAT Gateway
15 | - **`module.eks_aws.module.vpc.aws_nat_gateway.this`**: Establishes a NAT gateway, utilizing an Elastic IP allocated with `module.eks_aws.module.vpc.aws_eip.nat`. This configuration allows instances in private subnets to access the internet while maintaining their security.
16 |   - The `eks_enable_nat_gateway` variable, set to `true` by default, controls the creation of the NAT Gateway. Disabling it means the private subnets (and the NAT gateway) are not created, and the EKS nodes run in public subnets (see the example below).
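
A minimal sketch of disabling the NAT gateway from the root module (other inputs omitted; see the `examples/public-subnets-only` example in this repository):

```hcl
module "eks_clickhouse" {
  source = "github.com/Altinity/terraform-aws-eks-clickhouse"

  # No private subnets or NAT gateway are created; nodes land in public
  # subnets and receive public IPs (map_public_ip_on_launch = true).
  eks_enable_nat_gateway = false
}
```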
17 |
18 | ### Public & Private Subnets
19 | - **`module.eks_aws.module.vpc.aws_subnet.private[0-N]`** and **`module.eks_aws.module.vpc.aws_subnet.public[0-N]`**: Create multiple public and private subnets across different availability zones for high availability. Private subnets house the EKS cluster by default.
20 | - Each subnet is assigned a unique CIDR block and an availability zone based on the variables `var.eks_availability_zones`, `eks_private_cidr`, and `eks_public_cidr`.
21 |   - The `map_public_ip_on_launch` attribute is set to `true` for public subnets when the NAT Gateway is disabled, so instances launched in those subnets receive public IP addresses.
22 |
23 | ### Route Tables
24 | - **`module.eks_aws.module.vpc.aws_route_table.public`** and **`module.eks_aws.module.vpc.aws_route_table.private`**: Define route tables in the VPC. The public route table directs traffic through the internet gateway, while the private route table routes traffic via the NAT gateway.
25 |
26 | ### Route Table Association
27 | - **`module.eks_aws.module.vpc.aws_route_table_association.public[0-N]`** and **`module.eks_aws.module.vpc.aws_route_table_association.private[0-N]`**: Associates each subnet with its respective route table, applying the defined routing rules to the subnets.
28 |
29 | ### Routes
30 | - **`module.eks_aws.module.vpc.aws_route.public_internet_gateway`**: Establishes a route in the public route table directing all traffic to the internet gateway.
31 | - **`module.eks_aws.module.vpc.aws_route.private_nat_gateway`**: Adds a route in the private route table to direct traffic through the NAT gateway, enabling secure internet access for instances in private subnets.
32 |
33 | ### Default Network ACL and Security Group
34 | - **`module.eks_aws.module.vpc.aws_default_network_acl.this`**: Sets a default network ACL, which provides a basic level of security by regulating traffic into and out of the associated subnets.
35 | - **`module.eks_aws.module.vpc.aws_default_security_group.this`**: Manages the default security group for the VPC, providing a baseline that controls which traffic is allowed in and out by default.
36 |
--------------------------------------------------------------------------------
/eks/README.md:
--------------------------------------------------------------------------------
1 | # eks sub-module
2 |
3 | > TBA
--------------------------------------------------------------------------------
/eks/addons.tf:
--------------------------------------------------------------------------------
1 | module "eks_blueprints_addons" {
2 | source = "aws-ia/eks-blueprints-addons/aws"
3 | version = "~> 1.16.2"
4 |
5 | depends_on = [module.eks]
6 |
7 | cluster_name = module.eks.cluster_name
8 | cluster_endpoint = module.eks.cluster_endpoint
9 | cluster_version = module.eks.cluster_version
10 | oidc_provider_arn = module.eks.oidc_provider_arn
11 |
12 | eks_addons = {
13 | aws-ebs-csi-driver = {
14 | service_account_role_arn = aws_iam_role.ebs_csi_driver_role.arn
15 | }
16 | }
17 |
18 | enable_cluster_autoscaler = true
19 | cluster_autoscaler = {
20 | timeout = "300"
21 | values = [templatefile("${path.module}/helm/cluster-autoscaler.yaml.tpl", {
22 | aws_region = var.region,
23 | eks_cluster_id = var.cluster_name,
24 | autoscaler_version = "v${var.autoscaler_version}",
25 | autoscaler_replicas = var.autoscaler_replicas,
26 | role_arn = aws_iam_role.cluster_autoscaler.arn
27 | })]
28 | }
29 | }
30 |
31 | resource "kubernetes_storage_class" "gp3-encrypted" {
32 | depends_on = [module.eks_blueprints_addons]
33 |
34 | metadata {
35 | name = "gp3-encrypted"
36 | annotations = {
37 | "storageclass.kubernetes.io/is-default-class" = "true"
38 | }
39 | }
40 |
41 | storage_provisioner = "ebs.csi.aws.com"
42 |
43 | parameters = {
44 | encrypted = "true"
45 | fsType = "ext4"
46 | type = "gp3"
47 | }
48 |
49 | reclaim_policy = "Delete"
50 | volume_binding_mode = "WaitForFirstConsumer"
51 | allow_volume_expansion = true
52 | }
53 |
54 | resource "kubernetes_annotations" "disable_gp2" {
55 | depends_on = [module.eks_blueprints_addons]
56 | annotations = {
57 | "storageclass.kubernetes.io/is-default-class" : "false"
58 | }
59 |
60 | api_version = "storage.k8s.io/v1"
61 | kind = "StorageClass"
62 | metadata {
63 | name = "gp2"
64 | }
65 |
66 | force = true
67 | }
68 |
--------------------------------------------------------------------------------
/eks/helm/cluster-autoscaler.yaml.tpl:
--------------------------------------------------------------------------------
1 | autoDiscovery:
2 | clusterName: ${eks_cluster_id}
3 | tags:
4 | - k8s.io/cluster-autoscaler/enabled
5 | - k8s.io/cluster-autoscaler/${eks_cluster_id}
6 |
7 | awsRegion: ${aws_region}
8 |
9 | cloudProvider: aws
10 |
11 | serviceAccount:
12 | annotations:
13 | eks.amazonaws.com/role-arn: "${role_arn}"
14 | create: true
15 | name: cluster-autoscaler
16 |
17 | rbac:
18 | create: true
19 |
20 | extraArgs:
21 | logtostderr: true
22 | stderrthreshold: info
23 | v: 4
24 | balance-similar-node-groups: true
25 | skip-nodes-with-local-storage: false
26 | skip-nodes-with-system-pods: false
27 | expander: most-pods
28 |
29 | image:
30 | tag: ${autoscaler_version}
31 |
32 | replicaCount: ${autoscaler_replicas}
33 |
34 | resources:
35 | limits:
36 | cpu: "100m"
37 | memory: "600Mi"
38 | requests:
39 | cpu: "100m"
40 | memory: "600Mi"
41 |
42 | podDisruptionBudget:
43 | maxUnavailable: 1
44 |
45 | priorityClassName: "system-cluster-critical"
46 |
--------------------------------------------------------------------------------
/eks/iam.tf:
--------------------------------------------------------------------------------
1 | data "aws_caller_identity" "current" {}
2 |
3 | # Admin role for administrative operations on the EKS cluster
4 | resource "aws_iam_role" "eks_admin_role" {
5 | name = "${var.cluster_name}-eks-admin-role"
6 |
7 | assume_role_policy = jsonencode({
8 | Version = "2012-10-17",
9 | Statement = [
10 | {
11 | Effect = "Allow",
12 | Principal = {
13 | AWS = "arn:aws:iam::${local.account_id}:root"
14 | },
15 | Action = "sts:AssumeRole"
16 | },
17 | ]
18 | })
19 |
20 | tags = var.tags
21 | }
22 |
23 | # Cluster role for EKS service to manage resources on behalf of the user.
24 | resource "aws_iam_role" "eks_cluster_role" {
25 | name = "${var.cluster_name}-eks-cluster-role"
26 |
27 | assume_role_policy = jsonencode({
28 | Version = "2012-10-17",
29 | Statement = [
30 | {
31 | Effect = "Allow",
32 | Principal = {
33 | Service = "eks.amazonaws.com"
34 | },
35 | Action = "sts:AssumeRole"
36 | }
37 | ]
38 | })
39 |
40 | tags = var.tags
41 | }
42 |
43 | # Node role for EKS worker nodes to interact with AWS services.
44 | resource "aws_iam_role" "eks_node_role" {
45 | name = "${var.cluster_name}-eks-node-role"
46 |
47 | assume_role_policy = jsonencode({
48 | Version = "2012-10-17",
49 | Statement = [
50 | {
51 | Effect = "Allow",
52 | Principal = {
53 | Service = "ec2.amazonaws.com"
54 | },
55 | Action = "sts:AssumeRole"
56 | }
57 | ]
58 | })
59 |
60 | tags = var.tags
61 | }
62 |
63 | resource "aws_iam_policy" "eks_admin_policy" {
64 | name = "${var.cluster_name}-eks-admin-policy"
65 | description = "EKS Admin Policy"
66 |
67 | policy = jsonencode({
68 | Version = "2012-10-17",
69 | Statement = [
70 | {
71 | Effect = "Allow",
72 | Action = [
73 | "eks:CreateCluster",
74 | "eks:TagResource",
75 | "eks:DescribeCluster"
76 | ],
77 | Resource = "arn:aws:eks:${var.region}:${local.account_id}:cluster/${var.cluster_name}"
78 | },
79 | {
80 | Effect = "Allow",
81 | Action = "iam:CreateServiceLinkedRole",
82 | Resource = "arn:aws:iam::${local.account_id}:role/aws-service-role/eks.amazonaws.com/AWSServiceRoleForAmazonEKS",
83 | Condition = {
84 | "ForAnyValue:StringEquals" = {
85 | "iam:AWSServiceName" = "eks"
86 | }
87 | }
88 | },
89 | {
90 | Effect = "Allow",
91 | Action = "iam:PassRole",
92 | Resource = aws_iam_role.eks_cluster_role.arn
93 | },
94 | ]
95 | })
96 |
97 | tags = var.tags
98 | }
99 |
100 | resource "aws_iam_role_policy_attachment" "eks_cluster_policy_attachment" {
101 | role = aws_iam_role.eks_cluster_role.name
102 | policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
103 | }
104 |
105 | resource "aws_iam_role_policy_attachment" "eks_service_policy_attachment" {
106 | role = aws_iam_role.eks_cluster_role.name
107 | policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
108 | }
109 |
110 | resource "aws_iam_role_policy_attachment" "eks_vpc_resource_controller_attachment" {
111 | role = aws_iam_role.eks_cluster_role.name
112 | policy_arn = "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController"
113 | }
114 |
115 | resource "aws_iam_role_policy_attachment" "eks_admin_attach" {
116 | role = aws_iam_role.eks_admin_role.name
117 | policy_arn = aws_iam_policy.eks_admin_policy.arn
118 | }
119 |
120 | resource "aws_iam_role_policy_attachment" "eks_worker_node_policy" {
121 | role = aws_iam_role.eks_node_role.name
122 | policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
123 | }
124 |
125 | resource "aws_iam_role_policy_attachment" "eks_cni_policy" {
126 | role = aws_iam_role.eks_node_role.name
127 | policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
128 | }
129 |
130 | resource "aws_iam_role_policy_attachment" "ecr_read_only_policy" {
131 | role = aws_iam_role.eks_node_role.name
132 | policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
133 | }
134 |
135 | data "aws_iam_policy_document" "ebs_csi_driver_assume_role_policy" {
136 | statement {
137 | actions = ["sts:AssumeRoleWithWebIdentity"]
138 | effect = "Allow"
139 |
140 | principals {
141 | type = "Federated"
142 | identifiers = ["arn:aws:iam::${local.account_id}:oidc-provider/${replace(module.eks.oidc_provider, "https://", "")}"]
143 | }
144 |
145 | condition {
146 | test = "StringEquals"
147 | variable = "${replace(module.eks.oidc_provider, "https://", "")}:sub"
148 | values = ["system:serviceaccount:kube-system:ebs-csi-controller-sa"]
149 | }
150 | }
151 | }
152 |
153 | # IAM role that the EBS CSI driver pods will assume when interacting with
154 | # AWS to manage EBS volumes.
155 | resource "aws_iam_role" "ebs_csi_driver_role" {
156 | name = "${var.cluster_name}-eks-ebs-csi-driver"
157 | assume_role_policy = data.aws_iam_policy_document.ebs_csi_driver_assume_role_policy.json
158 | tags = var.tags
159 | }
160 |
161 | resource "aws_iam_role_policy_attachment" "ebs_csi_driver_policy_attachment" {
162 | role = aws_iam_role.ebs_csi_driver_role.name
163 | policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy"
164 | }
165 |
166 | locals {
167 | id = module.eks.cluster_id
168 | }
169 |
170 | resource "aws_iam_policy" "cluster_autoscaler" {
171 | name = "${var.cluster_name}-eks-cluster-autoscaler"
172 |   policy = <<EOF
[...]
--------------------------------------------------------------------------------
/eks/main.tf:
--------------------------------------------------------------------------------
[...]
7 |   subnets_by_zone = { for subnet in data.aws_subnet.subnets : subnet.availability_zone => subnet.id }
8 |
9 | node_pool_defaults = {
10 | ami_type = "AL2_x86_64"
11 | disk_size = 20
12 | desired_size = 1
13 | max_size = 10
14 | min_size = 0
15 | }
16 |
17 | labels = {
18 | "altinity.cloud/created-by" = "terraform-aws-eks-clickhouse"
19 | }
20 |
21 | clickhouse_taints = [
22 | {
23 | key = "dedicated"
24 | value = "clickhouse"
25 | effect = "NO_SCHEDULE"
26 | },
27 | ]
28 |
29 | # Generate all node pools possible combinations of subnets and node pools
30 | node_pool_combinations = flatten([
31 | for np in var.node_pools : [
32 | for i, zone in(np.zones != null ? np.zones : keys(local.subnets_by_zone)) : [
33 | {
34 | name = np.name != null ? np.name : np.instance_type
35 | subnet_id = local.subnets_by_zone[zone]
36 | instance_type = np.instance_type
37 | labels = merge(np.labels, local.labels)
38 | taints = startswith(np.name, local.CLICKHOUSE_NODE_POOL_PREFIX) ? concat(np.taints, local.clickhouse_taints) : np.taints
39 |
40 | desired_size = np.desired_size == null ? (
41 | local.node_pool_defaults.desired_size
42 | ) : (
43 | startswith(np.name, local.SYSTEM_NODE_POOL_PREFIX) && i == 0 && np.desired_size == 0 ? (
44 | local.node_pool_defaults.desired_size
45 | ) : (
46 | np.desired_size
47 | )
48 | )
49 | max_size = np.max_size != null ? np.max_size : local.node_pool_defaults.max_size
50 | min_size = np.min_size != null ? np.min_size : local.node_pool_defaults.min_size
51 | disk_size = np.disk_size != null ? np.disk_size : local.node_pool_defaults.disk_size
52 | ami_type = np.ami_type != null ? np.ami_type : local.node_pool_defaults.ami_type
53 | }
54 | ]
55 | ]
56 | ])
57 | }
58 |
59 | data "aws_subnet" "subnets" {
60 | for_each = { for idx, subnet_id in local.subnets : idx => subnet_id }
61 | id = each.value
62 | }
63 |
64 | module "eks" {
65 | source = "terraform-aws-modules/eks/aws"
66 | version = "~> 20.8.4"
67 |
68 | cluster_name = var.cluster_name
69 | cluster_version = var.cluster_version
70 | vpc_id = module.vpc.vpc_id
71 | subnet_ids = local.subnets
72 |
73 | enable_cluster_creator_admin_permissions = true
74 | create_iam_role = false
75 | iam_role_arn = aws_iam_role.eks_cluster_role.arn
76 |
77 | node_security_group_additional_rules = {
78 | ingress_self_all = {
79 | description = "Node to node all ports/protocols"
80 | protocol = "-1"
81 | from_port = 0
82 | to_port = 0
83 | type = "ingress"
84 | self = true
85 | }
86 | }
87 |
88 | # Node Groups
89 | eks_managed_node_groups = { for idx, np in local.node_pool_combinations : "node-group-${tostring(idx)}" => {
90 | desired_size = np.desired_size
91 | max_size = np.max_size
92 | min_size = np.min_size
93 |
94 | name = np.name
95 | use_name_prefix = true
96 |
97 | iam_role_use_name_prefix = false
98 | create_iam_role = false
99 | iam_role_arn = aws_iam_role.eks_node_role.arn
100 |
101 | instance_types = [np.instance_type]
102 | subnet_ids = [np.subnet_id]
103 | disk_size = np.disk_size
104 | ami_type = np.ami_type
105 |
106 | labels = np.labels
107 | taints = np.taints
108 |
109 | tags = merge(
110 | var.tags,
111 | {
112 | "k8s.io/cluster-autoscaler/enabled" = "true",
113 | "k8s.io/cluster-autoscaler/${var.cluster_name}" = "owned"
114 | }
115 | )
116 | } }
117 |
118 | cluster_endpoint_private_access = true
119 | cluster_endpoint_public_access = true
120 | cluster_endpoint_public_access_cidrs = var.public_access_cidrs
121 |
122 | tags = var.tags
123 | }
124 |
--------------------------------------------------------------------------------
/eks/outputs.tf:
--------------------------------------------------------------------------------
1 | output "cluster_node_pools" {
2 | value = local.node_pool_combinations
3 | }
4 |
5 | output "cluster_arn" {
6 | value = module.eks.cluster_arn
7 | description = "The Amazon Resource Name (ARN) of the cluster"
8 | }
9 |
10 | output "cluster_name" {
11 | value = module.eks.cluster_name
12 | description = "The name of the cluster"
13 | }
14 |
15 | output "cluster_certificate_authority" {
16 | value = module.eks.cluster_certificate_authority_data
17 | description = "The certificate authority of the cluster"
18 | }
19 |
20 | output "cluster_endpoint" {
21 | value = module.eks.cluster_endpoint
22 | description = "The endpoint for your Kubernetes API server"
23 | }
24 |
--------------------------------------------------------------------------------
/eks/variables.tf:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # Global
3 | ################################################################################
4 | variable "region" {
5 | description = "The AWS region"
6 | type = string
7 | default = "us-east-1"
8 | }
9 |
10 | variable "tags" {
11 | description = "Map with AWS tags"
12 | type = map(string)
13 | default = {}
14 | }
15 |
16 | ################################################################################
17 | # VPC
18 | ################################################################################
19 | variable "cidr" {
20 | description = "CIDR block"
21 | type = string
22 | default = "10.0.0.0/16"
23 | }
24 |
25 | variable "private_cidr" {
26 | description = "List of private CIDR blocks (one block per availability zones)"
27 | type = list(string)
28 | default = [
29 | "10.0.1.0/24",
30 | "10.0.2.0/24",
31 | "10.0.3.0/24"
32 | ]
33 | }
34 |
35 | variable "public_cidr" {
36 | description = "List of public CIDR blocks (one block per availability zones)"
37 | type = list(string)
38 | default = [
39 | "10.0.101.0/24",
40 | "10.0.102.0/24",
41 | "10.0.103.0/24"
42 | ]
43 | }
44 |
45 | variable "availability_zones" {
46 | description = "List of AWS availability zones"
47 | type = list(string)
48 | default = [
49 | "us-east-1",
50 | "us-east-2",
51 | "us-east-3"
52 | ]
53 | }
54 |
55 | variable "enable_nat_gateway" {
56 | description = "Enable NAT Gateway and private subnets (recommeded)"
57 | type = bool
58 | default = true
59 | }
60 |
61 | ################################################################################
62 | # EKS
63 | ################################################################################
64 | variable "cluster_name" {
65 | description = "The name of the cluster"
66 | type = string
67 | default = "clickhouse-cluster"
68 | }
69 |
70 | variable "cluster_version" {
71 | description = "Version of the cluster"
72 | type = string
73 | default = "1.32"
74 | }
75 |
76 | variable "autoscaler_version" {
77 | description = "Autoscaler version"
78 | type = string
79 | default = "1.32.0"
80 | }
81 |
82 | variable "autoscaler_replicas" {
83 | description = "Autoscaler replicas"
84 | type = number
85 | default = 1
86 | }
87 |
88 | variable "node_pools" {
89 | description = "Node pools configuration. The module will create a node pool for each combination of instance type and subnet. For example, if you have 3 subnets and 2 instance types, this module will create 6 different node pools."
90 |
91 | type = list(object({
92 | name = string
93 | instance_type = string
94 | ami_type = optional(string)
95 | disk_size = optional(number)
96 | desired_size = number
97 | max_size = number
98 | min_size = number
99 | zones = optional(list(string))
100 |
101 | labels = optional(map(string))
102 | taints = optional(list(object({
103 | key = string
104 | value = string
105 | effect = string
106 | })), [])
107 | }))
108 |
109 | default = [
110 | {
111 | name = "clickhouse"
112 | instance_type = "m6i.large"
113 | ami_type = "AL2_x86_64"
114 | desired_size = 0
115 | max_size = 10
116 | min_size = 0
117 | disk_size = 20
118 | zones = ["us-east-1a", "us-east-1b", "us-east-1c"]
119 | },
120 | {
121 | name = "system"
122 | instance_type = "t3.large"
123 | ami_type = "AL2_x86_64"
124 | desired_size = 1
125 | max_size = 10
126 | min_size = 0
127 | disk_size = 20
128 | zones = ["us-east-1a", "us-east-1b", "us-east-1c"]
129 | }
130 | ]
131 |
132 | validation {
133 | condition = alltrue([
134 | for np in var.node_pools :
135 | startswith(np.name, "clickhouse") || startswith(np.name, "system")
136 | ])
137 | error_message = "Each node pool name must start with either 'clickhouse' or 'system' prefix."
138 | }
139 | }
140 |
141 | variable "public_access_cidrs" {
142 | description = "List of CIDRs for public access, use this variable to restrict access to the EKS control plane."
143 | type = list(string)
144 | default = ["0.0.0.0/0"]
145 | }
146 |
--------------------------------------------------------------------------------
/eks/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 4.57"
8 | }
9 | kubernetes = {
10 | source = "hashicorp/kubernetes"
11 | version = ">= 2.25.2"
12 | }
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/eks/vpc.tf:
--------------------------------------------------------------------------------
1 | # 🚨 This VPC module includes the creation of an Internet Gateway and public subnets, which simplifies cluster deployment and testing.
2 | # IMPORTANT: For preprod and prod use cases, it is crucial to consult with your security team and AWS architects to design a private infrastructure solution that aligns with your security requirements.
3 |
4 | # The VPC is configured with DNS support and hostnames,
5 | # which are essential for EKS and other AWS services to operate correctly.
6 | # ---
7 | # Creates a series of public subnets within the VPC based on the var.subnets input variable,
8 | # which contains details like CIDR blocks and availability zones.
9 | module "vpc" {
10 | source = "terraform-aws-modules/vpc/aws"
11 | version = "~> 5.8.1"
12 |
13 | name = "${var.cluster_name}-vpc"
14 | cidr = var.cidr
15 | azs = var.availability_zones
16 |
17 | public_subnets = var.public_cidr
18 | # ⚠️ If NAT gateway is disabled, your EKS nodes will automatically run under public subnets.
19 | private_subnets = var.enable_nat_gateway ? var.private_cidr : []
20 |
21 | map_public_ip_on_launch = !var.enable_nat_gateway
22 | enable_vpn_gateway = !var.enable_nat_gateway
23 | enable_nat_gateway = var.enable_nat_gateway
24 | single_nat_gateway = true
25 |
26 | tags = var.tags
27 | }
28 |
29 | output "private_subnets" {
30 | value = module.vpc.private_subnets
31 | }
32 |
33 | output "public_subnets" {
34 | value = module.vpc.public_subnets
35 | }
36 |
--------------------------------------------------------------------------------
/examples/default/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | region = "us-east-1"
3 | }
4 |
5 | provider "aws" {
6 | # https://registry.terraform.io/providers/hashicorp/aws/latest/docs
7 | region = local.region
8 | }
9 |
10 | module "eks_clickhouse" {
11 | source = "github.com/Altinity/terraform-aws-eks-clickhouse"
12 |
13 | install_clickhouse_operator = true
14 | install_clickhouse_cluster = true
15 |
16 | # Set to true if you want to use a public load balancer (and expose ports to the public Internet)
17 | clickhouse_cluster_enable_loadbalancer = false
18 |
19 | eks_cluster_name = "clickhouse-cluster"
20 | eks_region = local.region
21 | eks_cidr = "10.0.0.0/16"
22 |
23 | eks_availability_zones = [
24 | "${local.region}a",
25 | "${local.region}b",
26 | "${local.region}c"
27 | ]
28 | eks_private_cidr = [
29 | "10.0.1.0/24",
30 | "10.0.2.0/24",
31 | "10.0.3.0/24"
32 | ]
33 | eks_public_cidr = [
34 | "10.0.101.0/24",
35 | "10.0.102.0/24",
36 | "10.0.103.0/24"
37 | ]
38 |
39 | # ⚠️ The instance type of `eks_node_pools` at index `0` will be used for setting up the clickhouse cluster replicas.
40 | eks_node_pools = [
41 | {
42 | name = "clickhouse"
43 | instance_type = "m6i.large"
44 | desired_size = 0
45 | max_size = 10
46 | min_size = 0
47 | zones = ["${local.region}a", "${local.region}b", "${local.region}c"]
48 | },
49 | {
50 | name = "system"
51 | instance_type = "t3.large"
52 | desired_size = 1
53 | max_size = 10
54 | min_size = 0
55 | zones = ["${local.region}a"]
56 | }
57 | ]
58 |
59 | eks_tags = {
60 | CreatedBy = "mr-robot"
61 | }
62 | }
63 |
64 | output "eks_configure_kubectl" {
65 | value = module.eks_clickhouse.eks_configure_kubectl
66 | }
67 |
--------------------------------------------------------------------------------
/examples/eks-cluster-only/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | region = "us-east-1"
3 | }
4 |
5 | provider "aws" {
6 | # https://registry.terraform.io/providers/hashicorp/aws/latest/docs
7 | region = local.region
8 | }
9 |
10 |
11 | module "eks_clickhouse" {
12 | source = "github.com/Altinity/terraform-aws-eks-clickhouse"
13 |
14 | install_clickhouse_operator = false
15 | install_clickhouse_cluster = false
16 |
17 | eks_cluster_name = "clickhouse-cluster"
18 | eks_region = local.region
19 | eks_cidr = "10.0.0.0/16"
20 |
21 | eks_availability_zones = [
22 | "${local.region}a",
23 | "${local.region}b",
24 | "${local.region}c"
25 | ]
26 | eks_private_cidr = [
27 | "10.0.1.0/24",
28 | "10.0.2.0/24",
29 | "10.0.3.0/24"
30 | ]
31 | eks_public_cidr = [
32 | "10.0.101.0/24",
33 | "10.0.102.0/24",
34 | "10.0.103.0/24"
35 | ]
36 |
37 | eks_node_pools = [
38 | {
39 | name = "clickhouse"
40 | instance_type = "m6i.large"
41 | desired_size = 0
42 | max_size = 10
43 | min_size = 0
44 | zones = ["${local.region}a", "${local.region}b", "${local.region}c"]
45 | },
46 | {
47 | name = "system"
48 | instance_type = "t3.large"
49 | desired_size = 1
50 | max_size = 10
51 | min_size = 0
52 | zones = ["${local.region}a"]
53 | }
54 | ]
55 |
56 | eks_tags = {
57 | CreatedBy = "mr-robot"
58 | }
59 | }
60 |
61 | output "eks_configure_kubectl" {
62 | value = module.eks_clickhouse.eks_configure_kubectl
63 | }
64 |
--------------------------------------------------------------------------------
/examples/public-loadbalancer/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | region = "us-east-1"
3 | }
4 |
5 | provider "aws" {
6 | # https://registry.terraform.io/providers/hashicorp/aws/latest/docs
7 | region = local.region
8 | }
9 |
10 |
11 | module "eks_clickhouse" {
12 | source = "github.com/Altinity/terraform-aws-eks-clickhouse"
13 |
14 | install_clickhouse_operator = true
15 | install_clickhouse_cluster = true
16 |
17 | # Set to true if you want to use a public load balancer (and expose ports to the public Internet)
18 | clickhouse_cluster_enable_loadbalancer = true
19 |
20 | eks_cluster_name = "clickhouse-cluster"
21 | eks_region = local.region
22 | eks_cidr = "10.0.0.0/16"
23 |
24 | eks_availability_zones = [
25 | "${local.region}a",
26 | "${local.region}b",
27 | "${local.region}c"
28 | ]
29 | eks_private_cidr = [
30 | "10.0.1.0/24",
31 | "10.0.2.0/24",
32 | "10.0.3.0/24"
33 | ]
34 | eks_public_cidr = [
35 | "10.0.101.0/24",
36 | "10.0.102.0/24",
37 | "10.0.103.0/24"
38 | ]
39 |
40 | # ⚠️ The instance type of `eks_node_pools` at index `0` will be used for setting up the clickhouse cluster replicas.
41 | eks_node_pools = [
42 | {
43 | name = "clickhouse"
44 | instance_type = "m6i.large"
45 | desired_size = 0
46 | max_size = 10
47 | min_size = 0
48 | zones = ["${local.region}a", "${local.region}b", "${local.region}c"]
49 | },
50 | {
51 | name = "system"
52 | instance_type = "t3.large"
53 | desired_size = 1
54 | max_size = 10
55 | min_size = 0
56 | zones = ["${local.region}a"]
57 | }
58 | ]
59 |
60 | eks_tags = {
61 | CreatedBy = "mr-robot"
62 | }
63 | }
64 |
65 | output "eks_configure_kubectl" {
66 | value = module.eks_clickhouse.eks_configure_kubectl
67 | }
68 |
69 | output "clickhouse_cluster_url" {
70 | value = module.eks_clickhouse.clickhouse_cluster_url
71 | }
72 |
73 | output "clickhouse_cluster_password" {
74 | value = module.eks_clickhouse.clickhouse_cluster_password
75 | sensitive = true
76 | }
77 |
--------------------------------------------------------------------------------
/examples/public-subnets-only/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | region = "us-east-1"
3 | }
4 |
5 | provider "aws" {
6 | # https://registry.terraform.io/providers/hashicorp/aws/latest/docs
7 | region = local.region
8 | }
9 |
10 |
11 | module "eks_clickhouse" {
12 | source = "github.com/Altinity/terraform-aws-eks-clickhouse"
13 |
14 | install_clickhouse_operator = true
15 | install_clickhouse_cluster = true
16 | eks_enable_nat_gateway = false
17 |
18 | eks_cluster_name = "clickhouse-cluster"
19 | eks_region = local.region
20 | eks_cidr = "10.0.0.0/16"
21 |
22 | eks_availability_zones = [
23 | "${local.region}a",
24 | "${local.region}b",
25 | "${local.region}c"
26 | ]
27 | eks_public_cidr = [
28 | "10.0.101.0/24",
29 | "10.0.102.0/24",
30 | "10.0.103.0/24"
31 | ]
32 |
33 | # ⚠️ The instance type of `eks_node_pools` at index `0` will be used for setting up the clickhouse cluster replicas.
34 | eks_node_pools = [
35 | {
36 | name = "clickhouse"
37 | instance_type = "m6i.large"
38 | desired_size = 0
39 | max_size = 10
40 | min_size = 0
41 | zones = ["${local.region}a", "${local.region}b", "${local.region}c"]
42 | },
43 | {
44 | name = "system"
45 | instance_type = "t3.large"
46 | desired_size = 1
47 | max_size = 10
48 | min_size = 0
49 | zones = ["${local.region}a"]
50 | }
51 | ]
52 |
53 | eks_tags = {
54 | CreatedBy = "mr-robot"
55 | }
56 | }
57 |
58 | output "eks_configure_kubectl" {
59 | value = module.eks_clickhouse.eks_configure_kubectl
60 | }
61 |
--------------------------------------------------------------------------------
/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | eks_get_token_args = var.aws_profile != null ? ["eks", "get-token", "--cluster-name", var.eks_cluster_name, "--region", var.eks_region, "--profile", var.aws_profile] : ["eks", "get-token", "--cluster-name", var.eks_cluster_name, "--region", var.eks_region]
3 | }
4 |
5 | provider "kubernetes" {
6 | host = module.eks_aws.cluster_endpoint
7 | cluster_ca_certificate = base64decode(module.eks_aws.cluster_certificate_authority)
8 |
9 | exec {
10 | api_version = "client.authentication.k8s.io/v1beta1"
11 | args = local.eks_get_token_args
12 | command = "aws"
13 | }
14 | }
15 |
16 | provider "helm" {
17 | kubernetes {
18 | host = module.eks_aws.cluster_endpoint
19 | cluster_ca_certificate = base64decode(module.eks_aws.cluster_certificate_authority)
20 | exec {
21 | api_version = "client.authentication.k8s.io/v1beta1"
22 | args = local.eks_get_token_args
23 | command = "aws"
24 | }
25 | }
26 | }
27 |
28 | provider "aws" {
29 | region = var.eks_region
30 | }
31 |
32 | module "eks_aws" {
33 | source = "./eks"
34 |
35 | region = var.eks_region
36 | cluster_name = var.eks_cluster_name
37 | cidr = var.eks_cidr
38 | public_cidr = var.eks_public_cidr
39 | public_access_cidrs = var.eks_public_access_cidrs
40 | private_cidr = var.eks_private_cidr
41 | availability_zones = var.eks_availability_zones
42 | cluster_version = var.eks_cluster_version
43 | autoscaler_version = var.eks_autoscaler_version
44 | autoscaler_replicas = var.autoscaler_replicas
45 | node_pools = var.eks_node_pools
46 | tags = var.eks_tags
47 | enable_nat_gateway = var.eks_enable_nat_gateway
48 | }
49 |
50 | module "clickhouse_operator" {
51 | depends_on = [module.eks_aws]
52 | count = var.install_clickhouse_operator ? 1 : 0
53 | source = "./clickhouse-operator"
54 |
55 | clickhouse_operator_namespace = var.clickhouse_operator_namespace
56 | clickhouse_operator_version = var.clickhouse_operator_version
57 | }
58 |
59 | module "clickhouse_cluster" {
60 | depends_on = [module.eks_aws, module.clickhouse_operator]
61 | count = var.install_clickhouse_cluster ? 1 : 0
62 | source = "./clickhouse-cluster"
63 |
64 | clickhouse_cluster_name = var.clickhouse_cluster_name
65 | clickhouse_cluster_namespace = var.clickhouse_cluster_namespace
66 | clickhouse_cluster_password = var.clickhouse_cluster_password
67 | clickhouse_cluster_user = var.clickhouse_cluster_user
68 | clickhouse_cluster_instance_type = var.eks_node_pools[0].instance_type
69 | clickhouse_cluster_enable_loadbalancer = var.clickhouse_cluster_enable_loadbalancer
70 | clickhouse_cluster_chart_version = var.clickhouse_cluster_chart_version
71 | clickhouse_keeper_chart_version = var.clickhouse_keeper_chart_version
72 |
73 | k8s_availability_zones = var.eks_availability_zones
74 | k8s_cluster_region = var.eks_region
75 | k8s_cluster_name = var.eks_cluster_name
76 | k8s_cluster_endpoint = module.eks_aws.cluster_endpoint
77 | k8s_cluster_certificate_authority = base64decode(module.eks_aws.cluster_certificate_authority)
78 | }
79 |
--------------------------------------------------------------------------------
/outputs.tf:
--------------------------------------------------------------------------------
1 | output "eks_cluster_arn" {
2 | description = "The Amazon Resource Name (ARN) of the cluster"
3 | value = module.eks_aws.cluster_arn
4 | }
5 |
6 | output "eks_cluster_endpoint" {
7 | description = "The endpoint for your Kubernetes API server"
8 | value = module.eks_aws.cluster_endpoint
9 | }
10 |
11 | output "eks_cluster_name" {
12 | description = "The name for your Kubernetes API server"
13 | value = module.eks_aws.cluster_name
14 | }
15 |
16 | output "eks_cluster_ca_certificate" {
17 | description = "The base64 encoded certificate data required to communicate with your cluster"
18 | value = module.eks_aws.cluster_certificate_authority
19 | sensitive = true
20 | }
21 |
22 | output "eks_configure_kubectl" {
23 | description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
24 | value = "aws eks --region ${var.eks_region} update-kubeconfig --name ${module.eks_aws.cluster_name}"
25 | }
26 |
27 | output "clickhouse_cluster_password" {
28 | description = "The generated password for the ClickHouse cluster"
29 | value = length(module.clickhouse_cluster) > 0 ? module.clickhouse_cluster[0].clickhouse_cluster_password : ""
30 | sensitive = true
31 | }
32 |
33 | output "clickhouse_cluster_url" {
34 | description = "The public URL for the ClickHouse cluster"
35 | value = length(module.clickhouse_cluster) > 0 ? module.clickhouse_cluster[0].clickhouse_cluster_url : ""
36 | }
37 |
38 | output "cluster_node_pools" {
39 | value = module.eks_aws.cluster_node_pools
40 | }
41 |
--------------------------------------------------------------------------------
/variables.tf:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # GLOBAL
3 | ################################################################################
4 | variable "install_clickhouse_cluster" {
5 | description = "Enable the installation of the ClickHouse cluster"
6 | type = bool
7 | default = true
8 | }
9 |
10 | variable "install_clickhouse_operator" {
11 | description = "Enable the installation of the Altinity Kubernetes operator for ClickHouse"
12 | type = bool
13 | default = true
14 | }
15 |
16 | variable "aws_profile" {
17 | description = "AWS profile of deployed cluster."
18 | type = string
19 | default = null
20 | }
21 |
22 | ################################################################################
23 | # ClickHouse Operator
24 | ################################################################################
25 | variable "clickhouse_operator_namespace" {
26 | description = "Namespace to install the Altinity Kubernetes operator for ClickHouse"
27 | default = "kube-system"
28 | type = string
29 | }
30 |
31 | variable "clickhouse_operator_version" {
32 | description = "Version of the Altinity Kubernetes operator for ClickHouse"
33 | default = "0.24.4"
34 | type = string
35 | }
36 |
37 | ################################################################################
38 | # ClickHouse Cluster
39 | ################################################################################
40 | variable "clickhouse_cluster_name" {
41 | description = "Name of the ClickHouse cluster"
42 | default = "dev"
43 | type = string
44 | }
45 |
46 | variable "clickhouse_cluster_namespace" {
47 | description = "Namespace of the ClickHouse cluster"
48 | default = "clickhouse"
49 | type = string
50 | }
51 |
52 | variable "clickhouse_cluster_user" {
53 | description = "ClickHouse user"
54 | default = "test"
55 | type = string
56 | }
57 |
58 | variable "clickhouse_cluster_password" {
59 | description = "ClickHouse password"
60 | type = string
61 | default = null
62 | }
63 |
64 | variable "clickhouse_cluster_enable_loadbalancer" {
65 | description = "Enable waiting for the ClickHouse LoadBalancer to receive a hostname"
66 | type = bool
67 | default = false
68 | }
69 |
70 | variable "clickhouse_cluster_chart_version" {
71 | description = "Version of the ClickHouse cluster helm chart version"
72 | default = "0.1.8"
73 | type = string
74 | }
75 |
76 | variable "clickhouse_keeper_chart_version" {
77 | description = "Version of the ClickHouse Keeper cluster helm chart version"
78 | default = "0.1.4"
79 | type = string
80 | }
81 |
82 | ################################################################################
83 | # EKS
84 | ################################################################################
85 | variable "eks_region" {
86 | description = "The AWS region"
87 | type = string
88 | default = "us-east-1"
89 | }
90 |
91 | variable "eks_cluster_name" {
92 | description = "The name of the cluster"
93 | type = string
94 | default = "clickhouse-cluster"
95 | }
96 |
97 | variable "eks_cluster_version" {
98 | description = "Version of the cluster"
99 | type = string
100 | default = "1.32"
101 | }
102 |
103 | variable "eks_autoscaler_version" {
104 | description = "Version of AWS Autoscaler"
105 | type = string
106 | default = "1.32.0"
107 | }
108 |
109 | variable "eks_autoscaler_replicas" {
110 | description = "Number of replicas for AWS Autoscaler"
111 | type = number
112 | default = 1
113 | }
114 |
115 | variable "autoscaler_replicas" {
116 | description = "Autoscaler replicas"
117 | type = number
118 | default = 1
119 | }
120 |
121 | variable "eks_tags" {
122 | description = "A map of AWS tags"
123 | type = map(string)
124 | default = {}
125 | }
126 |
127 | variable "eks_cidr" {
128 | description = "CIDR block"
129 | type = string
130 | default = "10.0.0.0/16"
131 | }
132 |
133 | variable "eks_node_pools" {
134 | description = "Node pools configuration. The module will create a node pool for each combination of instance type and subnet. For example, if you have 3 subnets and 2 instance types, this module will create 6 different node pools."
135 |
136 | type = list(object({
137 | name = string
138 | instance_type = string
139 | ami_type = optional(string)
140 | disk_size = optional(number)
141 | desired_size = number
142 | max_size = number
143 | min_size = number
144 | zones = optional(list(string))
145 |
146 | labels = optional(map(string))
147 | taints = optional(list(object({
148 | key = string
149 | value = string
150 | effect = string
151 | })), [])
152 | }))
153 |
154 | default = [
155 | {
156 | name = "clickhouse"
157 | instance_type = "m6i.large"
158 | ami_type = "AL2_x86_64"
159 | desired_size = 0
160 | disk_size = 20
161 | max_size = 10
162 | min_size = 0
163 | zones = ["us-east-1a", "us-east-1b", "us-east-1c"]
164 | },
165 | {
166 | name = "system"
167 | instance_type = "t3.large"
168 | ami_type = "AL2_x86_64"
169 | disk_size = 20
170 | desired_size = 1
171 | max_size = 10
172 | min_size = 0
173 | zones = ["us-east-1a"]
174 | }
175 | ]
176 |
177 | validation {
178 | condition = alltrue([
179 | for np in var.eks_node_pools :
180 | startswith(np.name, "clickhouse") || startswith(np.name, "system")
181 | ])
182 | error_message = "Each node pool name must start with either 'clickhouse' or 'system' prefix."
183 | }
184 | }
185 |
186 | variable "eks_enable_nat_gateway" {
187 | description = "Enable NAT Gateway and private subnets (recommeded)"
188 | type = bool
189 | default = true
190 | }
191 |
192 | variable "eks_private_cidr" {
193 | description = "List of private CIDR. When set, the number of private CIDRs must match the number of availability zones"
194 | type = list(string)
195 | default = [
196 | "10.0.1.0/24",
197 | "10.0.2.0/24",
198 | "10.0.3.0/24"
199 | ]
200 | }
201 |
202 | variable "eks_public_cidr" {
203 | description = "List of public CIDR. When set, The number of public CIDRs must match the number of availability zones"
204 | type = list(string)
205 | default = [
206 | "10.0.101.0/24",
207 | "10.0.102.0/24",
208 | "10.0.103.0/24"
209 | ]
210 | }
211 |
212 | variable "eks_availability_zones" {
213 | description = ""
214 | type = list(string)
215 | default = [
216 | "us-east-1a",
217 | "us-east-1b",
218 | "us-east-1c"
219 | ]
220 | }
221 |
222 | variable "eks_public_access_cidrs" {
223 | description = "List of CIDRs for public access, use this variable to restrict access to the EKS control plane."
224 | type = list(string)
225 | default = ["0.0.0.0/0"]
226 | }
227 |
--------------------------------------------------------------------------------
/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 4.57"
8 | }
9 | kubernetes = {
10 | source = "hashicorp/kubernetes"
11 | version = ">= 2.25.2"
12 | }
13 | helm = {
14 | source = "hashicorp/helm"
15 | version = ">= 2.12.1"
16 | }
17 | }
18 | }
19 |
--------------------------------------------------------------------------------