6 | ## Requirements
7 |
8 | | Name | Version |
9 | |------|---------|
10 | | [terraform](#requirement\_terraform) | >= 1.1.0 |
11 | | [aws](#requirement\_aws) | >= 4.0.0 |
12 | | [helm](#requirement\_helm) | >= 2.4.1 |
13 | | [kubectl](#requirement\_kubectl) | >= 2.0.3 |
14 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 |
15 |
16 | ## Providers
17 |
18 | | Name | Version |
19 | |------|---------|
20 | | [aws](#provider\_aws) | >= 4.0.0 |
21 |
22 | ## Modules
23 |
24 | | Name | Source | Version |
25 | |------|--------|---------|
26 | | [eks\_blueprints\_kubernetes\_addons](#module\_eks\_blueprints\_kubernetes\_addons) | github.com/aws-ia/terraform-aws-eks-blueprints//modules/kubernetes-addons | v4.32.1 |
27 | | [eks\_monitoring](#module\_eks\_monitoring) | ../../modules/eks-monitoring | n/a |
28 |
29 | ## Resources
30 |
31 | | Name | Type |
32 | |------|------|
33 | | [aws_eks_cluster.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
34 | | [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
35 | | [aws_grafana_workspace.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/grafana_workspace) | data source |
36 |
37 | ## Inputs
38 |
39 | | Name | Description | Type | Default | Required |
40 | |------|-------------|------|---------|:--------:|
41 | | [aws\_region](#input\_aws\_region) | AWS Region | `string` | n/a | yes |
42 | | [eks\_cluster\_id](#input\_eks\_cluster\_id) | Name of the EKS cluster | `string` | `"eks-cluster-with-vpc"` | no |
43 | | [enable\_dashboards](#input\_enable\_dashboards) | Enables or disables curated dashboards. Dashboards are managed by the Grafana Operator | `bool` | `true` | no |
44 | | [grafana\_api\_key](#input\_grafana\_api\_key) | API key for authorizing the Grafana provider to make changes to Amazon Managed Grafana | `string` | n/a | yes |
45 | | [managed\_grafana\_workspace\_id](#input\_managed\_grafana\_workspace\_id) | Amazon Managed Grafana Workspace ID | `string` | n/a | yes |
46 | | [managed\_prometheus\_workspace\_id](#input\_managed\_prometheus\_workspace\_id) | Amazon Managed Service for Prometheus Workspace ID | `string` | `""` | no |
47 |
48 | ## Outputs
49 |
50 | | Name | Description |
51 | |------|-------------|
52 | | [eks\_cluster\_id](#output\_eks\_cluster\_id) | EKS Cluster Id |
53 | | [eks\_cluster\_version](#output\_eks\_cluster\_version) | EKS Cluster version |
54 | | [managed\_prometheus\_workspace\_endpoint](#output\_managed\_prometheus\_workspace\_endpoint) | Amazon Managed Prometheus workspace endpoint |
55 | | [managed\_prometheus\_workspace\_id](#output\_managed\_prometheus\_workspace\_id) | Amazon Managed Prometheus workspace ID |
56 | | [managed\_prometheus\_workspace\_region](#output\_managed\_prometheus\_workspace\_region) | AWS Region |
57 |
58 |
--------------------------------------------------------------------------------
/examples/eks-istio/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = local.region
3 | }
4 |
5 | data "aws_eks_cluster_auth" "this" {
6 | name = var.eks_cluster_id
7 | }
8 |
9 | data "aws_eks_cluster" "this" {
10 | name = var.eks_cluster_id
11 | }
12 |
13 | data "aws_grafana_workspace" "this" {
14 | workspace_id = var.managed_grafana_workspace_id
15 | }
16 |
17 | provider "kubernetes" {
18 | host = local.eks_cluster_endpoint
19 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data)
20 | token = data.aws_eks_cluster_auth.this.token
21 | }
22 |
23 | provider "helm" {
24 | kubernetes {
25 | host = local.eks_cluster_endpoint
26 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data)
27 | token = data.aws_eks_cluster_auth.this.token
28 | }
29 | }
30 |
31 | locals {
32 | region = var.aws_region
33 | eks_cluster_endpoint = data.aws_eks_cluster.this.endpoint
34 | create_new_workspace = var.managed_prometheus_workspace_id == "" # create a new AMP workspace only when none was supplied
35 | tags = {
36 | Source = "github.com/aws-observability/terraform-aws-observability-accelerator"
37 | }
38 | }
39 |
40 | module "eks_blueprints_kubernetes_addons" {
41 | source = "github.com/aws-ia/terraform-aws-eks-blueprints//modules/kubernetes-addons?ref=v4.32.1"
42 |
43 | eks_cluster_id = var.eks_cluster_id
44 | #eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
45 | #eks_oidc_provider = module.eks_blueprints.oidc_provider
46 | #eks_cluster_version = module.eks_blueprints.eks_cluster_version
47 |
48 | # EKS Managed Add-ons
49 | #enable_amazon_eks_vpc_cni = true
50 | #enable_amazon_eks_coredns = true
51 | #enable_amazon_eks_kube_proxy = true
52 |
53 | # Add-ons
54 | enable_metrics_server = true
55 | enable_cluster_autoscaler = true
56 |
57 | # Tetrate Istio Add-on
58 | enable_tetrate_istio = true
59 |
60 | tags = local.tags
61 | }
62 |
63 | module "eks_monitoring" {
64 | source = "../../modules/eks-monitoring"
65 | # source = "github.com/aws-observability/terraform-aws-observability-accelerator//modules/eks-monitoring?ref=v2.0.0"
66 | enable_istio = true
67 | eks_cluster_id = var.eks_cluster_id
68 |
69 | # deploys AWS Distro for OpenTelemetry operator into the cluster
70 | enable_amazon_eks_adot = true
71 |
72 | # deploys cert-manager into the cluster; set to false to reuse an existing installation (defaults to true)
73 | enable_cert_manager = true
74 |
75 | # deploys external-secrets into the cluster
76 | enable_external_secrets = true
77 | grafana_api_key = var.grafana_api_key
78 | target_secret_name = "grafana-admin-credentials"
79 | target_secret_namespace = "grafana-operator"
80 | grafana_url = "https://${data.aws_grafana_workspace.this.endpoint}"
81 |
82 | # control the publishing of dashboards by specifying the boolean value for the variable 'enable_dashboards', default is 'true'
83 | enable_dashboards = var.enable_dashboards
84 |
85 | enable_managed_prometheus = local.create_new_workspace # create a new AMP workspace only when none was supplied
86 | managed_prometheus_workspace_id = var.managed_prometheus_workspace_id
87 |
88 | # optional, defaults to 60s interval and 15s timeout
89 | prometheus_config = {
90 | global_scrape_interval = "60s"
91 | global_scrape_timeout = "15s"
92 | }
93 |
94 | enable_logs = true
95 |
96 | tags = local.tags
97 | }
98 |
--------------------------------------------------------------------------------
/examples/eks-istio/outputs.tf:
--------------------------------------------------------------------------------
1 | output "managed_prometheus_workspace_region" {
2 | description = "AWS Region"
3 | value = module.eks_monitoring.managed_prometheus_workspace_region
4 | }
5 |
6 | output "managed_prometheus_workspace_endpoint" {
7 | description = "Amazon Managed Prometheus workspace endpoint"
8 | value = module.eks_monitoring.managed_prometheus_workspace_endpoint
9 | }
10 |
11 | output "managed_prometheus_workspace_id" {
12 | description = "Amazon Managed Prometheus workspace ID"
13 | value = module.eks_monitoring.managed_prometheus_workspace_id
14 | }
15 |
16 | output "eks_cluster_version" {
17 | description = "EKS Cluster version"
18 | value = module.eks_monitoring.eks_cluster_version
19 | }
20 |
21 | output "eks_cluster_id" {
22 | description = "EKS Cluster Id"
23 | value = module.eks_monitoring.eks_cluster_id
24 | }
25 |
--------------------------------------------------------------------------------
/examples/eks-istio/variables.tf:
--------------------------------------------------------------------------------
1 | variable "eks_cluster_id" {
2 | description = "Name of the EKS cluster"
3 | type = string
4 | default = "eks-cluster-with-vpc"
5 | }
6 |
7 | variable "aws_region" {
8 | description = "AWS Region"
9 | type = string
10 | }
11 |
12 | variable "managed_prometheus_workspace_id" {
13 | description = "Amazon Managed Service for Prometheus Workspace ID"
14 | type = string
15 | default = ""
16 | }
17 |
18 | variable "managed_grafana_workspace_id" {
19 | description = "Amazon Managed Grafana Workspace ID"
20 | type = string
21 | }
22 |
23 | variable "grafana_api_key" {
24 | description = "API key for authorizing the Grafana provider to make changes to Amazon Managed Grafana"
25 | type = string
26 | sensitive = true
27 | }
28 |
29 | variable "enable_dashboards" {
30 | description = "Enables or disables curated dashboards. Dashboards are managed by the Grafana Operator"
31 | type = bool
32 | default = true
33 | }
34 |
--------------------------------------------------------------------------------
/examples/eks-istio/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.1.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 4.0.0"
8 | }
9 | kubernetes = {
10 | source = "hashicorp/kubernetes"
11 | version = ">= 2.10"
12 | }
13 | kubectl = {
14 | source = "alekc/kubectl"
15 | version = ">= 2.0.3"
16 | }
17 | helm = {
18 | source = "hashicorp/helm"
19 | version = ">= 2.4.1"
20 | }
21 | }
22 |
23 | # ## Used for end-to-end testing on project; update to suit your needs
24 | # backend "s3" {
25 | # bucket = "aws-observability-accelerator-terraform-states"
26 | # region = "us-west-2"
27 | # key = "e2e/eks-istio/terraform.tfstate"
28 | # }
29 |
30 | }
31 |
--------------------------------------------------------------------------------
/examples/eks-multicluster/README.md:
--------------------------------------------------------------------------------
1 | # AWS EKS Multicluster Observability
2 |
3 | This example shows how to use the [AWS Observability Accelerator](https://github.com/aws-observability/terraform-aws-observability-accelerator), with more than one EKS cluster and verify the collected metrics from all the clusters in the dashboards of a common `Amazon Managed Grafana` workspace.
4 |
5 | ## Prerequisites
6 |
7 | #### 1. EKS clusters
8 |
9 | Using the example [eks-cluster-with-vpc](../../examples/eks-cluster-with-vpc/), create two EKS clusters with the names:
10 | 1. `eks-cluster-1`
11 | 2. `eks-cluster-2`
12 |
13 | #### 2. Amazon Managed Service for Prometheus (AMP) workspace
14 |
15 | We recommend that you create a new AMP workspace. To do that you can run the following command.
16 |
17 | Ensure you have the following necessary IAM permissions
18 | * `aps.CreateWorkspace`
19 |
20 | ```sh
21 | export TF_VAR_managed_prometheus_workspace_id=$(aws amp create-workspace --alias observability-accelerator --query='workspaceId' --output text)
22 | ```
23 |
24 | #### 3. Amazon Managed Grafana (AMG) workspace
25 |
26 | To run this example you need an AMG workspace. If you have
27 | an existing workspace, create an environment variable as described below.
28 | To create a new workspace, visit our supporting example for managed Grafana.
29 |
30 | !!! note
31 | For the URL `https://g-xyz.grafana-workspace.eu-central-1.amazonaws.com`, the workspace ID would be `g-xyz`
32 |
33 | ```sh
34 | export TF_VAR_managed_grafana_workspace_id=g-xxx
35 | ```
36 |
37 | #### 4. Grafana API Key
38 |
39 | AMG provides a control plane API for generating Grafana API keys.
40 | As a security best practice, we will provide to Terraform a short lived API key to
41 | run the `apply` or `destroy` command.
42 |
43 | Ensure you have the following necessary IAM permissions
44 | * `grafana.CreateWorkspaceApiKey`
45 | * `grafana.DeleteWorkspaceApiKey`
46 |
47 | ```sh
48 | export TF_VAR_grafana_api_key=`aws grafana create-workspace-api-key --key-name "observability-accelerator-$(date +%s)" --key-role ADMIN --seconds-to-live 1200 --workspace-id $TF_VAR_managed_grafana_workspace_id --query key --output text`
49 | ```
50 |
51 | ## Setup
52 |
53 | #### 1. Download sources and initialize Terraform
54 |
55 | ```sh
56 | git clone https://github.com/aws-observability/terraform-aws-observability-accelerator.git
57 | cd terraform-aws-observability-accelerator/examples/eks-multicluster
58 | terraform init
59 | ```
60 |
61 | #### 2. Deploy
62 |
63 | Verify by looking at the file `variables.tf` that there are two EKS clusters targeted for deployment by the names/ids:
64 | 1. `eks-cluster-1`
65 | 2. `eks-cluster-2`
66 |
67 | The difference in deployment between these clusters is that Terraform, when setting up the EKS cluster behind variable `eks_cluster_1_id` for observability, also sets up:
68 | * Dashboard folder and files in `AMG`
69 | * Prometheus and Java, alerting and recording rules in `AMP`
70 |
71 | !!! warning
72 | To override the defaults, create a `terraform.tfvars` and change the default values of the variables.
73 |
74 | Run the following command to deploy
75 |
76 | ```sh
77 | terraform apply --auto-approve
78 | ```
79 |
80 | ## Verifying Multicluster Observability
81 |
82 | Once you have successfully run the above setup, you should be able to see dashboards similar to the images shown below in the `Amazon Managed Grafana` workspace.
83 |
84 | Note how you are able to use the `cluster` dropdown to filter the dashboards to metrics collected from a specific EKS cluster.
85 |
86 |
87 |
88 |
89 |
90 | ## Cleanup
91 |
92 | To clean up entirely, run the following command:
93 |
94 | ```sh
95 | terraform destroy --auto-approve
96 | ```
97 |
--------------------------------------------------------------------------------
/examples/eks-multicluster/data.tf:
--------------------------------------------------------------------------------
1 | data "aws_eks_cluster_auth" "eks_cluster_1" {
2 | name = var.eks_cluster_1_id
3 | provider = aws.eks_cluster_1
4 | }
5 |
6 | data "aws_eks_cluster_auth" "eks_cluster_2" {
7 | name = var.eks_cluster_2_id
8 | provider = aws.eks_cluster_2
9 | }
10 |
11 | data "aws_eks_cluster" "eks_cluster_1" {
12 | name = var.eks_cluster_1_id
13 | provider = aws.eks_cluster_1
14 | }
15 |
16 | data "aws_eks_cluster" "eks_cluster_2" {
17 | name = var.eks_cluster_2_id
18 | provider = aws.eks_cluster_2
19 | }
20 |
21 | data "aws_grafana_workspace" "this" {
22 | workspace_id = var.managed_grafana_workspace_id
23 | provider = aws.eks_cluster_1
24 | }
25 |
26 | data "aws_prometheus_workspace" "this" {
27 | workspace_id = local.managed_prometheus_workspace_id
28 | provider = aws.eks_cluster_1
29 | }
30 |
--------------------------------------------------------------------------------
/examples/eks-multicluster/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | create_new_workspace = var.managed_prometheus_workspace_id == "" # create a new AMP workspace only when none was supplied
3 | managed_prometheus_workspace_id = local.create_new_workspace ? module.managed_service_prometheus[0].workspace_id : var.managed_prometheus_workspace_id # the effective workspace ID: freshly created, or the one passed in
4 | }
5 | module "eks_cluster_1_monitoring" {
6 | source = "../..//modules/eks-monitoring"
7 | eks_cluster_id = var.eks_cluster_1_id
8 | enable_amazon_eks_adot = true
9 | enable_cert_manager = true
10 | enable_java = true
11 |
12 | # This configuration section results in actions performed on AMG and AMP, and it needs to be done just once;
13 | # hence, it is performed in conjunction with the setup of the eks_cluster_1 EKS cluster
14 | enable_dashboards = true
15 | enable_external_secrets = true
16 | enable_fluxcd = true
17 | enable_alerting_rules = true
18 | enable_recording_rules = true
19 |
20 | # Additional dashboards
21 | enable_apiserver_monitoring = true
22 | enable_adotcollector_metrics = true
23 |
24 | grafana_api_key = var.grafana_api_key
25 | grafana_url = "https://${data.aws_grafana_workspace.this.endpoint}"
26 |
27 | # prevents the module from creating a new AMP workspace; this example manages the workspace itself
28 | enable_managed_prometheus = false
29 |
30 | managed_prometheus_workspace_id = local.managed_prometheus_workspace_id
31 | managed_prometheus_workspace_endpoint = data.aws_prometheus_workspace.this.prometheus_endpoint
32 | managed_prometheus_workspace_region = var.eks_cluster_1_region
33 |
34 | prometheus_config = {
35 | global_scrape_interval = "60s"
36 | global_scrape_timeout = "15s"
37 | scrape_sample_limit = 2000
38 | }
39 |
40 | providers = {
41 | aws = aws.eks_cluster_1
42 | kubernetes = kubernetes.eks_cluster_1
43 | helm = helm.eks_cluster_1
44 | }
45 | }
46 |
47 | module "eks_cluster_2_monitoring" {
48 | source = "../..//modules/eks-monitoring"
49 | eks_cluster_id = var.eks_cluster_2_id
50 | enable_amazon_eks_adot = true
51 | enable_cert_manager = true
52 | enable_java = true
53 |
54 | # Since the following were enabled in conjunction with the set up of the
55 | # eks_cluster_1 EKS cluster, we will skip them with the eks_cluster_2 EKS cluster
56 | enable_dashboards = false
57 | enable_external_secrets = false
58 | enable_fluxcd = false
59 | enable_alerting_rules = false
60 | enable_recording_rules = false
61 |
62 | # Disable additional dashboards
63 | enable_apiserver_monitoring = false
64 | enable_adotcollector_metrics = false
65 |
66 | # prevents the module from creating a new AMP workspace; this example manages the workspace itself
67 | enable_managed_prometheus = false
68 |
69 | managed_prometheus_workspace_id = local.managed_prometheus_workspace_id # resolved ID, matching cluster 1 — falls back to the workspace created by this example when var.managed_prometheus_workspace_id is empty
70 | managed_prometheus_workspace_endpoint = data.aws_prometheus_workspace.this.prometheus_endpoint
71 | managed_prometheus_workspace_region = var.eks_cluster_1_region
72 |
73 | prometheus_config = {
74 | global_scrape_interval = "60s"
75 | global_scrape_timeout = "15s"
76 | scrape_sample_limit = 2000
77 | }
78 |
79 | providers = {
80 | aws = aws.eks_cluster_2
81 | kubernetes = kubernetes.eks_cluster_2
82 | helm = helm.eks_cluster_2
83 | }
84 | }
85 |
86 | module "managed_service_prometheus" {
87 | count = local.create_new_workspace ? 1 : 0 # only create a workspace when no existing workspace ID was supplied
88 | source = "terraform-aws-modules/managed-service-prometheus/aws"
89 | version = "~> 2.2.2"
90 | providers = {
91 | aws = aws.eks_cluster_1
92 | }
93 |
94 | workspace_alias = "aws-observability-accelerator-multicluster"
95 | }
96 |
--------------------------------------------------------------------------------
/examples/eks-multicluster/outputs.tf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-observability/terraform-aws-observability-accelerator/c432af44ee1df1b4ccd654e401922b99cea2ada5/examples/eks-multicluster/outputs.tf
--------------------------------------------------------------------------------
/examples/eks-multicluster/providers.tf:
--------------------------------------------------------------------------------
1 | provider "kubernetes" {
2 | host = data.aws_eks_cluster.eks_cluster_1.endpoint
3 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks_cluster_1.certificate_authority[0].data)
4 | token = data.aws_eks_cluster_auth.eks_cluster_1.token
5 | alias = "eks_cluster_1"
6 | }
7 |
8 | provider "kubernetes" {
9 | host = data.aws_eks_cluster.eks_cluster_2.endpoint
10 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks_cluster_2.certificate_authority[0].data)
11 | token = data.aws_eks_cluster_auth.eks_cluster_2.token
12 | alias = "eks_cluster_2"
13 | }
14 |
15 | provider "helm" {
16 | kubernetes {
17 | host = data.aws_eks_cluster.eks_cluster_1.endpoint
18 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks_cluster_1.certificate_authority[0].data)
19 | token = data.aws_eks_cluster_auth.eks_cluster_1.token
20 | }
21 | alias = "eks_cluster_1"
22 | }
23 |
24 | provider "helm" {
25 | kubernetes {
26 | host = data.aws_eks_cluster.eks_cluster_2.endpoint
27 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks_cluster_2.certificate_authority[0].data)
28 | token = data.aws_eks_cluster_auth.eks_cluster_2.token
29 | }
30 | alias = "eks_cluster_2"
31 | }
32 |
33 | provider "aws" {
34 | region = var.eks_cluster_1_region
35 | alias = "eks_cluster_1"
36 | }
37 |
38 | provider "aws" {
39 | region = var.eks_cluster_2_region
40 | alias = "eks_cluster_2"
41 | }
42 |
--------------------------------------------------------------------------------
/examples/eks-multicluster/variables.tf:
--------------------------------------------------------------------------------
1 | variable "eks_cluster_1_id" {
2 | description = "Name or ID of the EKS cluster 1"
3 | type = string
4 | default = "eks-cluster-1"
5 | nullable = false
6 | }
7 |
8 | variable "eks_cluster_1_region" {
9 | description = "AWS region of the EKS cluster 1"
10 | type = string
11 | default = "us-west-2"
12 | nullable = false
13 | }
14 |
15 | variable "eks_cluster_2_id" {
16 | description = "Name or ID of the EKS cluster 2"
17 | type = string
18 | default = "eks-cluster-2"
19 | nullable = true
20 | }
21 |
22 | variable "eks_cluster_2_region" {
23 | description = "AWS region of the EKS cluster 2"
24 | type = string
25 | default = "us-west-2"
26 | nullable = true
27 | }
28 |
29 | variable "managed_prometheus_workspace_id" {
30 | description = "Amazon Managed Service for Prometheus Workspace ID"
31 | type = string
32 | default = ""
33 | }
34 |
35 | variable "managed_grafana_workspace_id" {
36 | description = "Amazon Managed Grafana Workspace ID"
37 | type = string
38 | default = ""
39 | }
40 |
41 | variable "grafana_api_key" {
42 | description = "API key for external-secrets to create secrets for grafana-operator"
43 | type = string
44 | default = ""
45 | sensitive = true
46 | }
47 |
--------------------------------------------------------------------------------
/examples/eks-multicluster/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.3.9"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 4.55.0"
8 | configuration_aliases = [aws.eks_cluster_1, aws.eks_cluster_2]
9 | }
10 | kubernetes = {
11 | source = "hashicorp/kubernetes"
12 | version = ">= 2.18.0"
13 | configuration_aliases = [kubernetes.eks_cluster_1, kubernetes.eks_cluster_2]
14 | }
15 | helm = {
16 | source = "hashicorp/helm"
17 | version = ">= 2.9.0"
18 | configuration_aliases = [helm.eks_cluster_1, helm.eks_cluster_2]
19 | }
20 | kubectl = {
21 | source = "alekc/kubectl"
22 | version = ">= 2.0.3"
23 | }
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/examples/existing-cluster-java/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = local.region
3 | }
4 |
5 | data "aws_eks_cluster_auth" "this" {
6 | name = var.eks_cluster_id
7 | }
8 |
9 | data "aws_eks_cluster" "this" {
10 | name = var.eks_cluster_id
11 | }
12 |
13 | data "aws_grafana_workspace" "this" {
14 | workspace_id = var.managed_grafana_workspace_id
15 | }
16 |
17 | provider "kubernetes" {
18 | host = local.eks_cluster_endpoint
19 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data)
20 | token = data.aws_eks_cluster_auth.this.token
21 | }
22 |
23 | provider "helm" {
24 | kubernetes {
25 | host = local.eks_cluster_endpoint
26 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data)
27 | token = data.aws_eks_cluster_auth.this.token
28 | }
29 | }
30 |
31 | locals {
32 | region = var.aws_region
33 | eks_cluster_endpoint = data.aws_eks_cluster.this.endpoint
34 | create_new_workspace = var.managed_prometheus_workspace_id == "" # create a new AMP workspace only when none was supplied
35 | tags = {
36 | Source = "github.com/aws-observability/terraform-aws-observability-accelerator"
37 | }
38 | }
39 |
40 | module "eks_monitoring" {
41 | source = "../../modules/eks-monitoring"
42 | # source = "github.com/aws-observability/terraform-aws-observability-accelerator//modules/eks-monitoring?ref=v2.0.0"
43 |
44 | # enable java metrics collection, dashboards and alerts rules creation
45 | enable_java = true
46 |
47 | # deploys external-secrets in to the cluster
48 | enable_external_secrets = true
49 | grafana_api_key = var.grafana_api_key
50 | target_secret_name = "grafana-admin-credentials"
51 | target_secret_namespace = "grafana-operator"
52 | grafana_url = "https://${data.aws_grafana_workspace.this.endpoint}"
53 |
54 | eks_cluster_id = var.eks_cluster_id
55 |
56 | # control the publishing of dashboards by specifying the boolean value for the variable 'enable_dashboards', default is 'true'
57 | enable_dashboards = var.enable_dashboards
58 |
59 | enable_managed_prometheus = local.create_new_workspace
60 | managed_prometheus_workspace_id = var.managed_prometheus_workspace_id
61 |
62 | # optional, defaults to 60s interval and 15s timeout
63 | prometheus_config = {
64 | global_scrape_interval = "60s"
65 | global_scrape_timeout = "15s"
66 | scrape_sample_limit = 2000
67 | }
68 |
69 | enable_logs = true
70 |
71 | tags = local.tags
72 | }
73 |
--------------------------------------------------------------------------------
/examples/existing-cluster-java/outputs.tf:
--------------------------------------------------------------------------------
1 | output "managed_prometheus_workspace_region" {
2 | description = "AWS Region"
3 | value = module.eks_monitoring.managed_prometheus_workspace_region
4 | }
5 |
6 | output "managed_prometheus_workspace_endpoint" {
7 | description = "Amazon Managed Prometheus workspace endpoint"
8 | value = module.eks_monitoring.managed_prometheus_workspace_endpoint
9 | }
10 |
11 | output "managed_prometheus_workspace_id" {
12 | description = "Amazon Managed Prometheus workspace ID"
13 | value = module.eks_monitoring.managed_prometheus_workspace_id
14 | }
15 |
16 | output "eks_cluster_version" {
17 | description = "EKS Cluster version"
18 | value = module.eks_monitoring.eks_cluster_version
19 | }
20 |
21 | output "eks_cluster_id" {
22 | description = "EKS Cluster Id"
23 | value = module.eks_monitoring.eks_cluster_id
24 | }
25 |
--------------------------------------------------------------------------------
/examples/existing-cluster-java/variables.tf:
--------------------------------------------------------------------------------
1 | variable "eks_cluster_id" {
2 | description = "Name of the EKS cluster"
3 | type = string
4 | }
5 |
6 | variable "aws_region" {
7 | description = "AWS Region"
8 | type = string
9 | }
10 |
11 | variable "managed_prometheus_workspace_id" {
12 | description = "Amazon Managed Service for Prometheus Workspace ID"
13 | type = string
14 | default = ""
15 | }
16 |
17 | variable "managed_grafana_workspace_id" {
18 | description = "Amazon Managed Grafana Workspace ID"
19 | type = string
20 | }
21 |
22 | variable "grafana_api_key" {
23 | description = "API key for external-secrets to create secrets for grafana-operator"
24 | type = string
25 | sensitive = true
26 | }
27 |
28 | variable "enable_dashboards" {
29 | description = "Enables or disables curated dashboards"
30 | type = bool
31 | default = true
32 | }
33 |
--------------------------------------------------------------------------------
/examples/existing-cluster-java/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.1.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 4.0.0"
8 | }
9 | kubernetes = {
10 | source = "hashicorp/kubernetes"
11 | version = ">= 2.10"
12 | }
13 | kubectl = {
14 | source = "alekc/kubectl"
15 | version = ">= 2.0.3"
16 | }
17 | helm = {
18 | source = "hashicorp/helm"
19 | version = ">= 2.4.1"
20 | }
21 | }
22 |
23 | # ## Used for end-to-end testing on project; update to suit your needs
24 | # backend "s3" {
25 | # bucket = "aws-observability-accelerator-terraform-states"
26 | # region = "us-west-2"
27 | # key = "e2e/existing-cluster-java/terraform.tfstate"
28 | # }
29 |
30 | }
31 |
--------------------------------------------------------------------------------
/examples/existing-cluster-nginx/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = local.region
3 | }
4 |
5 | data "aws_eks_cluster_auth" "this" {
6 | name = var.eks_cluster_id
7 | }
8 |
9 | data "aws_eks_cluster" "this" {
10 | name = var.eks_cluster_id
11 | }
12 |
13 | data "aws_grafana_workspace" "this" {
14 | workspace_id = var.managed_grafana_workspace_id
15 | }
16 |
17 | provider "kubernetes" {
18 | host = local.eks_cluster_endpoint
19 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data)
20 | token = data.aws_eks_cluster_auth.this.token
21 | }
22 |
23 | provider "helm" {
24 | kubernetes {
25 | host = local.eks_cluster_endpoint
26 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data)
27 | token = data.aws_eks_cluster_auth.this.token
28 | }
29 | }
30 |
31 | locals {
32 | region = var.aws_region
33 | eks_cluster_endpoint = data.aws_eks_cluster.this.endpoint
34 | create_new_workspace = var.managed_prometheus_workspace_id == "" # create a new AMP workspace only when none was supplied
35 |
36 | tags = {
37 | Source = "github.com/aws-observability/terraform-aws-observability-accelerator"
38 | }
39 | }
40 |
41 | module "eks_monitoring" {
42 | source = "../../modules/eks-monitoring"
43 | # source = "github.com/aws-observability/terraform-aws-observability-accelerator//modules/eks-monitoring?ref=v2.0.0"
44 |
45 | # enable NGINX metrics collection, dashboards and alerts rules creation
46 | enable_nginx = true
47 |
48 | eks_cluster_id = var.eks_cluster_id
49 |
50 | # deploys external-secrets in to the cluster
51 | enable_external_secrets = true
52 | grafana_api_key = var.grafana_api_key
53 | target_secret_name = "grafana-admin-credentials"
54 | target_secret_namespace = "grafana-operator"
55 | grafana_url = "https://${data.aws_grafana_workspace.this.endpoint}"
56 |
57 | # control the publishing of dashboards by specifying the boolean value for the variable 'enable_dashboards', default is 'true'
58 | enable_dashboards = var.enable_dashboards
59 |
60 | enable_managed_prometheus = local.create_new_workspace
61 | managed_prometheus_workspace_id = var.managed_prometheus_workspace_id
62 |
63 | enable_logs = true
64 |
65 | tags = local.tags
66 | }
67 |
--------------------------------------------------------------------------------
/examples/existing-cluster-nginx/outputs.tf:
--------------------------------------------------------------------------------
1 | output "managed_prometheus_workspace_region" {
2 | description = "AWS Region"
3 | value = module.eks_monitoring.managed_prometheus_workspace_region
4 | }
5 |
6 | output "managed_prometheus_workspace_endpoint" {
7 | description = "Amazon Managed Prometheus workspace endpoint"
8 | value = module.eks_monitoring.managed_prometheus_workspace_endpoint
9 | }
10 |
11 | output "managed_prometheus_workspace_id" {
12 | description = "Amazon Managed Prometheus workspace ID"
13 | value = module.eks_monitoring.managed_prometheus_workspace_id
14 | }
15 |
16 | output "eks_cluster_version" {
17 | description = "EKS Cluster version"
18 | value = module.eks_monitoring.eks_cluster_version
19 | }
20 |
21 | output "eks_cluster_id" {
22 | description = "EKS Cluster Id"
23 | value = module.eks_monitoring.eks_cluster_id
24 | }
25 |
--------------------------------------------------------------------------------
/examples/existing-cluster-nginx/sample_traffic/nginix-traffic-sample.yaml:
--------------------------------------------------------------------------------
# Sample workload used to generate traffic through the NGINX ingress:
# two http-echo backends (apple/banana), an Ingress routing to them, and a
# pod that curls both paths in a loop.
# NOTE: {{namespace}} and {{external_ip}} are plain-text placeholders
# substituted before `kubectl apply` — this is not a Helm chart.
apiVersion: v1
kind: Namespace
metadata:
  name: {{namespace}}
  labels:
    name: {{namespace}}

---

kind: Pod
apiVersion: v1
metadata:
  name: banana-app
  namespace: {{namespace}}
  labels:
    app: banana
spec:
  containers:
    - name: banana-app
      image: hashicorp/http-echo
      args:
        - "-text=banana"
      resources:
        limits:
          cpu: 100m
          memory: 100Mi
        requests:
          cpu: 50m
          memory: 50Mi
---

kind: Service
apiVersion: v1
metadata:
  name: banana-service
  namespace: {{namespace}}
spec:
  selector:
    app: banana
  ports:
    - port: 5678 # Default port for image
---

kind: Pod
apiVersion: v1
metadata:
  name: apple-app
  namespace: {{namespace}}
  labels:
    app: apple
spec:
  containers:
    - name: apple-app
      image: hashicorp/http-echo
      args:
        - "-text=apple"
      resources:
        limits:
          cpu: 100m
          memory: 100Mi
        requests:
          cpu: 50m
          memory: 50Mi
---

kind: Service
apiVersion: v1
metadata:
  name: apple-service
  namespace: {{namespace}}
spec:
  selector:
    app: apple
  ports:
    - port: 5678 # Default port for image

---

# Routes /apple and /banana on the ingress external IP to the two services.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-nginx-demo
  namespace: {{namespace}}
spec:
  rules:
  - host: {{external_ip}}
    http:
      paths:
      - path: /apple
        pathType: Prefix
        backend:
          service:
            name: apple-service
            port:
              number: 5678
      - path: /banana
        pathType: Prefix
        backend:
          service:
            name: banana-service
            port:
              number: 5678

---

# Generates steady traffic (one request per path per second) so the NGINX
# dashboards and metrics have data.
apiVersion: v1
kind: Pod
metadata:
  name: traffic-generator
  namespace: {{namespace}}
spec:
  containers:
  - name: traffic-generator
    image: ellerbrock/alpine-bash-curl-ssl
    command: ["/bin/bash"]
    args: ["-c", "while :; do curl http://{{external_ip}}/apple > /dev/null 2>&1; curl http://{{external_ip}}/banana > /dev/null 2>&1; sleep 1; done"]
    resources:
      limits:
        cpu: 100m
        memory: 100Mi
      requests:
        cpu: 50m
        memory: 50Mi
125 |
--------------------------------------------------------------------------------
/examples/existing-cluster-nginx/variables.tf:
--------------------------------------------------------------------------------
1 | variable "eks_cluster_id" {
2 | description = "EKS Cluster Id"
3 | type = string
4 | }
5 |
6 | variable "aws_region" {
7 | description = "AWS Region"
8 | type = string
9 | }
10 |
11 | variable "managed_prometheus_workspace_id" {
12 | description = "Amazon Managed Service for Prometheus (AMP) workspace ID"
13 | type = string
14 | default = ""
15 | }
16 |
17 | variable "managed_grafana_workspace_id" {
18 | description = "Amazon Managed Grafana (AMG) workspace ID"
19 | type = string
20 | }
21 |
22 | variable "grafana_api_key" {
23 | description = "API key for external-secrets to create secrets for grafana-operator"
24 | type = string
25 | sensitive = true
26 | }
27 |
28 | variable "enable_dashboards" {
29 | description = "Enables or disables curated dashboards"
30 | type = bool
31 | default = true
32 | }
33 |
--------------------------------------------------------------------------------
/examples/existing-cluster-nginx/versions.tf:
--------------------------------------------------------------------------------
# Provider and Terraform version constraints for the existing-cluster-nginx
# example; keep in sync with the Requirements table in the README.
terraform {
  required_version = ">= 1.1.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 4.0.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">= 2.10"
    }
    kubectl = {
      source  = "alekc/kubectl"
      version = ">= 2.0.3"
    }
    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.4.1"
    }
  }

  # ## Used for end-to-end testing on project; update to suit your needs
  # backend "s3" {
  #   bucket = "aws-observability-accelerator-terraform-states"
  #   region = "us-west-2"
  #   key    = "e2e/existing-cluster-nginx/terraform.tfstate"
  # }

}
31 |
--------------------------------------------------------------------------------
/examples/existing-cluster-with-base-and-infra/README.md:
--------------------------------------------------------------------------------
1 | # Existing Cluster with the AWS Observability accelerator EKS Infrastructure monitoring
2 |
3 | This example demonstrates how to use the AWS Observability Accelerator Terraform
4 | modules with Infrastructure monitoring enabled.
5 | The current example deploys the [AWS Distro for OpenTelemetry Operator](https://docs.aws.amazon.com/eks/latest/userguide/opentelemetry.html)
for Amazon EKS with its requirements and makes use of an existing Amazon Managed Grafana workspace.
7 | It creates a new Amazon Managed Service for Prometheus workspace unless provided with an existing one to reuse.
8 |
9 | It uses the `EKS monitoring` [module](../../modules/eks-monitoring/)
10 | to provide an existing EKS cluster with an OpenTelemetry collector,
11 | curated Grafana dashboards, Prometheus alerting and recording rules with multiple
12 | configuration options on the cluster infrastructure.
13 |
14 | View the full documentation for this example [here](https://aws-observability.github.io/terraform-aws-observability-accelerator/eks/)
15 |
16 |
17 |
18 | ## Requirements
19 |
20 | | Name | Version |
21 | |------|---------|
22 | | [terraform](#requirement\_terraform) | >= 1.1.0 |
23 | | [aws](#requirement\_aws) | >= 4.0.0 |
24 | | [helm](#requirement\_helm) | >= 2.4.1 |
25 | | [kubectl](#requirement\_kubectl) | >= 2.0.3 |
26 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 |
27 |
28 | ## Providers
29 |
30 | | Name | Version |
31 | |------|---------|
32 | | [aws](#provider\_aws) | >= 4.0.0 |
33 |
34 | ## Modules
35 |
36 | | Name | Source | Version |
37 | |------|--------|---------|
38 | | [eks\_monitoring](#module\_eks\_monitoring) | ../../modules/eks-monitoring | n/a |
39 |
40 | ## Resources
41 |
42 | | Name | Type |
43 | |------|------|
44 | | [aws_eks_cluster.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
45 | | [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
46 | | [aws_grafana_workspace.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/grafana_workspace) | data source |
47 |
48 | ## Inputs
49 |
50 | | Name | Description | Type | Default | Required |
51 | |------|-------------|------|---------|:--------:|
52 | | [aws\_region](#input\_aws\_region) | AWS Region | `string` | n/a | yes |
53 | | [eks\_cluster\_id](#input\_eks\_cluster\_id) | Name of the EKS cluster | `string` | `"eks-cluster-with-vpc"` | no |
54 | | [enable\_dashboards](#input\_enable\_dashboards) | Enables or disables curated dashboards. Dashboards are managed by the Grafana Operator | `bool` | `true` | no |
55 | | [grafana\_api\_key](#input\_grafana\_api\_key) | API key for authorizing the Grafana provider to make changes to Amazon Managed Grafana | `string` | n/a | yes |
56 | | [managed\_grafana\_workspace\_id](#input\_managed\_grafana\_workspace\_id) | Amazon Managed Grafana Workspace ID | `string` | n/a | yes |
57 | | [managed\_prometheus\_workspace\_id](#input\_managed\_prometheus\_workspace\_id) | Amazon Managed Service for Prometheus Workspace ID | `string` | `""` | no |
58 |
59 | ## Outputs
60 |
61 | | Name | Description |
62 | |------|-------------|
63 | | [eks\_cluster\_id](#output\_eks\_cluster\_id) | EKS Cluster Id |
64 | | [eks\_cluster\_version](#output\_eks\_cluster\_version) | EKS Cluster version |
65 | | [managed\_prometheus\_workspace\_endpoint](#output\_managed\_prometheus\_workspace\_endpoint) | Amazon Managed Prometheus workspace endpoint |
66 | | [managed\_prometheus\_workspace\_id](#output\_managed\_prometheus\_workspace\_id) | Amazon Managed Prometheus workspace ID |
67 | | [managed\_prometheus\_workspace\_region](#output\_managed\_prometheus\_workspace\_region) | AWS Region |
68 |
69 |
--------------------------------------------------------------------------------
/examples/existing-cluster-with-base-and-infra/cleanup.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Tears down the example: targeted module destroys first, then a final full
# destroy for anything remaining.
#
# NOTE: `set -o errexit` is deliberately NOT used here — a failing
# `terraform destroy` must reach the reporting branch below rather than
# aborting the script before it can print FAILED.
set -o pipefail

read -p "Enter the region: " region
export AWS_DEFAULT_REGION=$region

# Modules to destroy explicitly before the final full destroy.
targets=(
  "module.eks_monitoring"
)

for target in "${targets[@]}"
do
  # Run destroy exactly once, capturing output so it can be both displayed
  # and checked. (Previously destroy ran twice per target: once streamed,
  # then again captured — doubling work and hiding the second run's output.)
  destroy_output=$(terraform destroy -target="$target" -auto-approve 2>&1)
  status=$?
  echo "$destroy_output"
  if [[ $status -eq 0 && $destroy_output == *"Destroy complete!"* ]]; then
    echo "SUCCESS: Terraform destroy of $target completed successfully"
  else
    echo "FAILED: Terraform destroy of $target failed"
    exit 1
  fi
done

# Final destroy to remove all remaining resources.
destroy_output=$(terraform destroy -auto-approve 2>&1)
status=$?
echo "$destroy_output"
if [[ $status -eq 0 && $destroy_output == *"Destroy complete!"* ]]; then
  echo "SUCCESS: Terraform destroy of all targets completed successfully"
else
  echo "FAILED: Terraform destroy of all targets failed"
  exit 1
fi
32 |
--------------------------------------------------------------------------------
/examples/existing-cluster-with-base-and-infra/install.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Applies the example in dependency order: targeted module applies first,
# then a final full apply for any remaining resources.
#
# pipefail makes the exit status of `terraform ... | tee` reflect terraform
# rather than tee. Without it, the status checked after the command
# substitution below is tee's (almost always 0), masking terraform failures.
set -o pipefail

echo "Initializing ..."
# Abort early: nothing later can succeed if init fails.
terraform init || { echo "\"terraform init\" failed"; exit 1; }

# List of Terraform modules to apply in sequence
targets=(
  "module.eks_monitoring"
)

# Apply modules in sequence
for target in "${targets[@]}"
do
  echo "Applying module $target..."
  # NOTE(review): `tee /dev/tty` requires an interactive terminal; confirm
  # this script is never run in a TTY-less CI job.
  apply_output=$(terraform apply -target="$target" -auto-approve 2>&1 | tee /dev/tty)
  if [[ $? -eq 0 && $apply_output == *"Apply complete"* ]]; then
    echo "SUCCESS: Terraform apply of $target completed successfully"
  else
    echo "FAILED: Terraform apply of $target failed"
    exit 1
  fi
done

# Final apply to catch any remaining resources
echo "Applying remaining resources..."
apply_output=$(terraform apply -auto-approve 2>&1 | tee /dev/tty)
if [[ $? -eq 0 && $apply_output == *"Apply complete"* ]]; then
  echo "SUCCESS: Terraform apply of all modules completed successfully"
else
  echo "FAILED: Terraform apply of all modules failed"
  exit 1
fi
33 |
--------------------------------------------------------------------------------
/examples/existing-cluster-with-base-and-infra/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = local.region
3 | }
4 |
5 | data "aws_eks_cluster_auth" "this" {
6 | name = var.eks_cluster_id
7 | }
8 |
9 | data "aws_eks_cluster" "this" {
10 | name = var.eks_cluster_id
11 | }
12 |
13 | data "aws_grafana_workspace" "this" {
14 | workspace_id = var.managed_grafana_workspace_id
15 | }
16 |
17 | provider "kubernetes" {
18 | host = local.eks_cluster_endpoint
19 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data)
20 | token = data.aws_eks_cluster_auth.this.token
21 | }
22 |
23 | provider "helm" {
24 | kubernetes {
25 | host = local.eks_cluster_endpoint
26 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data)
27 | token = data.aws_eks_cluster_auth.this.token
28 | }
29 | }
30 |
locals {
  region               = var.aws_region
  eks_cluster_endpoint = data.aws_eks_cluster.this.endpoint
  # True when no existing AMP workspace ID was supplied, in which case the
  # eks-monitoring module creates a new workspace.
  create_new_workspace = var.managed_prometheus_workspace_id == ""
  tags = {
    Source = "github.com/aws-observability/terraform-aws-observability-accelerator"
  }
}
39 |
40 | module "eks_monitoring" {
41 | source = "../../modules/eks-monitoring"
42 | # source = "github.com/aws-observability/terraform-aws-observability-accelerator//modules/eks-monitoring?ref=v2.0.0"
43 |
44 | eks_cluster_id = var.eks_cluster_id
45 |
46 | # deploys AWS Distro for OpenTelemetry operator into the cluster
47 | enable_amazon_eks_adot = true
48 |
49 | # reusing existing certificate manager? defaults to true
50 | enable_cert_manager = true
51 |
52 | # enable EKS API server monitoring
53 | enable_apiserver_monitoring = true
54 |
55 | # deploys external-secrets in to the cluster
56 | enable_external_secrets = true
57 | grafana_api_key = var.grafana_api_key
58 | target_secret_name = "grafana-admin-credentials"
59 | target_secret_namespace = "grafana-operator"
60 | grafana_url = "https://${data.aws_grafana_workspace.this.endpoint}"
61 |
62 | # control the publishing of dashboards by specifying the boolean value for the variable 'enable_dashboards', default is 'true'
63 | enable_dashboards = var.enable_dashboards
64 |
65 | # creates a new Amazon Managed Prometheus workspace, defaults to true
66 | enable_managed_prometheus = local.create_new_workspace
67 | managed_prometheus_workspace_id = var.managed_prometheus_workspace_id
68 |
69 | # sets up the Amazon Managed Prometheus alert manager at the workspace level
70 | enable_alertmanager = true
71 |
72 | # optional, defaults to 60s interval and 15s timeout
73 | prometheus_config = {
74 | global_scrape_interval = "60s"
75 | global_scrape_timeout = "15s"
76 | }
77 |
78 | enable_logs = true
79 |
80 | tags = local.tags
81 | }
82 |
--------------------------------------------------------------------------------
/examples/existing-cluster-with-base-and-infra/outputs.tf:
--------------------------------------------------------------------------------
1 | output "managed_prometheus_workspace_region" {
2 | description = "AWS Region"
3 | value = module.eks_monitoring.managed_prometheus_workspace_region
4 | }
5 |
6 | output "managed_prometheus_workspace_endpoint" {
7 | description = "Amazon Managed Prometheus workspace endpoint"
8 | value = module.eks_monitoring.managed_prometheus_workspace_endpoint
9 | }
10 |
11 | output "managed_prometheus_workspace_id" {
12 | description = "Amazon Managed Prometheus workspace ID"
13 | value = module.eks_monitoring.managed_prometheus_workspace_id
14 | }
15 |
16 | output "eks_cluster_version" {
17 | description = "EKS Cluster version"
18 | value = module.eks_monitoring.eks_cluster_version
19 | }
20 |
21 | output "eks_cluster_id" {
22 | description = "EKS Cluster Id"
23 | value = module.eks_monitoring.eks_cluster_id
24 | }
25 |
--------------------------------------------------------------------------------
/examples/existing-cluster-with-base-and-infra/variables.tf:
--------------------------------------------------------------------------------
1 | variable "eks_cluster_id" {
2 | description = "Name of the EKS cluster"
3 | type = string
4 | default = "eks-cluster-with-vpc"
5 | }
6 |
7 | variable "aws_region" {
8 | description = "AWS Region"
9 | type = string
10 | }
11 |
12 | variable "managed_prometheus_workspace_id" {
13 | description = "Amazon Managed Service for Prometheus Workspace ID"
14 | type = string
15 | default = ""
16 | }
17 |
18 | variable "managed_grafana_workspace_id" {
19 | description = "Amazon Managed Grafana Workspace ID"
20 | type = string
21 | }
22 |
23 | variable "grafana_api_key" {
24 | description = "API key for authorizing the Grafana provider to make changes to Amazon Managed Grafana"
25 | type = string
26 | sensitive = true
27 | }
28 |
29 | variable "enable_dashboards" {
30 | description = "Enables or disables curated dashboards. Dashboards are managed by the Grafana Operator"
31 | type = bool
32 | default = true
33 | }
34 |
--------------------------------------------------------------------------------
/examples/existing-cluster-with-base-and-infra/versions.tf:
--------------------------------------------------------------------------------
# Provider and Terraform version constraints for this example; keep in sync
# with the Requirements table in the README.
terraform {
  required_version = ">= 1.1.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 4.0.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">= 2.10"
    }
    kubectl = {
      source  = "alekc/kubectl"
      version = ">= 2.0.3"
    }
    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.4.1"
    }
  }

  # ## Used for end-to-end testing on project; update to suit your needs
  # backend "s3" {
  #   bucket = "aws-observability-accelerator-terraform-states"
  #   region = "us-west-2"
  #   key    = "e2e/existing-cluster-with-base-and-infra/terraform.tfstate"
  # }

}
31 |
--------------------------------------------------------------------------------
/examples/managed-grafana-workspace/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = var.aws_region
3 | }
4 |
5 | locals {
6 | name = "aws-observability-accelerator"
7 | description = "Amazon Managed Grafana workspace for ${local.name}"
8 |
9 | tags = {
10 | GithubRepo = "terraform-aws-observability-accelerator"
11 | GithubOrg = "aws-observability"
12 | }
13 | }
14 |
15 | module "managed_grafana" {
16 | source = "terraform-aws-modules/managed-service-grafana/aws"
17 | version = "1.10.0"
18 |
19 | name = local.name
20 | associate_license = false
21 | description = local.description
22 | account_access_type = "CURRENT_ACCOUNT"
23 | authentication_providers = ["AWS_SSO"]
24 | permission_type = "SERVICE_MANAGED"
25 | data_sources = ["CLOUDWATCH", "PROMETHEUS", "XRAY"]
26 | notification_destinations = ["SNS"]
27 | stack_set_name = local.name
28 |
29 | configuration = jsonencode({
30 | unifiedAlerting = {
31 | enabled = true
32 | }
33 | })
34 |
35 | grafana_version = "9.4"
36 |
37 |
38 | # Workspace IAM role
39 | create_iam_role = true
40 | iam_role_name = local.name
41 | use_iam_role_name_prefix = true
42 | iam_role_description = local.description
43 | iam_role_path = "/grafana/"
44 | iam_role_force_detach_policies = true
45 | iam_role_max_session_duration = 7200
46 | iam_role_tags = local.tags
47 |
48 | tags = local.tags
49 | }
50 |
--------------------------------------------------------------------------------
/examples/managed-grafana-workspace/outputs.tf:
--------------------------------------------------------------------------------
1 | output "grafana_workspace_endpoint" {
2 | description = "Amazon Managed Grafana Workspace endpoint"
3 | value = "https://${module.managed_grafana.workspace_endpoint}"
4 | }
5 |
6 | output "grafana_workspace_id" {
7 | description = "Amazon Managed Grafana Workspace ID"
8 | value = module.managed_grafana.workspace_id
9 | }
10 |
11 | output "grafana_workspace_iam_role_arn" {
12 | description = "Amazon Managed Grafana Workspace's IAM Role ARN"
13 | value = module.managed_grafana.workspace_iam_role_arn
14 | }
15 |
--------------------------------------------------------------------------------
/examples/managed-grafana-workspace/readme.md:
--------------------------------------------------------------------------------
1 | # Amazon Managed Grafana Workspace Setup
2 |
3 | This example creates an Amazon Managed Grafana Workspace with
4 | Amazon CloudWatch, AWS X-Ray and Amazon Managed Service for Prometheus
data sources.
6 |
7 | The authentication method chosen for this example is with IAM Identity
Center (formerly AWS Single Sign-On). You can extend this example to add SAML.
9 |
10 | Step-by-step instructions available on our [docs site](https://aws-observability.github.io/terraform-aws-observability-accelerator/)
11 | under **Supporting Examples**
12 |
13 |
14 | ## Requirements
15 |
16 | | Name | Version |
17 | |------|---------|
18 | | [terraform](#requirement\_terraform) | >= 1.3.0 |
19 | | [aws](#requirement\_aws) | >= 5.0.0 |
20 |
21 | ## Providers
22 |
23 | No providers.
24 |
25 | ## Modules
26 |
27 | | Name | Source | Version |
28 | |------|--------|---------|
29 | | [managed\_grafana](#module\_managed\_grafana) | terraform-aws-modules/managed-service-grafana/aws | 1.10.0 |
30 |
31 | ## Resources
32 |
33 | No resources.
34 |
35 | ## Inputs
36 |
37 | | Name | Description | Type | Default | Required |
38 | |------|-------------|------|---------|:--------:|
39 | | [aws\_region](#input\_aws\_region) | AWS Region | `string` | n/a | yes |
40 |
41 | ## Outputs
42 |
43 | | Name | Description |
44 | |------|-------------|
45 | | [grafana\_workspace\_endpoint](#output\_grafana\_workspace\_endpoint) | Amazon Managed Grafana Workspace endpoint |
46 | | [grafana\_workspace\_iam\_role\_arn](#output\_grafana\_workspace\_iam\_role\_arn) | Amazon Managed Grafana Workspace's IAM Role ARN |
47 | | [grafana\_workspace\_id](#output\_grafana\_workspace\_id) | Amazon Managed Grafana Workspace ID |
48 |
49 |
--------------------------------------------------------------------------------
/examples/managed-grafana-workspace/variables.tf:
--------------------------------------------------------------------------------
1 | variable "aws_region" {
2 | description = "AWS Region"
3 | type = string
4 | }
5 |
--------------------------------------------------------------------------------
/examples/managed-grafana-workspace/versions.tf:
--------------------------------------------------------------------------------
# Provider and Terraform version constraints; keep in sync with the
# Requirements table in the README.
terraform {
  required_version = ">= 1.3.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 5.0.0"
    }
  }

  # ## Used for end-to-end testing on project; update to suit your needs
  # backend "s3" {
  #   bucket = "aws-observability-accelerator-terraform-states"
  #   region = "us-west-2"
  #   key    = "e2e/managed-grafana-workspace/terraform.tfstate"
  # }
}
18 |
--------------------------------------------------------------------------------
/examples/managed-prometheus-monitoring/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = local.region
3 | }
4 |
5 | provider "grafana" {
6 | url = local.amg_ws_endpoint
7 | auth = var.grafana_api_key
8 | }
9 |
10 | data "aws_grafana_workspace" "this" {
11 | workspace_id = var.managed_grafana_workspace_id
12 | }
13 |
14 | locals {
15 | region = var.aws_region
16 | amg_ws_endpoint = "https://${data.aws_grafana_workspace.this.endpoint}"
17 | }
18 |
19 | resource "grafana_folder" "this" {
20 | title = "Amazon Managed Prometheus monitoring dashboards"
21 | }
22 |
23 | module "managed_prometheus_monitoring" {
24 | source = "../../modules/managed-prometheus-monitoring"
25 | dashboards_folder_id = resource.grafana_folder.this.id
26 | aws_region = local.region
27 | managed_prometheus_workspace_ids = var.managed_prometheus_workspace_ids
28 | }
29 |
--------------------------------------------------------------------------------
/examples/managed-prometheus-monitoring/outputs.tf:
--------------------------------------------------------------------------------
1 | output "grafana_dashboard_urls" {
2 | description = "URLs for dashboards created"
3 | value = module.managed_prometheus_monitoring.grafana_dashboard_urls
4 | }
5 |
--------------------------------------------------------------------------------
/examples/managed-prometheus-monitoring/variables.tf:
--------------------------------------------------------------------------------
1 | variable "grafana_api_key" {
2 | description = "API key for authorizing the Grafana provider to make changes to Amazon Managed Grafana"
3 | type = string
4 | sensitive = true
5 | }
6 |
7 | variable "aws_region" {
8 | description = "AWS Region"
9 | type = string
10 | }
11 |
12 | variable "managed_prometheus_workspace_ids" {
13 | description = "Amazon Managed Service for Prometheus Workspace IDs to create Alarms for"
14 | type = string
15 | }
16 |
17 | variable "managed_grafana_workspace_id" {
18 | description = "Amazon Managed Grafana workspace ID"
19 | type = string
20 | }
21 |
--------------------------------------------------------------------------------
/examples/managed-prometheus-monitoring/versions.tf:
--------------------------------------------------------------------------------
# Provider and Terraform version constraints for this example.
terraform {
  required_version = ">= 1.1.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 4.0.0"
    }
    grafana = {
      source  = "grafana/grafana"
      version = ">= 1.25.0"
    }
  }

  # ## Used for end-to-end testing on project; update to suit your needs
  # backend "s3" {
  #   bucket = "aws-observability-accelerator-terraform-states"
  #   region = "us-west-2"
  #   key    = "e2e/managed-prometheus-monitoring/terraform.tfstate"
  # }
}
22 |
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: AWS Observability Accelerator for Terraform
2 | docs_dir: "docs"
3 | copyright: "Copyright © Amazon 2022"
4 | site_author: "AWS"
5 | site_url: "https://aws-observability.github.io/terraform-aws-observability-accelerator/"
6 | repo_name: "aws-observability/terraform-aws-observability-accelerator"
7 | repo_url: "https://github.com/aws-observability/terraform-aws-observability-accelerator"
8 |
9 | theme:
10 | logo: images/aws-logo.png
11 | favicon: images/aws-favicon.png
12 | name: material
13 | font:
14 | text: ember
15 | custom_dir: docs/overrides
16 | icon:
17 | repo: fontawesome/brands/github
18 | features:
19 | - navigation.tabs.sticky
20 |
21 | palette:
22 | primary: indigo
23 | accent: grey
24 |
25 | nav:
26 | - Home: index.md
27 | - Concepts: concepts.md
28 | - Amazon EKS:
29 | - Infrastructure: eks/index.md
30 | - EKS API server: eks/eks-apiserver.md
  - EKS GPU monitoring: eks/gpu-monitoring.md
32 | - Multicluster:
33 | - Single AWS account: eks/multicluster.md
34 | - Cross AWS account: eks/multiaccount.md
35 | - Viewing logs: eks/logs.md
36 | - Tracing: eks/tracing.md
37 | - Patterns:
38 | - Java/JMX: eks/java.md
39 | - Nginx: eks/nginx.md
40 | - Istio: eks/istio.md
41 | - Troubleshooting: eks/troubleshooting.md
42 | - Teardown: eks/destroy.md
43 | - AWS Distro for OpenTelemetry (ADOT):
44 | - Monitoring ADOT collector health: adothealth/index.md
45 | - CloudWatch Container Insights & CloudWatch Application Signals:
46 | - Amazon EKS: container-insights/eks.md
47 | - Monitoring Managed Service for Prometheus Workspaces: workloads/managed-prometheus.md
48 | - Amazon ECS:
49 | - Cluster Monitoring: ecs/ecs-monitoring-on-ec2.md
50 | - Supporting Examples:
51 | - EKS Cluster with VPC: helpers/new-eks-cluster.md
52 | - Amazon Managed Grafana setup: helpers/managed-grafana.md
53 | - ECS Cluster with VPC: helpers/ecs-cluster-with-vpc.md
54 | - Support & Feedback: support.md
55 | - Contributors: contributors.md
56 |
57 | markdown_extensions:
58 | - toc:
59 | permalink: true
60 | - admonition
61 | - codehilite
62 | - footnotes
63 | - pymdownx.critic
64 | - pymdownx.tabbed:
65 | alternate_style: true
66 | - pymdownx.superfences:
67 | custom_fences:
68 | - name: mermaid
69 | class: mermaid
70 | format: !!python/name:pymdownx.superfences.fence_code_format
71 |
72 | plugins:
73 | - search
74 |
--------------------------------------------------------------------------------
/modules/ecs-monitoring/configs/config.yaml:
--------------------------------------------------------------------------------
# ADOT collector configuration template for ECS monitoring.
# Rendered by Terraform templatefile(): ${...} tokens are Terraform
# substitutions; "$$" escapes a literal "$" for the collector.
extensions:
  sigv4auth:
    region: "${aws_region}"
    service: "aps"
  ecs_observer: # extension type is ecs_observer
    cluster_name: "${cluster_name}" # cluster name must be configured manually
    cluster_region: "${cluster_region}" # region can be configured directly or use AWS_REGION env var
    result_file: "/etc/ecs_sd_targets.yaml" # the directory for this file must already exist
    refresh_interval: ${refresh_interval}
    job_label_name: prometheus_job
    # JMX
    docker_labels:
      - port_label: "ECS_PROMETHEUS_EXPORTER_PORT"

receivers:
  otlp:
    protocols:
      grpc:
        endpoint: ${otlp_grpc_endpoint}
      http:
        endpoint: ${otlp_http_endpoint}
  # Scrapes the targets discovered by ecs_observer (written to result_file).
  prometheus:
    config:
      scrape_configs:
        - job_name: "ecssd"
          file_sd_configs:
            - files:
                - "/etc/ecs_sd_targets.yaml"
          relabel_configs:
            - source_labels: [__meta_ecs_cluster_name]
              action: replace
              target_label: ClusterName
            - source_labels: [__meta_ecs_service_name]
              action: replace
              target_label: ServiceName
            - source_labels: [__meta_ecs_task_definition_family]
              action: replace
              target_label: TaskDefinitionFamily
            - source_labels: [__meta_ecs_task_launch_type]
              action: replace
              target_label: LaunchType
            - source_labels: [__meta_ecs_container_name]
              action: replace
              target_label: container_name
            - action: labelmap
              regex: ^__meta_ecs_container_labels_(.+)$
              # "$$1" renders as "$1" after Terraform templatefile escaping
              replacement: "$$1"
  awsecscontainermetrics:
    collection_interval: ${ecs_metrics_collection_interval}

processors:
  resource:
    attributes:
      - key: receiver
        value: "prometheus"
        action: insert
  # Keeps only the task-level ECS metrics listed below.
  filter:
    metrics:
      include:
        match_type: strict
        metric_names:
          - ecs.task.memory.utilized
          - ecs.task.memory.reserved
          - ecs.task.memory.usage
          - ecs.task.cpu.utilized
          - ecs.task.cpu.reserved
          - ecs.task.cpu.usage.vcpu
          - ecs.task.network.rate.rx
          - ecs.task.network.rate.tx
          - ecs.task.storage.read_bytes
          - ecs.task.storage.write_bytes
  # Renames metrics/labels to the names the curated dashboards expect.
  metricstransform:
    transforms:
      - include: ".*"
        match_type: regexp
        action: update
        operations:
          - label: prometheus_job
            new_label: job
            action: update_label
      - include: ecs.task.memory.utilized
        action: update
        new_name: MemoryUtilized
      - include: ecs.task.memory.reserved
        action: update
        new_name: MemoryReserved
      - include: ecs.task.memory.usage
        action: update
        new_name: MemoryUsage
      - include: ecs.task.cpu.utilized
        action: update
        new_name: CpuUtilized
      - include: ecs.task.cpu.reserved
        action: update
        new_name: CpuReserved
      - include: ecs.task.cpu.usage.vcpu
        action: update
        new_name: CpuUsage
      - include: ecs.task.network.rate.rx
        action: update
        new_name: NetworkRxBytes
      - include: ecs.task.network.rate.tx
        action: update
        new_name: NetworkTxBytes
      - include: ecs.task.storage.read_bytes
        action: update
        new_name: StorageReadBytes
      - include: ecs.task.storage.write_bytes
        action: update
        new_name: StorageWriteBytes

exporters:
  prometheusremotewrite:
    endpoint: "${amp_remote_write_ep}"
    auth:
      authenticator: sigv4auth
  # NOTE(review): the `logging` exporter and its `loglevel` option are
  # deprecated in newer collector releases (replaced by `debug`/`verbosity`);
  # confirm the ADOT image version before changing.
  logging:
    loglevel: debug

service:
  extensions: [ecs_observer, sigv4auth]
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [resource, metricstransform]
      exporters: [prometheusremotewrite]
    metrics/ecs:
      receivers: [awsecscontainermetrics]
      processors: [filter]
      exporters: [logging, prometheusremotewrite]
131 |
--------------------------------------------------------------------------------
/modules/ecs-monitoring/locals.tf:
--------------------------------------------------------------------------------
1 | data "aws_region" "current" {}
2 |
3 | locals {
4 | region = data.aws_region.current.name
5 | name = "amg-ex-${replace(basename(path.cwd), "_", "-")}"
6 | description = "AWS Managed Grafana service for ${local.name}"
7 | prometheus_ws_endpoint = module.managed_prometheus_default[0].workspace_prometheus_endpoint
8 |
9 | default_otel_values = {
10 | aws_region = data.aws_region.current.name
11 | cluster_name = var.aws_ecs_cluster_name
12 | cluster_region = data.aws_region.current.name
13 | refresh_interval = var.refresh_interval
14 | ecs_metrics_collection_interval = var.ecs_metrics_collection_interval
15 | amp_remote_write_ep = "${local.prometheus_ws_endpoint}api/v1/remote_write"
16 | otlp_grpc_endpoint = var.otlp_grpc_endpoint
17 | otlp_http_endpoint = var.otlp_http_endpoint
18 | }
19 |
20 | ssm_param_value = yamlencode(
21 | templatefile("${path.module}/configs/config.yaml", local.default_otel_values)
22 | )
23 |
24 | container_def_default_values = {
25 | container_name = var.container_name
26 | otel_image_ver = var.otel_image_ver
27 | aws_region = data.aws_region.current.name
28 | }
29 |
30 | container_definitions = templatefile("${path.module}/task-definitions/otel_collector.json", local.container_def_default_values)
31 |
32 | }
33 |
--------------------------------------------------------------------------------
/modules/ecs-monitoring/main.tf:
--------------------------------------------------------------------------------
1 | # SSM Parameter for storing and distributing the ADOT config
2 | resource "aws_ssm_parameter" "adot_config" {
3 | name = "/terraform-aws-observability/otel_collector_config"
4 | description = "SSM parameter for aws-observability-accelerator/otel-collector-config"
5 | type = "String"
6 | value = local.ssm_param_value # rendered collector config (locals.tf); injected into the task via the AOT_CONFIG_CONTENT secret
7 | tier = "Intelligent-Tiering"
8 | }
9 |
10 | ############################################
11 | # Managed Grafana and Prometheus Module
12 | ############################################
13 |
14 | module "managed_grafana_default" {
15 | count = var.create_managed_grafana_ws ? 1 : 0 # opt-out when an existing Grafana workspace is used
16 |
17 | source = "terraform-aws-modules/managed-service-grafana/aws"
18 | version = "2.1.0"
19 | name = "${local.name}-default"
20 | associate_license = false
21 | }
22 |
23 | module "managed_prometheus_default" {
24 | count = var.create_managed_prometheus_ws ? 1 : 0 # opt-out when an existing AMP workspace is used
25 |
26 | source = "terraform-aws-modules/managed-service-prometheus/aws"
27 | version = "2.2.2"
28 | workspace_alias = "${local.name}-default"
29 | }
30 |
31 | ###########################################
32 | # Task Definition for ADOT ECS Prometheus
33 | ###########################################
34 | resource "aws_ecs_task_definition" "adot_ecs_prometheus" {
35 | family = "adot_prometheus_td"
36 | task_role_arn = var.task_role_arn
37 | execution_role_arn = var.execution_role_arn
38 | network_mode = "bridge"
39 | requires_compatibilities = ["EC2"] # EC2 launch type only; bridge network mode is not supported on Fargate
40 | cpu = var.ecs_adot_cpu
41 | memory = var.ecs_adot_mem
42 | container_definitions = local.container_definitions # rendered from task-definitions/otel_collector.json (locals.tf)
43 | }
44 |
45 | ############################################
46 | # ECS Service
47 | ############################################
48 | resource "aws_ecs_service" "adot_ecs_prometheus" {
49 | name = "adot_prometheus_svc"
50 | cluster = var.aws_ecs_cluster_name
51 | task_definition = aws_ecs_task_definition.adot_ecs_prometheus.arn
52 | desired_count = 1 # a single collector task scrapes the whole cluster
53 | }
53 | }
54 |
--------------------------------------------------------------------------------
/modules/ecs-monitoring/outputs.tf:
--------------------------------------------------------------------------------
1 | output "grafana_workspace_id" {
2 | description = "The ID of the Grafana workspace"
3 | value = try(module.managed_grafana_default[0].workspace_id, "")
4 | }
5 |
6 | output "grafana_workspace_endpoint" {
7 | description = "The endpoint of the Grafana workspace"
8 | value = try(module.managed_grafana_default[0].workspace_endpoint, "")
9 | }
10 |
11 | output "prometheus_workspace_id" {
12 | description = "Identifier of the workspace"
13 | value = try(module.managed_prometheus_default[0].id, "")
14 | }
15 |
16 | output "prometheus_workspace_prometheus_endpoint" {
17 | description = "Prometheus endpoint available for this workspace"
18 | value = try(module.managed_prometheus_default[0].prometheus_endpoint, "")
19 | }
20 |
--------------------------------------------------------------------------------
/modules/ecs-monitoring/task-definitions/otel_collector.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "${container_name}",
4 | "image": "amazon/aws-otel-collector:${otel_image_ver}",
5 | "secrets": [
6 | {
7 | "name": "AOT_CONFIG_CONTENT",
8 | "valueFrom": "/terraform-aws-observability/otel_collector_config"
9 | }
10 | ],
11 | "logConfiguration": {
12 | "logDriver": "awslogs",
13 | "options": {
14 | "awslogs-create-group": "True",
15 | "awslogs-group": "/adot/collector",
16 | "awslogs-region": "${aws_region}",
17 | "awslogs-stream-prefix": "ecs-prometheus"
18 | }
19 | }
20 | }
21 | ]
22 |
--------------------------------------------------------------------------------
/modules/ecs-monitoring/variables.tf:
--------------------------------------------------------------------------------
1 | variable "aws_ecs_cluster_name" {
2 | description = "Name of your ECS cluster"
3 | type = string
4 | }
5 |
6 | variable "task_role_arn" {
7 | description = "ARN of the IAM Task Role"
8 | type = string
9 | }
10 |
11 | variable "execution_role_arn" {
12 | description = "ARN of the IAM Execution Role"
13 | type = string
14 | }
15 |
16 | variable "ecs_adot_cpu" {
17 | description = "CPU to be allocated for the ADOT ECS TASK"
18 | type = string
19 | default = "256"
20 | }
21 |
22 | variable "ecs_adot_mem" {
23 | description = "Memory to be allocated for the ADOT ECS TASK"
24 | type = string
25 | default = "512"
26 | }
27 |
28 | variable "create_managed_grafana_ws" {
29 | description = "Creates a Workspace for Amazon Managed Grafana"
30 | type = bool
31 | default = true
32 | }
33 |
34 | variable "create_managed_prometheus_ws" {
35 | description = "Creates a Workspace for Amazon Managed Prometheus"
36 | type = bool
37 | default = true
38 | }
39 |
40 | variable "refresh_interval" {
41 | description = "Refresh interval for ecs_observer"
42 | type = string
43 | default = "60s"
44 | }
45 |
46 | variable "ecs_metrics_collection_interval" {
47 | description = "Collection interval for ecs metrics"
48 | type = string
49 | default = "15s"
50 | }
51 |
52 | variable "otlp_grpc_endpoint" {
53 | description = "otlpGrpcEndpoint"
54 | type = string
55 | default = "0.0.0.0:4317"
56 | }
57 |
58 |
59 | variable "otlp_http_endpoint" {
60 | description = "otlpHttpEndpoint"
61 | type = string
62 | default = "0.0.0.0:4318"
63 | }
64 |
65 | variable "container_name" {
66 | description = "Container Name for Adot"
67 | type = string
68 | default = "adot_new"
69 | }
70 |
71 | variable "otel_image_ver" {
72 | description = "Otel Docker Image version"
73 | type = string
74 | default = "v0.31.0"
75 | }
76 |
--------------------------------------------------------------------------------
/modules/ecs-monitoring/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 5.0.0"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/eks-container-insights/data.tf:
--------------------------------------------------------------------------------
1 | data "aws_partition" "current" {}
2 | data "aws_caller_identity" "current" {}
3 | data "aws_region" "current" {}
4 |
5 | data "aws_eks_cluster" "eks_cluster" {
6 | name = var.eks_cluster_id
7 | }
8 |
--------------------------------------------------------------------------------
/modules/eks-container-insights/locals.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | kubernetes_version = var.kubernetes_version
3 | eks_oidc_issuer_url = replace(data.aws_eks_cluster.eks_cluster.identity[0].oidc[0].issuer, "https://", "")
4 |
5 | addon_context = {
6 | aws_caller_identity_account_id = data.aws_caller_identity.current.account_id
7 | aws_caller_identity_arn = data.aws_caller_identity.current.arn
8 | aws_partition_id = data.aws_partition.current.partition
9 | aws_region_name = data.aws_region.current.name
10 | eks_oidc_provider_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:oidc-provider/${local.eks_oidc_issuer_url}"
11 | eks_cluster_id = data.aws_eks_cluster.eks_cluster.id
12 | tags = var.tags
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/modules/eks-container-insights/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | name = "amazon-cloudwatch-observability"
3 | }
4 |
5 | module "cloudwatch_observability_irsa_role" {
6 | count = var.create_cloudwatch_observability_irsa_role ? 1 : 0
7 |
8 | source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
9 | version = "v5.33.0"
10 | role_name = "cloudwatch-observability"
11 | attach_cloudwatch_observability_policy = true
12 |
13 | oidc_providers = {
14 | ex = {
15 | provider_arn = var.eks_oidc_provider_arn
16 | namespace_service_accounts = ["amazon-cloudwatch:cloudwatch-agent"]
17 | }
18 | }
19 | }
20 |
21 | data "aws_eks_addon_version" "eks_addon_version" {
22 | addon_name = local.name
23 | kubernetes_version = try(var.addon_config.kubernetes_version, var.kubernetes_version)
24 | most_recent = try(var.addon_config.most_recent, true)
25 | }
26 |
27 | resource "aws_eks_addon" "amazon_cloudwatch_observability" {
28 | count = var.enable_amazon_eks_cw_observability ? 1 : 0
29 |
30 | cluster_name = var.eks_cluster_id
31 | addon_name = local.name
32 | addon_version = try(var.addon_config.addon_version, data.aws_eks_addon_version.eks_addon_version.version)
33 | resolve_conflicts_on_create = try(var.addon_config.resolve_conflicts_on_create, "OVERWRITE")
34 | service_account_role_arn = try(module.cloudwatch_observability_irsa_role[0].iam_role_arn, null)
35 | preserve = try(var.addon_config.preserve, true)
36 | configuration_values = try(var.addon_config.configuration_values, null)
37 |
38 | tags = merge(
39 | # var.addon_context.tags,
40 | try(var.addon_config.tags, {})
41 | )
42 | }
43 |
44 | resource "aws_iam_service_linked_role" "application_signals_cw" {
45 | count = var.create_cloudwatch_application_signals_role ? 1 : 0
46 | aws_service_name = "application-signals.cloudwatch.amazonaws.com"
47 | }
48 |
--------------------------------------------------------------------------------
/modules/eks-container-insights/outputs.tf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-observability/terraform-aws-observability-accelerator/c432af44ee1df1b4ccd654e401922b99cea2ada5/modules/eks-container-insights/outputs.tf
--------------------------------------------------------------------------------
/modules/eks-container-insights/variables.tf:
--------------------------------------------------------------------------------
1 | variable "eks_cluster_id" {
2 | description = "Name of the EKS cluster"
3 | default = "eks-cw"
4 | type = string
5 | }
6 |
7 | variable "enable_amazon_eks_cw_observability" {
8 | description = "Enable Amazon EKS CloudWatch Observability add-on"
9 | type = bool
10 | default = true
11 | }
12 |
13 | variable "addon_config" {
14 | description = "Amazon EKS Managed CloudWatch Observability Add-on config"
15 | type = any
16 | default = {}
17 | }
18 |
19 | variable "kubernetes_version" {
20 | description = "Kubernetes version"
21 | type = string
22 | default = "1.28"
23 | }
24 |
25 | variable "most_recent" {
26 | description = "Determines if the most recent or default version of the addon should be returned."
27 | type = bool
28 | default = false
29 | }
30 |
31 | variable "eks_oidc_provider_arn" {
32 | description = "The OIDC Provider ARN of AWS EKS cluster"
33 | type = string
34 | default = ""
35 | }
36 |
37 | variable "create_cloudwatch_observability_irsa_role" {
38 | type = bool
39 | default = true
40 | description = "Create a Cloudwatch Observability IRSA"
41 | }
42 |
43 | variable "create_cloudwatch_application_signals_role" {
44 | type = bool
45 | default = true
46 | description = "Create a Cloudwatch Application Signals service-linked role"
47 | }
48 |
49 | variable "tags" {
50 | description = "Additional tags (e.g. `map('BusinessUnit`,`XYZ`)"
51 | type = map(string)
52 | default = {}
53 | }
54 |
--------------------------------------------------------------------------------
/modules/eks-container-insights/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.1.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 5.0.0"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/eks-monitoring/add-ons/adot-operator/locals.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | name = "adot"
3 | eks_addon_role_name = "eks:addon-manager"
4 | eks_addon_clusterrole_name = "eks:addon-manager-otel"
5 | addon_namespace = "opentelemetry-operator-system"
6 | }
7 |
--------------------------------------------------------------------------------
/modules/eks-monitoring/add-ons/adot-operator/outputs.tf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-observability/terraform-aws-observability-accelerator/c432af44ee1df1b4ccd654e401922b99cea2ada5/modules/eks-monitoring/add-ons/adot-operator/outputs.tf
--------------------------------------------------------------------------------
/modules/eks-monitoring/add-ons/adot-operator/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm_config" {
2 | description = "Helm provider config for cert-manager"
3 | type = any
4 | default = { version = "v1.8.2" }
5 | }
6 |
7 | variable "addon_context" {
8 | description = "Input configuration for the addon"
9 | type = object({
10 | aws_caller_identity_account_id = string
11 | aws_caller_identity_arn = string
12 | aws_eks_cluster_endpoint = string
13 | aws_partition_id = string
14 | aws_region_name = string
15 | eks_cluster_id = string
16 | eks_oidc_issuer_url = string
17 | eks_oidc_provider_arn = string
18 | irsa_iam_role_path = string
19 | irsa_iam_permissions_boundary = string
20 | tags = map(string)
21 | })
22 | }
23 |
24 | variable "enable_cert_manager" {
25 | description = "Enable cert-manager, a requirement for ADOT Operator"
26 | type = bool
27 | default = true
28 | }
29 |
30 | variable "kubernetes_version" {
31 | description = "EKS Cluster version"
32 | type = string
33 | }
34 |
35 | variable "addon_config" {
36 | description = "Amazon EKS Managed ADOT Add-on config"
37 | type = any
38 | default = {}
39 | }
40 |
--------------------------------------------------------------------------------
/modules/eks-monitoring/add-ons/adot-operator/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.1.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 3.72"
8 | }
9 | kubernetes = {
10 | source = "hashicorp/kubernetes"
11 | version = ">= 2.10"
12 | }
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/modules/eks-monitoring/add-ons/aws-for-fluentbit/README.md:
--------------------------------------------------------------------------------
1 | # AWS for Fluent Bit
2 |
3 | Fluent Bit is an open source Log Processor and Forwarder which allows you to collect any data like metrics and logs from different sources, enrich them with filters and send them to multiple destinations.
4 | AWS provides a Fluent Bit image with plugins for CloudWatch Logs, Kinesis Data Firehose, Kinesis Data Stream and Amazon OpenSearch Service.
5 |
6 | This add-on is configured to stream the worker node logs to CloudWatch Logs by default. It can be configured to stream the logs to additional destinations like Kinesis Data Firehose, Kinesis Data Streams and Amazon OpenSearch Service by passing the custom `values.yaml`.
7 | See this [Helm Chart](https://github.com/aws/eks-charts/tree/master/stable/aws-for-fluent-bit) for more details.
8 |
9 |
10 | ## Requirements
11 |
12 | | Name | Version |
13 | |------|---------|
14 | | [terraform](#requirement\_terraform) | >= 1.0.0 |
15 | | [aws](#requirement\_aws) | >= 3.72 |
16 |
17 | ## Providers
18 |
19 | | Name | Version |
20 | |------|---------|
21 | | [aws](#provider\_aws) | >= 3.72 |
22 |
23 | ## Modules
24 |
25 | | Name | Source | Version |
26 | |------|--------|---------|
27 | | [helm\_addon](#module\_helm\_addon) | github.com/aws-ia/terraform-aws-eks-blueprints//modules/kubernetes-addons/helm-addon | v4.32.1 |
28 |
29 | ## Resources
30 |
31 | | Name | Type |
32 | |------|------|
33 | | [aws_iam_policy.aws_for_fluent_bit](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
34 | | [aws_iam_policy_document.irsa](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
35 |
36 | ## Inputs
37 |
38 | | Name | Description | Type | Default | Required |
39 | |------|-------------|------|---------|:--------:|
40 | | [addon\_context](#input\_addon\_context) | Input configuration for the addon | object({
aws_caller_identity_account_id = string
aws_caller_identity_arn = string
aws_eks_cluster_endpoint = string
aws_partition_id = string
aws_region_name = string
eks_cluster_id = string
eks_oidc_issuer_url = string
eks_oidc_provider_arn = string
tags = map(string)
irsa_iam_role_path = string
irsa_iam_permissions_boundary = string
})
| n/a | yes |
41 | | [cw\_log\_retention\_days](#input\_cw\_log\_retention\_days) | FluentBit CloudWatch Log group retention period | `number` | `90` | no |
42 | | [helm\_config](#input\_helm\_config) | Helm provider config aws\_for\_fluent\_bit. | `any` | `{}` | no |
43 | | [irsa\_policies](#input\_irsa\_policies) | Additional IAM policies for a IAM role for service accounts | `list(string)` | `[]` | no |
44 | | [manage\_via\_gitops](#input\_manage\_via\_gitops) | Determines if the add-on should be managed via GitOps. | `bool` | `false` | no |
45 | | [refresh\_interval](#input\_refresh\_interval) | FluentBit input refresh interval | `number` | `60` | no |
46 |
47 | ## Outputs
48 |
49 | | Name | Description |
50 | |------|-------------|
51 | | [irsa\_arn](#output\_irsa\_arn) | IAM role ARN for the service account |
52 | | [irsa\_name](#output\_irsa\_name) | IAM role name for the service account |
53 | | [release\_metadata](#output\_release\_metadata) | Map of attributes of the Helm release metadata |
54 | | [service\_account](#output\_service\_account) | Name of Kubernetes service account |
55 |
56 |
--------------------------------------------------------------------------------
/modules/eks-monitoring/add-ons/aws-for-fluentbit/data.tf:
--------------------------------------------------------------------------------
1 | data "aws_iam_policy_document" "irsa" {
2 | statement {
3 | sid = "PutLogEvents"
4 | effect = "Allow"
5 | resources = ["arn:${var.addon_context.aws_partition_id}:logs:${var.addon_context.aws_region_name}:${var.addon_context.aws_caller_identity_account_id}:log-group:*:log-stream:*"]
6 | actions = ["logs:PutLogEvents"]
7 | }
8 |
9 | statement {
10 | sid = "CreateCWLogs"
11 | effect = "Allow"
12 | resources = ["arn:${var.addon_context.aws_partition_id}:logs:${var.addon_context.aws_region_name}:${var.addon_context.aws_caller_identity_account_id}:log-group:*"]
13 |
14 | actions = [
15 | "logs:CreateLogGroup",
16 | "logs:CreateLogStream",
17 | "logs:DescribeLogGroups",
18 | "logs:DescribeLogStreams",
19 | "logs:PutRetentionPolicy",
20 | ]
21 | }
22 |
23 | }
24 |
--------------------------------------------------------------------------------
/modules/eks-monitoring/add-ons/aws-for-fluentbit/locals.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | name = "aws-for-fluent-bit"
3 | service_account = try(var.helm_config.service_account, "${local.name}-sa")
4 |
5 | set_values = [
6 | {
7 | name = "serviceAccount.name"
8 | value = local.service_account
9 | },
10 | {
11 | name = "serviceAccount.create"
12 | value = false
13 | }
14 | ]
15 |
16 | # https://github.com/aws/eks-charts/blob/master/stable/aws-for-fluent-bit/Chart.yaml
17 | default_helm_config = {
18 | name = local.name
19 | chart = local.name
20 | repository = "https://aws.github.io/eks-charts"
21 | version = "0.1.27"
22 | namespace = local.name
23 | values = local.default_helm_values
24 | description = "aws-for-fluentbit Helm Chart deployment configuration"
25 | }
26 |
27 | helm_config = merge(
28 | local.default_helm_config,
29 | var.helm_config
30 | )
31 |
32 | default_helm_values = [templatefile("${path.module}/values.yaml", {
33 | aws_region = var.addon_context.aws_region_name
34 | cluster_name = var.addon_context.eks_cluster_id
35 | log_retention_days = var.cw_log_retention_days
36 | refresh_interval = var.refresh_interval
37 | service_account = local.service_account
38 | })]
39 |
40 | irsa_config = {
41 | kubernetes_namespace = local.helm_config["namespace"]
42 | kubernetes_service_account = local.service_account
43 | create_kubernetes_namespace = try(local.helm_config["create_namespace"], true)
44 | create_kubernetes_service_account = true
45 | create_service_account_secret_token = try(local.helm_config["create_service_account_secret_token"], false)
46 | irsa_iam_policies = concat([aws_iam_policy.aws_for_fluent_bit.arn], var.irsa_policies)
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/modules/eks-monitoring/add-ons/aws-for-fluentbit/main.tf:
--------------------------------------------------------------------------------
1 | module "helm_addon" {
2 | source = "github.com/aws-ia/terraform-aws-eks-blueprints//modules/kubernetes-addons/helm-addon?ref=v4.32.1"
3 | manage_via_gitops = var.manage_via_gitops
4 | set_values = local.set_values
5 | helm_config = local.helm_config
6 | irsa_config = local.irsa_config
7 | addon_context = var.addon_context
8 | }
9 |
10 | resource "aws_iam_policy" "aws_for_fluent_bit" {
11 | name = "${var.addon_context.eks_cluster_id}-fluentbit"
12 | description = "IAM Policy for AWS for FluentBit"
13 | policy = data.aws_iam_policy_document.irsa.json
14 | tags = var.addon_context.tags
15 | }
16 |
--------------------------------------------------------------------------------
/modules/eks-monitoring/add-ons/aws-for-fluentbit/outputs.tf:
--------------------------------------------------------------------------------
1 | output "release_metadata" {
2 | description = "Map of attributes of the Helm release metadata"
3 | value = module.helm_addon.release_metadata
4 | }
5 |
6 | output "irsa_arn" {
7 | description = "IAM role ARN for the service account"
8 | value = module.helm_addon.irsa_arn
9 | }
10 |
11 | output "irsa_name" {
12 | description = "IAM role name for the service account"
13 | value = module.helm_addon.irsa_name
14 | }
15 |
16 | output "service_account" {
17 | description = "Name of Kubernetes service account"
18 | value = module.helm_addon.service_account
19 | }
20 |
--------------------------------------------------------------------------------
/modules/eks-monitoring/add-ons/aws-for-fluentbit/values.yaml:
--------------------------------------------------------------------------------
1 | serviceAccount:
2 | create: false
3 | name: ${service_account}
4 |
5 | cloudWatch:
6 | enabled: false
7 |
8 | cloudWatchLogs:
9 | enabled: true
10 | region: ${aws_region}
11 | # logGroupName is a fallback to failed parsing
12 | logGroupName: /aws/eks/observability-accelerator/workloads
13 | logGroupTemplate: /aws/eks/observability-accelerator/${cluster_name}/$kubernetes['namespace_name']
14 | logStreamTemplate: $kubernetes['container_name'].$kubernetes['pod_name']
15 | logKey: log
16 | logRetentionDays: ${log_retention_days}
17 |
18 | input:
19 | enabled: false
20 |
21 | additionalInputs: |
22 | [INPUT]
23 | Name tail
24 | Tag kube.*
25 | Path /var/log/containers/*.log
26 | DB /var/log/flb_kube.db
27 | Mem_Buf_Limit 5MB
28 | Skip_Long_Lines On
29 | Refresh_Interval ${refresh_interval}
30 | multiline.parser cri, docker, go, java, python
31 |
--------------------------------------------------------------------------------
/modules/eks-monitoring/add-ons/aws-for-fluentbit/variables.tf:
--------------------------------------------------------------------------------
1 | variable "helm_config" {
2 | description = "Helm provider config aws_for_fluent_bit."
3 | type = any
4 | default = {}
5 | }
6 |
7 | variable "cw_log_retention_days" {
8 | description = "FluentBit CloudWatch Log group retention period"
9 | type = number
10 | default = 90
11 | }
12 |
13 | variable "refresh_interval" {
14 | description = "FluentBit input refresh interval"
15 | type = number
16 | default = 60
17 | }
18 |
19 |
20 | variable "manage_via_gitops" {
21 | type = bool
22 | description = "Determines if the add-on should be managed via GitOps."
23 | default = false
24 | }
25 |
26 | variable "irsa_policies" {
27 | description = "Additional IAM policies for a IAM role for service accounts"
28 | type = list(string)
29 | default = []
30 | }
31 |
32 | variable "addon_context" {
33 | description = "Input configuration for the addon"
34 | type = object({
35 | aws_caller_identity_account_id = string
36 | aws_caller_identity_arn = string
37 | aws_eks_cluster_endpoint = string
38 | aws_partition_id = string
39 | aws_region_name = string
40 | eks_cluster_id = string
41 | eks_oidc_issuer_url = string
42 | eks_oidc_provider_arn = string
43 | tags = map(string)
44 | irsa_iam_role_path = string
45 | irsa_iam_permissions_boundary = string
46 | })
47 | }
48 |
--------------------------------------------------------------------------------
/modules/eks-monitoring/add-ons/aws-for-fluentbit/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 3.72"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/modules/eks-monitoring/add-ons/external-secrets/README.md:
--------------------------------------------------------------------------------
1 | # External Secrets Operator Kubernetes addon
2 |
3 | This deploys an EKS Cluster with the External Secrets Operator. The cluster is populated with a ClusterSecretStore and ExternalSecret using the Grafana API Key secret from AWS SSM Parameter Store. A secret store is created for AWS SSM Parameter Store; it uses IRSA (IAM Roles for Service Accounts) to retrieve the secret values from AWS.
4 |
5 |
6 | ## Requirements
7 |
8 | | Name | Version |
9 | |------|---------|
10 | | [terraform](#requirement\_terraform) | >= 1.0.0 |
11 | | [aws](#requirement\_aws) | >= 3.72 |
12 | | [kubectl](#requirement\_kubectl) | >= 2.0.3 |
13 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 |
14 | | [random](#requirement\_random) | >= 3.6.1 |
15 |
16 | ## Providers
17 |
18 | | Name | Version |
19 | |------|---------|
20 | | [aws](#provider\_aws) | >= 3.72 |
21 | | [kubectl](#provider\_kubectl) | >= 2.0.3 |
22 | | [random](#provider\_random) | >= 3.6.1 |
23 |
24 | ## Modules
25 |
26 | | Name | Source | Version |
27 | |------|--------|---------|
28 | | [cluster\_secretstore\_role](#module\_cluster\_secretstore\_role) | github.com/aws-ia/terraform-aws-eks-blueprints//modules/irsa | v4.32.1 |
29 | | [external\_secrets](#module\_external\_secrets) | github.com/aws-ia/terraform-aws-eks-blueprints//modules/kubernetes-addons/external-secrets | v4.32.1 |
30 |
31 | ## Resources
32 |
33 | | Name | Type |
34 | |------|------|
35 | | [aws_iam_policy.cluster_secretstore](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
36 | | [aws_kms_key.secrets](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
37 | | [aws_ssm_parameter.secret](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_parameter) | resource |
38 | | [kubectl_manifest.cluster_secretstore](https://registry.terraform.io/providers/alekc/kubectl/latest/docs/resources/manifest) | resource |
39 | | [kubectl_manifest.secret](https://registry.terraform.io/providers/alekc/kubectl/latest/docs/resources/manifest) | resource |
40 | | [random_uuid.grafana_key_suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/uuid) | resource |
41 | | [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |
42 |
43 | ## Inputs
44 |
45 | | Name | Description | Type | Default | Required |
46 | |------|-------------|------|---------|:--------:|
47 | | [addon\_context](#input\_addon\_context) | Input configuration for the addon | object({
aws_caller_identity_account_id = string
aws_caller_identity_arn = string
aws_eks_cluster_endpoint = string
aws_partition_id = string
aws_region_name = string
eks_cluster_id = string
eks_oidc_issuer_url = string
eks_oidc_provider_arn = string
irsa_iam_role_path = string
irsa_iam_permissions_boundary = string
tags = map(string)
})
| n/a | yes |
48 | | [enable\_external\_secrets](#input\_enable\_external\_secrets) | Enable external-secrets | `bool` | `true` | no |
49 | | [grafana\_api\_key](#input\_grafana\_api\_key) | Grafana API key for the Amazon Managed Grafana workspace | `string` | n/a | yes |
50 | | [helm\_config](#input\_helm\_config) | Helm provider config for external secrets | `any` | `{}` | no |
51 | | [target\_secret\_name](#input\_target\_secret\_name) | Name to store the secret for Grafana API Key | `string` | n/a | yes |
52 | | [target\_secret\_namespace](#input\_target\_secret\_namespace) | Namespace to store the secret for Grafana API Key | `string` | n/a | yes |
53 |
54 | ## Outputs
55 |
56 | No outputs.
57 |
58 |
--------------------------------------------------------------------------------
/modules/eks-monitoring/add-ons/external-secrets/locals.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | name = "external-secrets"
3 | namespace = "external-secrets"
4 | cluster_secretstore_name = "cluster-secretstore-sm"
5 | cluster_secretstore_sa = "cluster-secretstore-sa"
6 | }
7 |
--------------------------------------------------------------------------------
/modules/eks-monitoring/add-ons/external-secrets/main.tf:
--------------------------------------------------------------------------------
1 | module "external_secrets" {
2 | source = "github.com/aws-ia/terraform-aws-eks-blueprints//modules/kubernetes-addons/external-secrets?ref=v4.32.1"
3 | count = var.enable_external_secrets ? 1 : 0
4 |
5 | helm_config = var.helm_config
6 | addon_context = var.addon_context
7 | }
8 |
9 | data "aws_region" "current" {}
10 |
11 | #---------------------------------------------------------------
12 | # External Secrets Operator - Secret
13 | #---------------------------------------------------------------
14 |
15 | resource "aws_kms_key" "secrets" {
16 | enable_key_rotation = true
17 | }
18 |
19 | module "cluster_secretstore_role" {
20 | source = "github.com/aws-ia/terraform-aws-eks-blueprints//modules/irsa?ref=v4.32.1"
21 | kubernetes_namespace = local.namespace
22 | create_kubernetes_namespace = false
23 | kubernetes_service_account = local.cluster_secretstore_sa
24 | irsa_iam_policies = [aws_iam_policy.cluster_secretstore.arn]
25 | eks_cluster_id = var.addon_context.eks_cluster_id
26 | eks_oidc_provider_arn = var.addon_context.eks_oidc_provider_arn
27 | depends_on = [module.external_secrets]
28 | }
29 |
30 | resource "aws_iam_policy" "cluster_secretstore" {
31 | name_prefix = local.cluster_secretstore_sa
32 | policy = <
9 | ## Requirements
10 |
11 | | Name | Version |
12 | |------|---------|
13 | | [terraform](#requirement\_terraform) | >= 1.1.0 |
14 | | [aws](#requirement\_aws) | >= 4.0.0 |
15 | | [helm](#requirement\_helm) | >= 2.4.1 |
16 | | [kubectl](#requirement\_kubectl) | >= 2.0.3 |
17 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 |
18 |
19 | ## Providers
20 |
21 | | Name | Version |
22 | |------|---------|
23 | | [aws](#provider\_aws) | >= 4.0.0 |
24 | | [kubectl](#provider\_kubectl) | >= 2.0.3 |
25 |
26 | ## Modules
27 |
28 | No modules.
29 |
30 | ## Resources
31 |
32 | | Name | Type |
33 | |------|------|
34 | | [aws_prometheus_rule_group_namespace.alerting_rules](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/prometheus_rule_group_namespace) | resource |
35 | | [aws_prometheus_rule_group_namespace.recording_rules](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/prometheus_rule_group_namespace) | resource |
36 | | [kubectl_manifest.flux_kustomization](https://registry.terraform.io/providers/alekc/kubectl/latest/docs/resources/manifest) | resource |
37 |
38 | ## Inputs
39 |
40 | | Name | Description | Type | Default | Required |
41 | |------|-------------|------|---------|:--------:|
42 | | [pattern\_config](#input\_pattern\_config) | Configuration object for ISTIO monitoring | object({
enable_alerting_rules = bool
enable_recording_rules = bool
enable_dashboards = bool
scrape_sample_limit = number
flux_gitrepository_name = string
flux_gitrepository_url = string
flux_gitrepository_branch = string
flux_kustomization_name = string
flux_kustomization_path = string
managed_prometheus_workspace_id = string
prometheus_metrics_endpoint = string
dashboards = object({
cp = string
mesh = string
performance = string
service = string
})
})
| n/a | yes |
43 |
44 | ## Outputs
45 |
46 | No outputs.
47 |
48 |
--------------------------------------------------------------------------------
/modules/eks-monitoring/patterns/istio/outputs.tf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-observability/terraform-aws-observability-accelerator/c432af44ee1df1b4ccd654e401922b99cea2ada5/modules/eks-monitoring/patterns/istio/outputs.tf
--------------------------------------------------------------------------------
/modules/eks-monitoring/patterns/istio/variables.tf:
--------------------------------------------------------------------------------
# All settings for the Istio monitoring pattern, grouped into a single
# object so the parent module can forward one value.
variable "pattern_config" {
  description = "Configuration object for ISTIO monitoring"
  type = object({
    # Feature toggles for the AMP rule namespaces and the dashboards.
    enable_alerting_rules  = bool
    enable_recording_rules = bool
    enable_dashboards      = bool
    # Sample limit for the Istio scrape job — presumably caps samples per
    # scrape; verify against the scrape configuration that consumes it.
    scrape_sample_limit    = number

    # Flux GitRepository/Kustomization coordinates used to deliver the
    # Grafana dashboards via GitOps.
    flux_gitrepository_name   = string
    flux_gitrepository_url    = string
    flux_gitrepository_branch = string
    flux_kustomization_name   = string
    flux_kustomization_path   = string

    # Target Amazon Managed Prometheus workspace and its metrics endpoint.
    managed_prometheus_workspace_id = string
    prometheus_metrics_endpoint     = string

    # Sources for the four curated Istio dashboards
    # (control plane, mesh, performance, service).
    dashboards = object({
      cp          = string
      mesh        = string
      performance = string
      service     = string
    })
  })
  # The caller must always supply a config object; null is rejected.
  nullable = false
}
27 |
--------------------------------------------------------------------------------
/modules/eks-monitoring/patterns/istio/versions.tf:
--------------------------------------------------------------------------------
# Provider requirements for the Istio monitoring pattern.
# NOTE(review): helm is declared here, but no helm resources are visible in
# this pattern's files — confirm the requirement is actually needed.
terraform {
  required_version = ">= 1.1.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 4.0.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">= 2.10"
    }
    # alekc/kubectl fork (not gavinbunney) — used for kubectl_manifest.
    kubectl = {
      source  = "alekc/kubectl"
      version = ">= 2.0.3"
    }
    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.4.1"
    }
  }
}
--------------------------------------------------------------------------------
/modules/eks-monitoring/patterns/java/README.md:
--------------------------------------------------------------------------------
1 | # Java patterns module
2 |
3 | Provides monitoring for Java based workloads with the following resources:
4 |
5 | - AWS Managed Grafana Dashboard and data source
6 | - Alerts and recording rules with AWS Managed Service for Prometheus
7 |
8 |
9 | ## Requirements
10 |
11 | | Name | Version |
12 | |------|---------|
13 | | [terraform](#requirement\_terraform) | >= 1.1.0 |
14 | | [aws](#requirement\_aws) | >= 4.0.0 |
15 | | [helm](#requirement\_helm) | >= 2.4.1 |
16 | | [kubectl](#requirement\_kubectl) | >= 2.0.3 |
17 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 |
18 |
19 | ## Providers
20 |
21 | | Name | Version |
22 | |------|---------|
23 | | [aws](#provider\_aws) | >= 4.0.0 |
24 | | [kubectl](#provider\_kubectl) | >= 2.0.3 |
25 |
26 | ## Modules
27 |
28 | No modules.
29 |
30 | ## Resources
31 |
32 | | Name | Type |
33 | |------|------|
34 | | [aws_prometheus_rule_group_namespace.alerting_rules](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/prometheus_rule_group_namespace) | resource |
35 | | [aws_prometheus_rule_group_namespace.recording_rules](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/prometheus_rule_group_namespace) | resource |
36 | | [kubectl_manifest.flux_kustomization](https://registry.terraform.io/providers/alekc/kubectl/latest/docs/resources/manifest) | resource |
37 |
38 | ## Inputs
39 |
40 | | Name | Description | Type | Default | Required |
41 | |------|-------------|------|---------|:--------:|
42 | | [pattern\_config](#input\_pattern\_config) | Configuration object for Java/JMX monitoring | object({
enable_alerting_rules = bool
enable_recording_rules = bool
scrape_sample_limit = number
enable_dashboards = bool
flux_gitrepository_name = string
flux_gitrepository_url = string
flux_gitrepository_branch = string
flux_kustomization_name = string
flux_kustomization_path = string
managed_prometheus_workspace_id = string
prometheus_metrics_endpoint = string
grafana_dashboard_url = string
})
| n/a | yes |
43 |
44 | ## Outputs
45 |
46 | No outputs.
47 |
48 |
--------------------------------------------------------------------------------
/modules/eks-monitoring/patterns/java/main.tf:
--------------------------------------------------------------------------------
1 | resource "aws_prometheus_rule_group_namespace" "recording_rules" {
2 | count = var.pattern_config.enable_recording_rules ? 1 : 0
3 |
4 | name = "accelerator-java-rules"
5 | workspace_id = var.pattern_config.managed_prometheus_workspace_id
6 | data = < 80
26 | for: 1m
27 | labels:
28 | severity: warning
29 | annotations:
30 | summary: "JVM heap warning"
31 | description: "JVM heap of instance `{{$labels.instance}}` from application `{{$labels.application}}` is above 80% for one minute. (current=`{{$value}}%`)"
32 | EOF
33 | }
34 |
35 | resource "kubectl_manifest" "flux_kustomization" {
36 | count = var.pattern_config.enable_dashboards ? 1 : 0
37 |
38 | yaml_body = <
11 | ## Requirements
12 |
13 | | Name | Version |
14 | |------|---------|
15 | | [terraform](#requirement\_terraform) | >= 1.1.0 |
16 | | [aws](#requirement\_aws) | >= 4.0.0 |
17 | | [kubectl](#requirement\_kubectl) | >= 2.0.3 |
18 | | [kubernetes](#requirement\_kubernetes) | >= 2.10 |
19 |
20 | ## Providers
21 |
22 | | Name | Version |
23 | |------|---------|
24 | | [aws](#provider\_aws) | >= 4.0.0 |
25 | | [kubectl](#provider\_kubectl) | >= 2.0.3 |
26 |
27 | ## Modules
28 |
29 | No modules.
30 |
31 | ## Resources
32 |
33 | | Name | Type |
34 | |------|------|
35 | | [aws_prometheus_rule_group_namespace.alerting_rules](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/prometheus_rule_group_namespace) | resource |
36 | | [kubectl_manifest.flux_kustomization](https://registry.terraform.io/providers/alekc/kubectl/latest/docs/resources/manifest) | resource |
37 |
38 | ## Inputs
39 |
40 | | Name | Description | Type | Default | Required |
41 | |------|-------------|------|---------|:--------:|
| [pattern\_config](#input\_pattern\_config) | Configuration object for NGINX monitoring | object({
enable_alerting_rules = bool
enable_recording_rules = bool
scrape_sample_limit = number
enable_dashboards = bool
flux_gitrepository_name = string
flux_gitrepository_url = string
flux_gitrepository_branch = string
flux_kustomization_name = string
flux_kustomization_path = string
managed_prometheus_workspace_id = string
prometheus_metrics_endpoint = string
grafana_dashboard_url = string
})
| n/a | yes |
43 |
44 | ## Outputs
45 |
46 | No outputs.
47 |
48 |
--------------------------------------------------------------------------------
/modules/eks-monitoring/patterns/nginx/main.tf:
--------------------------------------------------------------------------------
1 | resource "aws_prometheus_rule_group_namespace" "alerting_rules" {
2 | count = var.pattern_config.enable_alerting_rules ? 1 : 0
3 |
4 | name = "accelerator-nginx-alerting"
5 | workspace_id = var.pattern_config.managed_prometheus_workspace_id
6 | data = < 5
12 | for: 1m
13 | labels:
14 | severity: critical
15 | annotations:
16 | summary: Nginx high HTTP 4xx error rate (instance {{ $labels.instance }})
17 | description: "Too many HTTP requests with status 4xx (> 5%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
18 | - name: Nginx-HTTP-5xx-error-rate
19 | rules:
20 | - alert: metric:alerting_rule
21 | expr: sum(rate(nginx_http_requests_total{status=~"^5.."}[1m])) / sum(rate(nginx_http_requests_total[1m])) * 100 > 5
22 | for: 1m
23 | labels:
24 | severity: critical
25 | annotations:
26 | summary: Nginx high HTTP 5xx error rate (instance {{ $labels.instance }})
27 | description: "Too many HTTP requests with status 5xx (> 5%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
28 | - name: Nginx-high-latency
29 | rules:
30 | - alert: metric:alerting_rule
31 | expr: histogram_quantile(0.99, sum(rate(nginx_http_request_duration_seconds_bucket[2m])) by (host, node)) > 3
32 | for: 2m
33 | labels:
34 | severity: warning
35 | annotations:
36 | summary: Nginx latency high (instance {{ $labels.instance }})
37 | description: "Nginx p99 latency is higher than 3 seconds\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
38 | EOF
39 | }
40 |
41 | resource "kubectl_manifest" "flux_kustomization" {
42 | count = var.pattern_config.enable_dashboards ? 1 : 0
43 |
44 | yaml_body = <
14 | ## Requirements
15 |
16 | | Name | Version |
17 | |------|---------|
18 | | [terraform](#requirement\_terraform) | >= 1.1.0, < 1.3.0 |
19 | | [aws](#requirement\_aws) | >= 4.0.0 |
20 | | [grafana](#requirement\_grafana) | >= 1.25.0 |
21 |
22 | ## Providers
23 |
24 | | Name | Version |
25 | |------|---------|
26 | | [aws](#provider\_aws) | >= 4.0.0 |
27 | | [grafana](#provider\_grafana) | >= 1.25.0 |
28 |
29 | ## Modules
30 |
31 | No modules.
32 |
33 | ## Resources
34 |
35 | | Name | Type |
36 | |------|------|
37 | | [aws_cloudwatch_metric_alarm.active-series-metrics](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_metric_alarm) | resource |
38 | | [aws_cloudwatch_metric_alarm.ingestion_rate](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_metric_alarm) | resource |
39 | | [grafana_dashboard.this](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/dashboard) | resource |
40 | | [grafana_data_source.cloudwatch](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/data_source) | resource |
41 |
42 | ## Inputs
43 |
44 | | Name | Description | Type | Default | Required |
45 | |------|-------------|------|---------|:--------:|
46 | | [active\_series\_threshold](#input\_active\_series\_threshold) | Threshold for active series metric alarm | `number` | `1000000` | no |
47 | | [aws\_region](#input\_aws\_region) | AWS Region | `string` | n/a | yes |
48 | | [dashboards\_folder\_id](#input\_dashboards\_folder\_id) | Grafana folder ID for automatic dashboards | `string` | n/a | yes |
| [ingestion\_rate\_threshold](#input\_ingestion\_rate\_threshold) | Threshold for ingestion rate metric alarm | `number` | `70000` | no |
50 | | [managed\_prometheus\_workspace\_id](#input\_managed\_prometheus\_workspace\_id) | Amazon Managed Service for Prometheus Workspace ID to create Alarms for | `string` | n/a | yes |
51 |
52 | ## Outputs
53 |
54 | No outputs.
55 |
56 |
--------------------------------------------------------------------------------
/modules/managed-prometheus-monitoring/alarms.tf:
--------------------------------------------------------------------------------
1 | #CloudWatch Alerts on AMP Usage
# One alarm per AMP workspace: fires when the ActiveSeries usage count
# stays at or above var.active_series_threshold for two periods.
resource "aws_cloudwatch_metric_alarm" "active_series_metrics" {
  for_each = local.amp_list

  alarm_name                = "active-series-metrics"
  alarm_description         = "This metric monitors AMP active series metrics"
  comparison_operator       = "GreaterThanOrEqualToThreshold"
  evaluation_periods        = 2
  threshold                 = var.active_series_threshold
  insufficient_data_actions = []

  metric_query {
    id          = "m1"
    return_data = true

    metric {
      namespace   = "AWS/Usage"
      metric_name = "ResourceCount"
      period      = 120
      stat        = "Average"
      unit        = "None"

      # Select the ActiveSeries usage count for this specific workspace.
      dimensions = {
        Service    = "Prometheus"
        Type       = "Resource"
        Resource   = "ActiveSeries"
        ResourceId = each.key
        Class      = "None"
      }
    }
  }
}
30 |
# One alarm per AMP workspace: fires when the IngestionRate usage metric
# stays at or above var.ingestion_rate_threshold for two periods.
resource "aws_cloudwatch_metric_alarm" "ingestion_rate" {
  for_each = local.amp_list

  alarm_name                = "ingestion_rate"
  alarm_description         = "This metric monitors AMP ingestion rate"
  comparison_operator       = "GreaterThanOrEqualToThreshold"
  evaluation_periods        = 2
  threshold                 = var.ingestion_rate_threshold
  insufficient_data_actions = []

  metric_query {
    id          = "m1"
    return_data = true

    metric {
      namespace   = "AWS/Usage"
      metric_name = "ResourceCount"
      period      = 120
      stat        = "Average"
      unit        = "None"

      # Select the IngestionRate usage count for this specific workspace.
      dimensions = {
        Service    = "Prometheus"
        Type       = "Resource"
        Resource   = "IngestionRate"
        ResourceId = each.key
        Class      = "None"
      }
    }
  }
}
60 |
--------------------------------------------------------------------------------
/modules/managed-prometheus-monitoring/billing/main.tf:
--------------------------------------------------------------------------------
# Detects unexpected jumps in estimated AMP (Amazon Managed Service for
# Prometheus) charges using CloudWatch anomaly detection over the
# AWS/Billing estimated-charges metric.
# NOTE: AWS/Billing metrics require billing alerts to be enabled on the
# account and are published in us-east-1 (this module is invoked with an
# aliased us-east-1 provider).
resource "aws_cloudwatch_metric_alarm" "amp_billing_anomaly_detection" {
  alarm_name                = "amp_billing_anomaly"
  comparison_operator       = "GreaterThanUpperThreshold"
  evaluation_periods        = "2"
  threshold_metric_id       = "e1"
  alarm_description         = "This monitors AMP charges and alarms on anomaly detection"
  insufficient_data_actions = []

  # Anomaly-detection band computed from the raw charges metric (m1).
  metric_query {
    id          = "e1"
    expression  = "ANOMALY_DETECTION_BAND(m1)"
    label       = "Expected AMP Charges"
    return_data = "true"
  }

  metric_query {
    id          = "m1"
    return_data = "true"
    metric {
      # BUGFIX: the AWS/Billing metric is named "EstimatedCharges" (no
      # space); the previous value "Estimated Charges" matched no metric.
      metric_name = "EstimatedCharges"
      namespace   = "AWS/Billing"
      period      = "21600"
      stat        = "Maximum"
      unit        = "Count"

      dimensions = {
        # NOTE(review): verify "Prometheus" matches the billing
        # ServiceName dimension value for AMP (it may be "AmazonPrometheus").
        ServiceName = "Prometheus"
        # BUGFIX: dimension key was misspelled "Currencty", so the query
        # could never match a data point.
        Currency = "USD"
      }
    }
  }
}
33 |
--------------------------------------------------------------------------------
/modules/managed-prometheus-monitoring/billing/outputs.tf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-observability/terraform-aws-observability-accelerator/c432af44ee1df1b4ccd654e401922b99cea2ada5/modules/managed-prometheus-monitoring/billing/outputs.tf
--------------------------------------------------------------------------------
/modules/managed-prometheus-monitoring/billing/variables.tf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-observability/terraform-aws-observability-accelerator/c432af44ee1df1b4ccd654e401922b99cea2ada5/modules/managed-prometheus-monitoring/billing/variables.tf
--------------------------------------------------------------------------------
/modules/managed-prometheus-monitoring/billing/versions.tf:
--------------------------------------------------------------------------------
# Provider requirements for the billing submodule.
# NOTE(review): grafana is declared, but billing/main.tf only uses the aws
# provider — confirm the grafana requirement is actually needed here.
terraform {
  required_version = ">= 1.1.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 4.0.0"
    }
    grafana = {
      source  = "grafana/grafana"
      version = ">= 1.25.0"
    }
  }
}
15 |
--------------------------------------------------------------------------------
/modules/managed-prometheus-monitoring/locals.tf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-observability/terraform-aws-observability-accelerator/c432af44ee1df1b4ccd654e401922b99cea2ada5/modules/managed-prometheus-monitoring/locals.tf
--------------------------------------------------------------------------------
/modules/managed-prometheus-monitoring/main.tf:
--------------------------------------------------------------------------------
# Dedicated provider alias pinned to us-east-1 — presumably because
# AWS/Billing metrics used by the billing submodule are only published there.
provider "aws" {
  region = "us-east-1"
  alias  = "billing_region"
}

locals {
  name = "aws-observability-accelerator-cloudwatch"
  # Input is a comma-separated string of AMP workspace IDs; convert it to a
  # set so the alarm resources can iterate with for_each.
  amp_list = toset(split(",", var.managed_prometheus_workspace_ids))
}
10 |
# Registers CloudWatch as a data source in the Amazon Managed Grafana
# workspace, authenticating via SigV4 with the workspace IAM role.
resource "grafana_data_source" "cloudwatch" {
  type = "cloudwatch"
  name = local.name

  # Giving priority to Managed Prometheus datasources
  is_default = false
  json_data_encoded = jsonencode({
    default_region  = var.aws_region
    sigv4_auth      = true
    sigv4_auth_type = "workspace-iam-role"
    sigv4_region    = var.aws_region
  })
}
24 |
# Fetches the AMP usage dashboard JSON from the accelerator artifacts repo,
# pinned to a specific commit for reproducible applies.
data "http" "dashboard" {
  url = "https://raw.githubusercontent.com/aws-observability/aws-observability-accelerator/a72787328e493c4628680487e3c885fc395d1c56/artifacts/grafana-dashboards/amp/amp-dashboard.json"

  request_headers = {
    Accept = "application/json"
  }
}
32 |
# Imports the fetched dashboard JSON into the configured Grafana folder.
resource "grafana_dashboard" "this" {
  folder      = var.dashboards_folder_id
  config_json = data.http.dashboard.response_body
}
37 |
# Billing anomaly alarm runs through the us-east-1 aliased provider, since
# that is where the billing metrics it watches are published.
module "billing" {
  source = "./billing"
  providers = {
    aws = aws.billing_region
  }
}
44 |
--------------------------------------------------------------------------------
/modules/managed-prometheus-monitoring/outputs.tf:
--------------------------------------------------------------------------------
# Exposes the created dashboard URL(s) so callers can link to them directly.
output "grafana_dashboard_urls" {
  value       = [grafana_dashboard.this.url]
  description = "URLs for dashboards created"
}
5 |
--------------------------------------------------------------------------------
/modules/managed-prometheus-monitoring/variables.tf:
--------------------------------------------------------------------------------
# Region used for the CloudWatch data source default region and SigV4 signing.
variable "aws_region" {
  description = "AWS Region"
  type        = string
}

# Comma-separated string of workspace IDs; split into a set in main.tf
# (toset(split(",", ...))) to drive per-workspace alarms.
variable "managed_prometheus_workspace_ids" {
  description = "Comma-separated list of Amazon Managed Service for Prometheus Workspace IDs to create Alarms for"
  type        = string
}

variable "active_series_threshold" {
  description = "Threshold for active series metric alarm"
  type        = number
  default     = 8000000
}

variable "ingestion_rate_threshold" {
  # BUGFIX: description was copy-pasted from active_series_threshold; this
  # variable drives the ingestion-rate alarm.
  description = "Threshold for ingestion rate metric alarm"
  type        = number
  default     = 136000
}

variable "dashboards_folder_id" {
  description = "Grafana folder ID for automatic dashboards"
  type        = string
  # "0" is Grafana's General folder.
  default     = "0"
}
28 |
--------------------------------------------------------------------------------
/modules/managed-prometheus-monitoring/versions.tf:
--------------------------------------------------------------------------------
# Provider requirements for the managed-prometheus-monitoring module.
# NOTE(review): helm is declared, but no helm resources appear in the files
# visible here — confirm it is actually used (e.g. in locals.tf).
terraform {
  required_version = ">= 1.1.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 4.0.0"
    }
    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.4.1"
    }
    grafana = {
      source  = "grafana/grafana"
      version = ">= 1.25.0"
    }
    # Used by data.http.dashboard to download the dashboard JSON.
    http = {
      source  = "hashicorp/http"
      version = ">= 3.3.0"
    }
  }
}
23 |
--------------------------------------------------------------------------------
/test/examples_basic_test.go:
--------------------------------------------------------------------------------
1 | package test
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/gruntwork-io/terratest/modules/terraform"
7 | )
8 |
9 | func TestExamplesBasic(t *testing.T) {
10 |
11 | terraformOptions := &terraform.Options{
12 | TerraformDir: "../examples/basic",
13 | // Vars: map[string]interface{}{
14 | // "myvar": "test",
15 | // "mylistvar": []string{"list_item_1"},
16 | // },
17 | }
18 |
19 | defer terraform.Destroy(t, terraformOptions)
20 | terraform.InitAndApply(t, terraformOptions)
21 | }
22 |
--------------------------------------------------------------------------------
/tfsec.yaml:
--------------------------------------------------------------------------------
1 | exclude:
  - aws-iam-no-policy-wildcards # Wildcards required in addon IAM policies
3 | - aws-vpc-no-excessive-port-access # VPC settings left up to user implementation for recommended practices
4 | - aws-vpc-no-public-ingress-acl # VPC settings left up to user implementation for recommended practices
5 | - aws-eks-no-public-cluster-access-to-cidr # Public access enabled for better example usability, users are recommended to disable if possible
6 | - aws-eks-no-public-cluster-access # Public access enabled for better example usability, users are recommended to disable if possible
7 | - aws-eks-encrypt-secrets # Module defaults to encrypting secrets with CMK, but this is not hardcoded and therefore a spurious error
8 | - aws-vpc-no-public-egress-sgr # Added in v1.22
9 |
--------------------------------------------------------------------------------