├── .dockerignore ├── .env.example ├── .github └── workflows │ └── release.yml ├── .gitignore ├── .husky ├── .gitignore └── pre-commit ├── .npmignore ├── .pre-commit-config.yaml ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── ami ├── LICENSE ├── Makefile ├── amazon-eks-node-ubuntu2110.json ├── copy-image.sh ├── files │ ├── functions.sh │ └── gitpod │ │ ├── airgap.sh │ │ ├── bootstrap.sh │ │ ├── containerd-stargz-grpc.toml │ │ ├── containerd.toml │ │ ├── kubelet.service │ │ └── sysctl │ │ ├── 11-network-security.conf │ │ ├── 89-gce.conf │ │ └── 99-defaults.conf └── scripts │ ├── shared │ ├── cis-eks.sh │ ├── docker.sh │ └── eks.sh │ └── ubuntu2004 │ ├── boilerplate.sh │ ├── cleanup.sh │ ├── hardening.sh │ └── shiftfs.sh ├── auth-providers-patch.yaml ├── bin └── provision.ts ├── cdk.json ├── eks-cluster.yaml ├── images └── gitpod-login.png ├── jest.config.js ├── lib ├── addons.ts ├── charts │ ├── assets │ │ ├── ingress.yaml │ │ └── jaeger-gitpod.yaml │ ├── cert-manager.ts │ ├── cluster-autoscaler.ts │ ├── cluster-utils.ts │ ├── container-insights.ts │ ├── external-dns.ts │ ├── jaeger.ts │ ├── load-balancer.ts │ ├── metrics-server.ts │ └── utils.ts ├── database.ts ├── gitpod.ts ├── registry.ts ├── services.ts └── setup.ts ├── package.json ├── setup.sh ├── tsconfig.json └── yarn.lock /.dockerignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | 3 | cdk.context.json 4 | 5 | .env 6 | gitpod-values.yaml 7 | gitpod-ingress.yaml 8 | 9 | .kubeconfig* 10 | 11 | eks-cluster.yaml 12 | 13 | .git 14 | .husky 15 | .github 16 | 17 | config.json 18 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # Base domain 2 | DOMAIN=mygitpod.example.com 3 | 4 | # AWS Certificate Manager certificate 5 | # Setting this value implies TLS termination in the load balancer 6 | CERTIFICATE_ARN=arn:aws:acm:::certificate/ 7 | 8 | # The AWS credentials profile name (optional) 9 | # Leave empty or remove if you only set up the default one 10 | AWS_PROFILE=ekspod 11 | 12 | # The Route53 Zone ID (optional) 13 | # If the DNS domain is managed by and you want to enable external-dns, please set the route53 zone ID 14 | # This enables the update of the DNS records required to get gitpod running using the Ingress rule 15 | # definition as the source of truth. 16 | ROUTE53_ZONEID=XXXXXXXXX 17 | 18 | # The name of the S3 bucket where the container images that gitpod creates are stored 19 | # If there is no value we create a new bucket with the name "container-registry--" 20 | CONTAINER_REGISTRY_BUCKET= 21 | 22 | # The path to the file containing the credentials to pull images from private container registries. 23 | # https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ 24 | IMAGE_PULL_SECRET_FILE= 25 | 26 | # List of registries (hostnames) that users get access to by default allowed to be used in base images. 27 | # Default: only images from docker.io 28 | IMAGE_REGISTRY_WHITELIST= 29 | 30 | # Allow to define internal or internet-facing ALB for gitpod proxy component. 
31 | # https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/guide/ingress/annotations/#scheme 32 | # https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/how-elastic-load-balancing-works.html#load-balancer-scheme 33 | USE_INTERNAL_ALB=false 34 | 35 | # Configure custom Availability Zone/s that ALB will route traffic to. 36 | # https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/guide/ingress/annotations/#subnets 37 | # Default: use auto discovery (empty) 38 | ALB_SUBNETS= 39 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: release 2 | 3 | on: 4 | push: 5 | tags: 6 | - "*" 7 | 8 | jobs: 9 | image: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Checkout Code 13 | uses: actions/checkout@v2 14 | 15 | - name: Login to GitHub Container Registry 16 | uses: docker/login-action@v1 17 | with: 18 | registry: ghcr.io 19 | username: ${{ github.repository_owner }} 20 | password: ${{ secrets.GITHUB_TOKEN }} 21 | 22 | - name: Build and Push Docker Image 23 | uses: docker/build-push-action@v2 24 | with: 25 | push: true 26 | tags: | 27 | ghcr.io/${{ github.repository }}:${{ github.sha }} 28 | ghcr.io/${{ github.repository }}:latest 29 | 30 | - name: Export tgz 31 | run: | 32 | docker save ghcr.io/${{ github.repository }}:latest | gzip > gitpod-eks-guide.tgz 33 | 34 | - name: Release 35 | uses: ncipollo/release-action@v1 36 | if: startsWith(github.ref, 'refs/tags/') 37 | with: 38 | artifacts: gitpod-eks-guide.tgz 39 | allowUpdates: true 40 | artifactErrorsFailBuild: true 41 | token: ${{ secrets.GITHUB_TOKEN }} 42 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.js 2 | !jest.config.js 3 | *.d.ts 4 | node_modules 5 | 6 | # CDK asset staging directory 7 | .cdk.staging 8 | cdk.out 9 | 10 | cdk.context.json 11 | 12 | package-lock.json 13 | 14 | gitpod-ingress.yaml 15 | .env 16 | 17 | .kubeconfig* 18 | 19 | logs 20 | 21 | config.json 22 | -------------------------------------------------------------------------------- /.husky/.gitignore: -------------------------------------------------------------------------------- 1 | _ 2 | -------------------------------------------------------------------------------- /.husky/pre-commit: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | . 
"$(dirname "$0")/_/husky.sh" 3 | 4 | npm test 5 | -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | *.ts 2 | !*.d.ts 3 | 4 | # CDK asset staging directory 5 | .cdk.staging 6 | cdk.out 7 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fail_fast: true 3 | 4 | repos: 5 | # Generic repos 6 | - repo: https://github.com/pre-commit/pre-commit-hooks 7 | rev: v4.0.0 8 | hooks: 9 | - id: check-yaml 10 | args: [--allow-multiple-documents] 11 | - id: end-of-file-fixer 12 | - id: trailing-whitespace 13 | # This prevents issues if not managing git-lfs 14 | - id: check-added-large-files 15 | # This prevents problems for multi OS 16 | - id: check-case-conflict 17 | # This prevents having executables that cannot be executed 18 | - id: check-executables-have-shebangs 19 | # Check JSON, XML and YAML 20 | - id: check-json 21 | # Prevents commiting unmergeable files 22 | - id: check-merge-conflict 23 | # Removes spaces at end of lines 24 | - id: trailing-whitespace 25 | args: [--markdown-linebreak-ext=md] 26 | # AWS Creds 27 | - id: detect-aws-credentials 28 | args: [--allow-missing-credentials] 29 | # Prevents commiting to master 30 | # - id: no-commit-to-branch 31 | 32 | # Change tabs for spaces 33 | - repo: https://github.com/Lucas-C/pre-commit-hooks 34 | rev: v1.1.9 35 | hooks: 36 | - id: remove-tabs 37 | args: [--whitespaces-count, "2"] 38 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:edge 2 | 3 | RUN apk add --no-cache \ 4 | bash \ 5 | curl \ 6 | nodejs \ 7 | python3 \ 8 | py3-pip \ 9 | yarn \ 10 | jq \ 11 | npm \ 12 | yq \ 13 | openssl \ 14 | && pip3 install --upgrade pip \ 15 | && pip3 install \ 16 | awscli \ 17 | && rm -rf /root/.cache 18 | 19 | RUN curl -fsSL https://github.com/mikefarah/yq/releases/download/v4.12.0/yq_linux_amd64 -o /usr/local/bin/yq \ 20 | && chmod +x /usr/local/bin/yq 21 | 22 | RUN curl -fsSL https://github.com/weaveworks/eksctl/releases/download/v0.100.0/eksctl_Linux_amd64.tar.gz | tar -xz -C /usr/local/bin 23 | 24 | RUN curl -fsSL "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl" -o /usr/local/bin/kubectl \ 25 | && chmod +x /usr/local/bin/kubectl 26 | 27 | WORKDIR /gitpod 28 | 29 | COPY . 
/gitpod 30 | 31 | RUN yarn --pure-lockfile --non-interactive \ 32 | && rm -rf /usr/local/share/.cache/yarn 33 | 34 | RUN npm install -g aws-cdk ts-node 35 | 36 | ENTRYPOINT ["/gitpod/setup.sh"] 37 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Gitpod GmbH 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL:=help 2 | 3 | # set default shell 4 | SHELL=/bin/bash -o pipefail -o errexit 5 | 6 | IMG=ghcr.io/gitpod-io/gitpod-eks-guide:latest 7 | 8 | # load .env file 9 | ifneq (,$(wildcard ./.env)) 10 | include .env 11 | export 12 | endif 13 | 14 | ifneq ($(IMAGE_PULL_SECRET_FILE),) 15 | IMAGE_PULL_SECRET=--volume $(shell realpath ${IMAGE_PULL_SECRET_FILE}):/gitpod/config.json 16 | else 17 | IMAGE_PULL_SECRET= 18 | endif 19 | 20 | build: ## Build docker image containing the required tools for the installation 21 | @docker build . -t ${IMG} 22 | @mkdir -p ${PWD}/logs 23 | 24 | DOCKER_RUN_CMD = docker run -it \ 25 | --env-file ${PWD}/.env \ 26 | --env NODE_ENV=production \ 27 | --volume ${PWD}/.kubeconfig:/gitpod/.kubeconfig \ 28 | $(IMAGE_PULL_SECRET) \ 29 | --volume ${PWD}/eks-cluster.yaml:/gitpod/eks-cluster.yaml \ 30 | --volume ${PWD}/logs:/root/.npm/_logs \ 31 | --volume ${PWD}/gitpod-config.yaml:/gitpod/gitpod-config.yaml \ 32 | --volume ${PWD}/cdk-outputs.json:/gitpod/cdk-outputs.json \ 33 | --volume ${HOME}/.aws:/root/.aws \ 34 | ${IMG} $(1) 35 | 36 | install: ## Install Gitpod 37 | @echo "Starting install process..." 38 | @touch ${PWD}/.kubeconfig 39 | @touch ${PWD}/gitpod-config.yaml 40 | @touch ${PWD}/cdk-outputs.json 41 | @$(call DOCKER_RUN_CMD, --install) 42 | 43 | uninstall: ## Uninstall Gitpod 44 | @echo "Starting uninstall process..." 45 | @$(call DOCKER_RUN_CMD, --uninstall) 46 | 47 | auth: ## Install OAuth providers 48 | @echo "Installing auth providers..." 
49 | @$(call DOCKER_RUN_CMD, --auth) 50 | 51 | help: ## Display this help 52 | @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z0-9_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) 53 | 54 | .PHONY: build install uninstall auth help 55 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | ## 📣 [IMPORTANT] This repo is being deprecated in favor of the [single cluster reference architecture](https://www.gitpod.io/docs/self-hosted/latest/reference-architecture/single-cluster-ref-arch) and the corresponding [Terraform config](https://github.com/gitpod-io/gitpod/tree/main/install/infra/single-cluster/aws). 3 | 4 | **What?** 5 | 6 | We are deprecating this guide in favor of our [reference architectures](https://www.gitpod.io/docs/self-hosted/latest/reference-architecture) (specifically the [single cluster variant](https://www.gitpod.io/docs/self-hosted/latest/reference-architecture/single-cluster-ref-arch)) that include both a guided walk-through and a `Terraform` configuration. 7 | 8 | **Why?** 9 | 10 | From your feedback, we’ve learned that the guide has several shortcomings: 11 | 12 | - It is not obvious what the guide does: it is more a black box than a sensible starting point for creating the infrastructure that works for you. 13 | - One size fits all: it was not flexible enough if you wish to customize the infrastructure being created. 14 | - No incremental upgrades: If a version of a component changes, you’d have to recreate the infrastructure. 15 | 16 | Due to the feedback above we’ve decided to move to a more open and industry-standard way of speaking about the recommended infrastructure in the form of our new [reference architectures](https://www.gitpod.io/docs/self-hosted/latest/reference-architecture/single-cluster-ref-arch). These are descriptions of what the ideal infrastructure for Gitpod looks like depending on your circumstances. They include both a text version as well as a Terraform configuration that helps you create this infrastructure automatically - similarly to this guide. We believe these provide the following benefits: 17 | 18 | - They are based on a popular `Infrastructure as Code (IaC)` solution (`Terraform`), which should facilitate maintenance for you (and us) via features such as incremental upgrades. 19 | - They are easier to parse, as they are configuration files rather than a script. This should make customizations easier. 20 | - They provide a detailed walkthrough for those that do not want to use Terraform. 21 | - We already leverage these in our nightly testing to provide further validation and reliability of them when used to run Gitpod. 22 | 23 | **Impact?** 24 | 25 | Going forward, Gitpod will only officially support the [reference architectures](https://www.gitpod.io/docs/self-hosted/latest/reference-architecture/single-cluster-ref-arch). If you can, we would advise you to switch towards using these - this would require you to recreate your infrastructure using the new Terraform configurations or guide. Staying on infrastructure created by this guide *should* work going forward, however, we cannot guarantee this in perpetuity. 26 | 27 | —> The Reference Architectures are still in `beta` or `alpha` while we gather more feedback. 
Please do reach out to us on Discord or via [support](https://www.gitpod.io/support) with any problems or feedback. 28 | 29 | ------ 30 | ## Running Gitpod in [Amazon EKS](https://aws.amazon.com/en/eks/) 31 | 32 | > **IMPORTANT** This guide exists as a simple and reliable way of creating the required AWS infrastructure. It 33 | > is not designed to cater for every situation. If you find that it does not meet your exact needs, 34 | > please fork this guide and amend it to your own needs. 35 | 36 | This guide exists as a simple and reliable way of creating an environment in AWS (EKS) that [Gitpod can 37 | be installed](https://www.gitpod.io/docs/self-hosted/latest/getting-started#step-4-install-gitpod) into. Upon completion, it will print the config for the resources created (including passwords) and create the necessary credential files that will allow you to connect the components created to your Gitpod instance during the [next installation step](https://www.gitpod.io/docs/self-hosted/latest/getting-started#step-4-install-gitpod). 38 | 39 | ### Provision an EKS cluster 40 | 41 | Before starting the installation process, you need: 42 | 43 | - An AWS account with Administrator access 44 | - [Create one now by clicking here](https://aws.amazon.com/getting-started/) 45 | - AWS credentials set up. By default, those configs are present in `$HOME/.aws/`. 46 | - An [eksctl](https://eksctl.io/) config file describing the cluster. 47 | - Here is an [eks-cluster.yaml](eks-cluster.yaml) you can use as an example. 48 | - A `.env` file with basic details about the environment. 49 | - We provide an example of such a file [here](.env.example). 50 | - [Docker](https://docs.docker.com/engine/install/) installed on your machine, or better, a Gitpod workspace :) 51 | 52 | ### SSL Certificate 53 | 54 | Create a public SSL/TLS certificate with [AWS Certificate Manager](https://aws.amazon.com/en/certificate-manager/), 55 | valid for the `<domain>`, `*.ws.<domain>` and `*.<domain>` domain names. 56 | 57 | Once the certificate is issued and verified, update the `CERTIFICATE_ARN` field in the `.env` file accordingly. 58 | 59 | ### Choose an Amazon Machine Image (AMI) 60 | 61 | Please update the `ami` field in the [eks-cluster.yaml](eks-cluster.yaml) file with the proper AMI ID for the region of the cluster. 62 | 63 | | Region | AMI | 64 | | ------------ | --------------------- | 65 | | us-west-1 | ami-04e9afc0a981cac90 | 66 | | us-west-2 | ami-009935ddbb32a7f3c | 67 | | eu-west-1 | ami-0f08b4b1a4fd3ebe3 | 68 | | eu-west-2 | ami-05f027fd3d0187541 | 69 | | eu-central-1 | ami-04a8127c830f27712 | 70 | | us-east-1 | ami-076db8ca29c04327b | 71 | | us-east-2 | ami-0ad574da759c55c17 | 72 | 73 | **To start the installation, execute:** 74 | 75 | ```shell 76 | make install 77 | ``` 78 | 79 | **Important: DNS propagation can take several minutes until the configured domain is available!** 80 | 81 | The whole process takes around forty minutes.
In the end, the following resources are created: 82 | 83 | - an EKS cluster running Kubernetes v1.21 84 | - Kubernetes nodes using a custom [AMI image](https://github.com/gitpod-io/amazon-eks-custom-amis/tree/gitpod): 85 | - Ubuntu 21.10 86 | - Linux kernel v5.13 87 | - containerd v1.5.8 88 | - runc: v1.0.1 89 | - CNI plugins: v0.9.1 90 | - Stargz Snapshotter: v0.10.0 91 | 92 | - ALB load balancer with TLS termination and re-encryption 93 | - RDS MySQL database 94 | - Two autoscaling groups, one for Gitpod components and another for workspaces 95 | - In-cluster Docker registry using S3 as the storage backend 96 | - IAM account with S3 access (docker-registry and Gitpod user content) 97 | - [calico](https://docs.projectcalico.org) as CNI and NetworkPolicy implementation 98 | - [cert-manager](https://cert-manager.io/) for self-signed SSL certificates 99 | - [cluster-autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) 100 | - [Jaeger operator](https://github.com/jaegertracing/helm-charts/tree/main/charts/jaeger-operator) - and Jaeger deployment for Gitpod distributed tracing 101 | - [metrics-server](https://github.com/kubernetes-sigs/metrics-server) 102 | - A public DNS zone managed by Route53 (if the `ROUTE53_ZONEID` env variable is configured) 103 | 104 | 105 | ## Update Gitpod auth providers 106 | 107 | Please check the [OAuth providers integration documentation](https://www.gitpod.io/docs/self-hosted/latest/configuration/authentication) for the expected format. 108 | 109 | We provide an [example here](./auth-providers-patch.yaml). Fill it with your OAuth providers' data. 110 | 111 | ```console 112 | make auth 113 | ``` 114 | 115 | > We are aware of the limitations of this approach, and we are working to improve the Helm chart to avoid this step. 116 | 117 | ## Destroy the cluster and AWS resources 118 | 119 | Remove the CloudFormation stacks and the EKS cluster by running: 120 | 121 | ```shell 122 | make uninstall 123 | ``` 124 | 125 | > The command asks for confirmation: 126 | > `Are you sure you want to delete: Gitpod, Services/Registry, Services/RDS, Services, Addons, Setup (y/n)?` 127 | 128 | > Please make sure you delete the S3 bucket used to store the Docker registry images! 129 | -------------------------------------------------------------------------------- /ami/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | this software and associated documentation files (the "Software"), to deal in 5 | the Software without restriction, including without limitation the rights to 6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software is furnished to do so. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 10 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 11 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 12 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 13 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 14 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
15 | 16 | -------------------------------------------------------------------------------- /ami/Makefile: -------------------------------------------------------------------------------- 1 | 2 | PACKER_VARIABLES := binary_bucket_name binary_bucket_region eks_version eks_build_date root_volume_size data_volume_size hardening_flag http_proxy https_proxy no_proxy 3 | VPC_ID := vpc-0e8cf1ce122b1b059 4 | SUBNET_ID := subnet-0eddf1d7d0f9f9772 5 | AWS_REGION := us-west-2 6 | PACKER_FILE := 7 | 8 | EKS_BUILD_DATE := 2020-11-02 9 | EKS_120_VERSION := 1.20.4 10 | EKS_121_VERSION := 1.21.2 11 | 12 | build: 13 | packer build \ 14 | --var 'aws_region=$(AWS_REGION)' \ 15 | $(foreach packerVar,$(PACKER_VARIABLES), $(if $($(packerVar)),--var $(packerVar)='$($(packerVar))',)) \ 16 | $(PACKER_FILE) 17 | 18 | # Ubuntu 21.10 19 | #----------------------------------------------------- 20 | # https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html 21 | 22 | build-ubuntu2110-1.20: 23 | $(MAKE) build PACKER_FILE=amazon-eks-node-ubuntu2110.json eks_version=$(EKS_120_VERSION) eks_build_date=2021-04-12 24 | 25 | build-ubuntu2110-1.21: 26 | $(MAKE) build PACKER_FILE=amazon-eks-node-ubuntu2110.json eks_version=$(EKS_121_VERSION) eks_build_date=2021-07-05 27 | -------------------------------------------------------------------------------- /ami/amazon-eks-node-ubuntu2110.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables":{ 3 | "aws_region":"us-west-2", 4 | "ami_description":"EKS Kubernetes Worker AMI on Ubuntu 21.10 (k8s: {{user `eks_version`}})", 5 | "eks_version":"", 6 | "eks_build_date":"", 7 | "binary_bucket_name": "amazon-eks", 8 | "binary_bucket_region": "us-west-2", 9 | "hardening_flag": "false", 10 | "root_volume_size": "30", 11 | "data_volume_size": "10", 12 | "vpc_id":"", 13 | "subnet_id":"", 14 | "http_proxy": "", 15 | "https_proxy": "", 16 | "no_proxy": "", 17 | 18 | "source_ami_owner": "099720109477", 19 | "source_ami_owner_govcloud": "513442679011", 20 | "source_ami_ssh_user": "ubuntu", 21 | "source_ami_arch":"x86_64", 22 | "source_ami_name":"ubuntu/images/hvm-ssd/ubuntu-impish-21.10-amd64-server-*", 23 | "target_ami_name": "amazon-eks-node-{{user `eks_version`}}-ubuntu-21.10-{{ timestamp }}" 24 | }, 25 | "builders":[ 26 | { 27 | "type":"amazon-ebs", 28 | "region":"{{user `aws_region`}}", 29 | "source_ami_filter":{ 30 | "filters":{ 31 | "name":"{{user `source_ami_name`}}", 32 | "root-device-type":"ebs", 33 | "state":"available", 34 | "virtualization-type":"hvm", 35 | "architecture":"{{user `source_ami_arch`}}" 36 | }, 37 | "owners":[ 38 | "{{user `source_ami_owner`}}", 39 | "{{user `source_ami_owner_govcloud`}}" 40 | ], 41 | "most_recent":true 42 | }, 43 | "instance_type":"m6i.xlarge", 44 | "ssh_username":"{{user `source_ami_ssh_user`}}", 45 | "ssh_pty":true, 46 | "subnet_id":"{{user `subnet_id`}}", 47 | "launch_block_device_mappings":[ 48 | { 49 | "device_name":"/dev/sda1", 50 | "volume_size": "{{user `root_volume_size`}}", 51 | "volume_type":"gp3", 52 | "delete_on_termination":true 53 | }, 54 | { 55 | "device_name":"/dev/sdb", 56 | "volume_size":"{{user `data_volume_size`}}", 57 | "volume_type":"gp3", 58 | "delete_on_termination":true 59 | } 60 | ], 61 | "ami_block_device_mappings":[ 62 | { 63 | "device_name":"/dev/sdb", 64 | "volume_size":"{{user `data_volume_size`}}", 65 | "volume_type":"gp3", 66 | "delete_on_termination":true 67 | } 68 | ], 69 | "tags":{ 70 | "Name":"{{user `target_ami_name`}}", 71 | "BuildDate":"{{ isotime }}" 
72 | }, 73 | "ami_name":"{{user `target_ami_name`}}", 74 | "ami_description":"{{user `ami_description` }}", 75 | "ami_virtualization_type":"hvm", 76 | "run_tags":{ 77 | "Name":"packer-{{user `target_ami_name`}}" 78 | } 79 | } 80 | ], 81 | "provisioners":[ 82 | { 83 | "type": "shell", 84 | "inline": [ 85 | "mkdir -p /etc/packer/files", 86 | "chown -R {{user `source_ami_ssh_user`}}:{{user `source_ami_ssh_user`}} /etc/packer/files" 87 | ], 88 | "execute_command":"echo 'packer' | {{.Vars}} sudo -S -E bash -eux '{{.Path}}'" 89 | }, 90 | { 91 | "type": "file", 92 | "source": "./files/", 93 | "destination": "/etc/packer/files" 94 | }, 95 | { 96 | "type":"shell", 97 | "scripts": [ 98 | "./scripts/ubuntu2004/boilerplate.sh" 99 | ], 100 | "environment_vars": [ 101 | "HTTP_PROXY={{user `http_proxy`}}", 102 | "HTTPS_PROXY={{user `https_proxy`}}", 103 | "NO_PROXY={{user `no_proxy`}}", 104 | "KUBERNETES_VERSION={{user `eks_version`}}", 105 | "KUBERNETES_BUILD_DATE={{user `eks_build_date`}}", 106 | "BINARY_BUCKET_NAME={{user `binary_bucket_name`}}", 107 | "BINARY_BUCKET_REGION={{user `binary_bucket_region`}}", 108 | "HARDENING_FLAG={{user `hardening_flag`}}" 109 | ], 110 | "execute_command":"echo 'packer' | {{.Vars}} sudo -S -E bash -eux '{{.Path}}'", 111 | "expect_disconnect":true, 112 | "pause_after":"30s" 113 | }, 114 | { 115 | "type":"shell", 116 | "scripts":[ 117 | "./scripts/shared/docker.sh", 118 | "./scripts/shared/eks.sh", 119 | "./scripts/shared/cis-eks.sh" 120 | ], 121 | "environment_vars": [ 122 | "HTTP_PROXY={{user `http_proxy`}}", 123 | "HTTPS_PROXY={{user `https_proxy`}}", 124 | "NO_PROXY={{user `no_proxy`}}", 125 | "KUBERNETES_VERSION={{user `eks_version`}}", 126 | "KUBERNETES_BUILD_DATE={{user `eks_build_date`}}", 127 | "BINARY_BUCKET_NAME={{user `binary_bucket_name`}}", 128 | "BINARY_BUCKET_REGION={{user `binary_bucket_region`}}", 129 | "HARDENING_FLAG={{user `hardening_flag`}}" 130 | ], 131 | "execute_command":"echo 'packer' | {{.Vars}} sudo -S -E bash -eux '{{.Path}}'", 132 | "expect_disconnect":true, 133 | "pause_after":"30s" 134 | }, 135 | { 136 | "type":"shell", 137 | "scripts":[ 138 | "./scripts/ubuntu2004/shiftfs.sh" 139 | ], 140 | "execute_command":"echo 'packer' | {{.Vars}} sudo -S -E bash -eux '{{.Path}}'", 141 | "expect_disconnect":true, 142 | "pause_after":"30s" 143 | }, 144 | { 145 | "type":"shell", 146 | "scripts": [ 147 | "./scripts/ubuntu2004/hardening.sh", 148 | "./scripts/ubuntu2004/cleanup.sh" 149 | ], 150 | "environment_vars": [ 151 | "HTTP_PROXY={{user `http_proxy`}}", 152 | "HTTPS_PROXY={{user `https_proxy`}}", 153 | "NO_PROXY={{user `no_proxy`}}", 154 | "KUBERNETES_VERSION={{user `eks_version`}}", 155 | "KUBERNETES_BUILD_DATE={{user `eks_build_date`}}", 156 | "BINARY_BUCKET_NAME={{user `binary_bucket_name`}}", 157 | "BINARY_BUCKET_REGION={{user `binary_bucket_region`}}", 158 | "HARDENING_FLAG={{user `hardening_flag`}}" 159 | ], 160 | "execute_command":"echo 'packer' | {{.Vars}} sudo -S -E bash -eux '{{.Path}}'" 161 | } 162 | ] 163 | } 164 | -------------------------------------------------------------------------------- /ami/copy-image.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | SOURCE_AMI=$1 4 | if [ -z "$SOURCE_AMI" ]; then 5 | echo -e "Please provider a valid AMI image" 6 | exit 1 7 | fi 8 | 9 | set -euo pipefail 10 | 11 | SOURCE_REGION=us-west-2 12 | TARGET_REGIONS=( 13 | us-west-1 14 | eu-west-1 15 | eu-west-2 16 | eu-central-1 17 | us-east-1 18 | us-east-2 19 | ) 20 | 21 | if ! 
aws ec2 describe-images --region us-west-2 --image-ids "${SOURCE_AMI}" >/dev/null 2>&1; then 22 | echo "The AMI image with ID ${SOURCE_AMI} does not exist." 23 | exit 1 24 | fi 25 | 26 | NAME=$(aws ec2 describe-images --region us-west-2 --image-ids "${SOURCE_AMI}" --query 'Images[*].[Name]' --output text) 27 | 28 | for TO_REGION in ${TARGET_REGIONS[*]};do 29 | aws ec2 copy-image \ 30 | --name "$NAME" \ 31 | --source-image-id "${SOURCE_AMI}" \ 32 | --source-region "${SOURCE_REGION}" \ 33 | --region "${TO_REGION}" \ 34 | --output text 35 | done 36 | -------------------------------------------------------------------------------- /ami/files/functions.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ################################################################ 4 | # Wait for the cloud-init process to finish before moving 5 | # to the next step. 6 | # 7 | # Globals: 8 | # None 9 | # Arguments: 10 | # None 11 | # Outputs: 12 | # 0 finishes when the cloud-init process is complete 13 | ################################################################ 14 | wait_for_cloudinit() { 15 | cloud-init status --wait 16 | } 17 | 18 | get_arch() { 19 | local machine_arch=$(uname -m) 20 | 21 | if [ "$machine_arch" == "x86_64" ]; then 22 | echo "amd64" 23 | elif [ "$machine_arch" == "aarch64" ]; then 24 | echo "arm64" 25 | else 26 | echo "Unknown machine architecture '$MACHINE'" >&2 27 | exit 1 28 | fi 29 | } 30 | 31 | ################################################################ 32 | # Install the AWS CLI based on the CPU architecture. 33 | # 34 | # Globals: 35 | # None 36 | # Arguments: 37 | # None 38 | # Outputs: 39 | # 0 after a successful installation 40 | ################################################################ 41 | install_awscliv2() { 42 | local awscli_package_name="awscliv2.zip" 43 | 44 | # download the awscli package from aws 45 | curl -sL -o $awscli_package_name "https://awscli.amazonaws.com/awscli-exe-linux-$(uname -m).zip" 46 | 47 | # unzip the package 48 | unzip $awscli_package_name 49 | 50 | # install the aws cli package 51 | ./aws/install -i /usr/local/aws-cli -b /usr/bin 52 | 53 | # cleanup the installer 54 | rm -f $awscli_package_name 55 | } 56 | 57 | ################################################################ 58 | # Test if it is Amazon Linux 2 59 | # 60 | # Globals: 61 | # None 62 | # Arguments: 63 | # None 64 | # Outputs: 65 | # 0 - true 66 | # 1 - false 67 | ################################################################ 68 | is_amazonlinux2() { 69 | [[ $(lsb_release -sd) == "\"Amazon Linux release 2"* ]] 70 | } 71 | 72 | ################################################################ 73 | # Test if it is Ubuntu based released 74 | # 75 | # Globals: 76 | # None 77 | # Arguments: 78 | # None 79 | # Outputs: 80 | # 0 - true 81 | # 1 - false 82 | ################################################################ 83 | is_ubuntu() { 84 | [[ $(lsb_release -sd) == "Ubuntu"* ]] 85 | } 86 | 87 | ################################################################ 88 | # Test if it is Ubuntu 18.04 89 | # 90 | # Globals: 91 | # None 92 | # Arguments: 93 | # None 94 | # Outputs: 95 | # 0 - true 96 | # 1 - false 97 | ################################################################ 98 | is_ubuntu_18() { 99 | [[ $(lsb_release -sd) == "Ubuntu 18.04"* ]] 100 | } 101 | 102 | ################################################################ 103 | # Test if it is Ubuntu 20.04 104 | # 105 | # Globals: 106 | # None 107 | # 
Arguments: 108 | # None 109 | # Outputs: 110 | # 0 - true 111 | # 1 - false 112 | ################################################################ 113 | is_ubuntu_20() { 114 | [[ $(lsb_release -sd) = "Ubuntu 20.04"* ]] 115 | } 116 | 117 | ################################################################ 118 | # Test if it is Red Hat Enterprise Linux 119 | # 120 | # Globals: 121 | # None 122 | # Arguments: 123 | # None 124 | # Outputs: 125 | # 0 - true 126 | # 1 - false 127 | ################################################################ 128 | is_rhel() { 129 | [[ $(lsb_release -sd) == "\"Red Hat"* ]] 130 | } 131 | 132 | ################################################################ 133 | # Test if it is Red Hat Enterprise Linux 7 134 | # 135 | # Globals: 136 | # None 137 | # Arguments: 138 | # None 139 | # Outputs: 140 | # 0 - true 141 | # 1 - false 142 | ################################################################ 143 | is_rhel_7() { 144 | [[ $(lsb_release -sd) == "\"Red Hat Enterprise Linux Server release 7"* ]] 145 | } 146 | 147 | ################################################################ 148 | # Test if it is Red Hat Enterprise Linux 8 149 | # 150 | # Globals: 151 | # None 152 | # Arguments: 153 | # None 154 | # Outputs: 155 | # 0 - true 156 | # 1 - false 157 | ################################################################ 158 | is_rhel_8() { 159 | [[ $(lsb_release -sd) == "\"Red Hat Enterprise Linux release 8"* ]] 160 | } 161 | 162 | ################################################################ 163 | # Test if it is CentOS based release 164 | # 165 | # Globals: 166 | # None 167 | # Arguments: 168 | # None 169 | # Outputs: 170 | # 0 - true 171 | # 1 - false 172 | ################################################################ 173 | is_centos() { 174 | [[ $(lsb_release -sd) == "\"CentOS"* ]] 175 | } 176 | 177 | ################################################################ 178 | # Test if it is CentOS 7 release 179 | # 180 | # Globals: 181 | # None 182 | # Arguments: 183 | # None 184 | # Outputs: 185 | # 0 - true 186 | # 1 - false 187 | ################################################################ 188 | is_centos_7() { 189 | [[ $(lsb_release -sd) == "\"CentOS Linux release 7"* ]] 190 | } 191 | 192 | ################################################################ 193 | # Test if it is CentOS 8 release 194 | # 195 | # Globals: 196 | # None 197 | # Arguments: 198 | # None 199 | # Outputs: 200 | # 0 - true 201 | # 1 - false 202 | ################################################################ 203 | is_centos_8() { 204 | [[ $(lsb_release -sd) == "\"CentOS Linux release 8"* ]] 205 | } 206 | 207 | ################################################################ 208 | # Install the AWS SSM agent based on the operating system 209 | # 210 | # Globals: 211 | # None 212 | # Arguments: 213 | # None 214 | # Outputs: 215 | # 0 after a successful installation 216 | ################################################################ 217 | install_ssmagent() { 218 | if is_ubuntu; then 219 | snap install amazon-ssm-agent --classic 220 | systemctl enable snap.amazon-ssm-agent.amazon-ssm-agent.service 221 | systemctl start snap.amazon-ssm-agent.amazon-ssm-agent.service 222 | else 223 | yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm 224 | systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent 225 | fi 226 | 227 | } 228 | 229 | ################################################################ 230 | # Install 
the OpenSCAP based on the operating system 231 | # 232 | # Globals: 233 | # None 234 | # Arguments: 235 | # None 236 | # Outputs: 237 | # 0 after a successful installation 238 | ################################################################ 239 | install_openscap() { 240 | if is_rhel || is_centos; then 241 | yum install -y openscap openscap-scanner scap-security-guide 242 | elif is_amazonlinux2; then 243 | yum install -y openscap openscap-scanner scap-security-guide 244 | elif is_ubuntu; then 245 | apt-get install -y libopenscap8 ssg-debian ssg-debderived 246 | else 247 | echo "failed to install the openscap libraries" 248 | exit 1 249 | fi 250 | } 251 | 252 | ################################################################ 253 | # Install jq based on the operating system 254 | # 255 | # Globals: 256 | # None 257 | # Arguments: 258 | # None 259 | # Outputs: 260 | # None 261 | ################################################################ 262 | install_jq() { 263 | curl -sL -o /usr/bin/jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 264 | chmod +x /usr/bin/jq 265 | } 266 | 267 | ################################################################ 268 | # Install iptables-restore service 269 | # 270 | # Globals: 271 | # None 272 | # Arguments: 273 | # None 274 | # Outputs: 275 | # None 276 | ################################################################ 277 | install_iptables_restore() { 278 | mkdir -p /etc/sysconfig 279 | bash -c "/sbin/iptables-save > /etc/sysconfig/iptables" 280 | curl -sL -o /etc/systemd/system/iptables-restore.service https://raw.githubusercontent.com/awslabs/amazon-eks-ami/master/files/iptables-restore.service 281 | systemctl daemon-reload && systemctl enable iptables-restore 282 | } 283 | 284 | ################################################################ 285 | # Generate the OpenSCAP fix shell script to harden to the 286 | # operating system 287 | # 288 | # Globals: 289 | # None 290 | # Arguments: 291 | # 1 - the openscap data source file path 292 | # 2 - the openscap profile name 293 | # 3 - (optional) the openscap tailoring file 294 | # Outputs: 295 | # None 296 | ################################################################ 297 | oscap_generate_fix() { 298 | local oscap_source=$1 299 | local oscap_profile=$2 300 | local oscap_tailoring_file=${3:-} 301 | 302 | # install openscap dependencies 303 | install_openscap 304 | 305 | # check if the tailoring file is provided 306 | if [ ! -z "${oscap_tailoring_file}" ]; then 307 | 308 | oscap xccdf generate fix \ 309 | --output /etc/packer/hardening.sh \ 310 | --tailoring-file $oscap_tailoring_file \ 311 | --profile $oscap_profile \ 312 | --fetch-remote-resources $oscap_source 313 | 314 | else 315 | 316 | oscap xccdf generate fix \ 317 | --output /etc/packer/hardening.sh \ 318 | --profile $oscap_profile \ 319 | --fetch-remote-resources $oscap_source 320 | fi 321 | } 322 | 323 | ################################################################ 324 | # Migrate existing folder to a new partition 325 | # 326 | # Globals: 327 | # None 328 | # Arguments: 329 | # 1 - the path of the disk or partition 330 | # 2 - the folder path to migration 331 | # 3 - the mount options to use. 
332 | # Outputs: 333 | # None 334 | ################################################################ 335 | migrate_and_mount_disk() { 336 | local disk_name=$1 337 | local folder_path=$2 338 | local mount_options=$3 339 | local temp_path="/mnt${folder_path}" 340 | local old_path="${folder_path}-old" 341 | 342 | # install an xfs filesystem to the disk 343 | mkfs -t xfs ${disk_name} 344 | 345 | # check if the folder already exists 346 | if [ -d "${folder_path}" ]; then 347 | mkdir -p ${temp_path} 348 | mount ${disk_name} ${temp_path} 349 | cp -Rax ${folder_path}/* ${temp_path} 350 | mv ${folder_path} ${old_path} 351 | umount ${disk_name} 352 | fi 353 | 354 | # create the folder 355 | mkdir -p ${folder_path} 356 | 357 | # add the mount point to fstab and mount the disk 358 | echo "UUID=$(blkid -s UUID -o value "${disk_name}") ${folder_path} xfs ${mount_options} 0 1" >> /etc/fstab 359 | mount -a 360 | } 361 | 362 | ################################################################ 363 | # Partition the disks based on the standard layout for common 364 | # hardening frameworks 365 | # 366 | # Globals: 367 | # None 368 | # Arguments: 369 | # 1 - the name of the disk 370 | # Outputs: 371 | # None 372 | ################################################################ 373 | partition_disks() { 374 | local disk_name=$1 375 | 376 | # partition the disk 377 | parted -a optimal -s "$disk_name" \ 378 | mklabel gpt \ 379 | mkpart home xfs 0% 90% 380 | 381 | # wait for the disks to settle 382 | sleep 5 383 | 384 | # migrate and mount the existing 385 | migrate_and_mount_disk "${disk_name}p1" /home defaults,nofail,nodev,nosuid 386 | } 387 | 388 | ################################################################ 389 | # Configure the host with HTTP_PROXY, HTTPS_PROXY, and NO_PROXY 390 | # by setting values in /etc/environment 391 | # 392 | # Globals: 393 | # None 394 | # Arguments: 395 | # None 396 | # Outputs: 397 | # None 398 | ################################################################ 399 | configure_http_proxy() { 400 | touch /etc/environment 401 | 402 | if [ ! -z "${HTTP_PROXY}" ]; then 403 | echo "http_proxy=${HTTP_PROXY}" >> /etc/environment 404 | echo "HTTP_PROXY=${HTTP_PROXY}" >> /etc/environment 405 | fi 406 | 407 | if [ ! -z "${HTTPS_PROXY}" ]; then 408 | echo "https_proxy=${HTTPS_PROXY}" >> /etc/environment 409 | echo "HTTPS_PROXY=${HTTPS_PROXY}" >> /etc/environment 410 | fi 411 | 412 | if [ ! -z "${NO_PROXY}" ]; then 413 | echo "no_proxy=${NO_PROXY}" >> /etc/environment 414 | echo "NO_PROXY=${NO_PROXY}" >> /etc/environment 415 | fi 416 | } 417 | 418 | configure_docker_environment() { 419 | local docker_dir="/etc/systemd/system/docker.service.d" 420 | local docker_env_file="${docker_dir}/environment.conf" 421 | 422 | mkdir -p "${docker_dir}" 423 | echo "[Service]" >> "${docker_env_file}" 424 | echo "EnvironmentFile=/etc/environment" >> "${docker_env_file}" 425 | } 426 | 427 | configure_kubelet_environment() { 428 | local kubelet_dir="/etc/systemd/system/kubelet.service.d" 429 | local kubelet_env_file="${kubelet_dir}/environment.conf" 430 | 431 | mkdir -p "${kubelet_dir}" 432 | echo "[Service]" >> "${kubelet_env_file}" 433 | echo "EnvironmentFile=/etc/environment" >> "${kubelet_env_file}" 434 | } 435 | 436 | ################################################################ 437 | # Enable FIPS 140-2 mode on the operating system 438 | # 439 | # Globals: 440 | # None 441 | # Arguments: 442 | # None 443 | # Outputs: 444 | # None 445 | ################################################################ 446 | 
enable_fips() { 447 | if is_rhel_7; then 448 | 449 | # install dependencies 450 | yum install -y dracut-fips-aesni dracut-fips 451 | 452 | # we will configure FIPS ourselves as the generated STIG locks the OS 453 | # configure dracut-fips 454 | dracut -f 455 | 456 | # udpate the kernel settings 457 | grubby --update-kernel=ALL --args="fips=1" 458 | 459 | # configure this to meet the stig checker 460 | sed -i "/^GRUB_CMDLINE_LINUX/ s/\"$/ fips=1\"/" /etc/default/grub 461 | 462 | # set the ssh ciphers 463 | sed -i 's/^Cipher.*/Ciphers aes128-ctr,aes192-ctr,aes256-ctr/' /etc/ssh/sshd_config 464 | sed -i 's/^MACs.*/MACs hmac-sha2-256,hmac-sha2-512/' /etc/ssh/sshd_config 465 | 466 | elif is_rhel_8; then 467 | fips-mode-setup --enable 468 | else 469 | echo "FIPS 140-2 is not supported on this operating system." 470 | exit 1 471 | fi 472 | } 473 | -------------------------------------------------------------------------------- /ami/files/gitpod/airgap.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o pipefail 4 | set -o nounset 5 | set -o errexit 6 | 7 | DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd -P) 8 | 9 | # Extract images from nodes: 10 | # kubectl get nodes -o json | jq --raw-output '.items[].status.images[].names | .[]' 11 | IMAGES=$(cat "${DIR}/airgap-images.txt") 12 | 13 | # Download images 14 | xargs -n1 nerdctl --namespace k8s.io pull <<< "${IMAGES}" 15 | -------------------------------------------------------------------------------- /ami/files/gitpod/bootstrap.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o pipefail 4 | set -o nounset 5 | set -o errexit 6 | 7 | err_report() { 8 | echo "Exited with error on line $1" 9 | } 10 | trap 'err_report $LINENO' ERR 11 | 12 | IFS=$'\n\t' 13 | 14 | function print_help { 15 | echo "usage: $0 [options] " 16 | echo "Bootstraps an instance into an EKS cluster" 17 | echo "" 18 | echo "-h,--help print this help" 19 | echo "--use-max-pods Sets --max-pods for the kubelet when true. (default: true)" 20 | echo "--b64-cluster-ca The base64 encoded cluster CA content. Only valid when used with --apiserver-endpoint. Bypasses calling \"aws eks describe-cluster\"" 21 | echo "--apiserver-endpoint The EKS cluster API Server endpoint. Only valid when used with --b64-cluster-ca. Bypasses calling \"aws eks describe-cluster\"" 22 | echo "--kubelet-extra-args Extra arguments to add to the kubelet. Useful for adding labels or taints." 23 | echo "--enable-docker-bridge Restores the docker default bridge network. (default: false)" 24 | echo "--aws-api-retry-attempts Number of retry attempts for AWS API call (DescribeCluster) (default: 3)" 25 | echo "--docker-config-json The contents of the /etc/docker/daemon.json file. Useful if you want a custom config differing from the default one in the AMI" 26 | echo "--dns-cluster-ip Overrides the IP address to use for DNS queries within the cluster. 
Defaults to 10.100.0.10 or 172.20.0.10 based on the IP address of the primary interface" 27 | echo "--pause-container-account The AWS account (number) to pull the pause container from" 28 | echo "--pause-container-version The tag of the pause container" 29 | echo "--container-runtime Specify a container runtime (default: dockerd)" 30 | } 31 | 32 | POSITIONAL=() 33 | 34 | while [[ $# -gt 0 ]]; do 35 | key="$1" 36 | case $key in 37 | -h|--help) 38 | print_help 39 | exit 1 40 | ;; 41 | --use-max-pods) 42 | USE_MAX_PODS="$2" 43 | shift 44 | shift 45 | ;; 46 | --b64-cluster-ca) 47 | B64_CLUSTER_CA=$2 48 | shift 49 | shift 50 | ;; 51 | --apiserver-endpoint) 52 | APISERVER_ENDPOINT=$2 53 | shift 54 | shift 55 | ;; 56 | --kubelet-extra-args) 57 | KUBELET_EXTRA_ARGS=$2 58 | shift 59 | shift 60 | ;; 61 | --enable-docker-bridge) 62 | ENABLE_DOCKER_BRIDGE=$2 63 | shift 64 | shift 65 | ;; 66 | --aws-api-retry-attempts) 67 | API_RETRY_ATTEMPTS=$2 68 | shift 69 | shift 70 | ;; 71 | --docker-config-json) 72 | DOCKER_CONFIG_JSON=$2 73 | shift 74 | shift 75 | ;; 76 | --pause-container-account) 77 | PAUSE_CONTAINER_ACCOUNT=$2 78 | shift 79 | shift 80 | ;; 81 | --pause-container-version) 82 | PAUSE_CONTAINER_VERSION=$2 83 | shift 84 | shift 85 | ;; 86 | --dns-cluster-ip) 87 | DNS_CLUSTER_IP=$2 88 | shift 89 | shift 90 | ;; 91 | --container-runtime) 92 | CONTAINER_RUNTIME=$2 93 | shift 94 | shift 95 | ;; 96 | *) # unknown option 97 | POSITIONAL+=("$1") # save it in an array for later 98 | shift # past argument 99 | ;; 100 | esac 101 | done 102 | 103 | set +u 104 | set -- "${POSITIONAL[@]}" # restore positional parameters 105 | CLUSTER_NAME="$1" 106 | set -u 107 | 108 | USE_MAX_PODS="${USE_MAX_PODS:-false}" 109 | B64_CLUSTER_CA="${B64_CLUSTER_CA:-}" 110 | APISERVER_ENDPOINT="${APISERVER_ENDPOINT:-}" 111 | SERVICE_IPV4_CIDR="${SERVICE_IPV4_CIDR:-}" 112 | DNS_CLUSTER_IP="${DNS_CLUSTER_IP:-}" 113 | KUBELET_EXTRA_ARGS="${KUBELET_EXTRA_ARGS:-}" 114 | ENABLE_DOCKER_BRIDGE="${ENABLE_DOCKER_BRIDGE:-false}" 115 | API_RETRY_ATTEMPTS="${API_RETRY_ATTEMPTS:-3}" 116 | DOCKER_CONFIG_JSON="${DOCKER_CONFIG_JSON:-}" 117 | PAUSE_CONTAINER_VERSION="${PAUSE_CONTAINER_VERSION:-3.1-eksbuild.1}" 118 | CONTAINER_RUNTIME="${CONTAINER_RUNTIME:-containerd}" 119 | 120 | function get_pause_container_account_for_region () { 121 | local region="$1" 122 | case "${region}" in 123 | ap-east-1) 124 | echo "${PAUSE_CONTAINER_ACCOUNT:-800184023465}";; 125 | me-south-1) 126 | echo "${PAUSE_CONTAINER_ACCOUNT:-558608220178}";; 127 | cn-north-1) 128 | echo "${PAUSE_CONTAINER_ACCOUNT:-918309763551}";; 129 | cn-northwest-1) 130 | echo "${PAUSE_CONTAINER_ACCOUNT:-961992271922}";; 131 | us-gov-west-1) 132 | echo "${PAUSE_CONTAINER_ACCOUNT:-013241004608}";; 133 | us-gov-east-1) 134 | echo "${PAUSE_CONTAINER_ACCOUNT:-151742754352}";; 135 | af-south-1) 136 | echo "${PAUSE_CONTAINER_ACCOUNT:-877085696533}";; 137 | eu-south-1) 138 | echo "${PAUSE_CONTAINER_ACCOUNT:-590381155156}";; 139 | *) 140 | echo "${PAUSE_CONTAINER_ACCOUNT:-602401143452}";; 141 | esac 142 | } 143 | 144 | function _get_token() { 145 | local token_result= 146 | local http_result= 147 | 148 | token_result=$(curl -s -w "\n%{http_code}" -X PUT -H "X-aws-ec2-metadata-token-ttl-seconds: 600" "http://169.254.169.254/latest/api/token") 149 | http_result=$(echo "$token_result" | tail -n 1) 150 | if [[ "$http_result" != "200" ]] 151 | then 152 | echo -e "Failed to get token:\n$token_result" 153 | return 1 154 | else 155 | echo "$token_result" | head -n 1 156 | return 0 157 | fi 158 | } 159 | 160 | 
function get_token() { 161 | local token= 162 | local retries=20 163 | local result=1 164 | 165 | while [[ retries -gt 0 && $result -ne 0 ]] 166 | do 167 | retries=$[$retries-1] 168 | token=$(_get_token) 169 | result=$? 170 | [[ $result != 0 ]] && sleep 5 171 | done 172 | [[ $result == 0 ]] && echo "$token" 173 | return $result 174 | } 175 | 176 | function _get_meta_data() { 177 | local path=$1 178 | local metadata_result= 179 | 180 | metadata_result=$(curl -s -w "\n%{http_code}" -H "X-aws-ec2-metadata-token: $TOKEN" http://169.254.169.254/$path) 181 | http_result=$(echo "$metadata_result" | tail -n 1) 182 | if [[ "$http_result" != "200" ]] 183 | then 184 | echo -e "Failed to get metadata:\n$metadata_result\nhttp://169.254.169.254/$path\n$TOKEN" 185 | return 1 186 | else 187 | local lines=$(echo "$metadata_result" | wc -l) 188 | echo "$metadata_result" | head -n $(( lines - 1 )) 189 | return 0 190 | fi 191 | } 192 | 193 | function get_meta_data() { 194 | local metadata= 195 | local path=$1 196 | local retries=20 197 | local result=1 198 | 199 | while [[ retries -gt 0 && $result -ne 0 ]] 200 | do 201 | retries=$[$retries-1] 202 | metadata=$(_get_meta_data $path) 203 | result=$? 204 | [[ $result != 0 ]] && TOKEN=$(get_token) 205 | done 206 | [[ $result == 0 ]] && echo "$metadata" 207 | return $result 208 | } 209 | 210 | # Helper function which calculates the amount of the given resource (either CPU or memory) 211 | # to reserve in a given resource range, specified by a start and end of the range and a percentage 212 | # of the resource to reserve. Note that we return zero if the start of the resource range is 213 | # greater than the total resource capacity on the node. Additionally, if the end range exceeds the total 214 | # resource capacity of the node, we use the total resource capacity as the end of the range. 215 | # Args: 216 | # $1 total available resource on the worker node in input unit (either millicores for CPU or Mi for memory) 217 | # $2 start of the resource range in input unit 218 | # $3 end of the resource range in input unit 219 | # $4 percentage of range to reserve in percent*100 (to allow for two decimal digits) 220 | # Return: 221 | # amount of resource to reserve in input unit 222 | get_resource_to_reserve_in_range() { 223 | local total_resource_on_instance=$1 224 | local start_range=$2 225 | local end_range=$3 226 | local percentage=$4 227 | resources_to_reserve="0" 228 | if (( $total_resource_on_instance > $start_range )); then 229 | resources_to_reserve=$(((($total_resource_on_instance < $end_range ? \ 230 | $total_resource_on_instance : $end_range) - $start_range) * $percentage / 100 / 100)) 231 | fi 232 | echo $resources_to_reserve 233 | } 234 | 235 | # Calculates the amount of memory to reserve for kubeReserved in mebibytes. KubeReserved is a function of pod 236 | # density so we are calculating the amount of memory to reserve for Kubernetes systems daemons by 237 | # considering the maximum number of pods this instance type supports. 238 | # Args: 239 | # $1 the max number of pods per instance type (MAX_PODS) based on values from /etc/eks/eni-max-pods.txt 240 | # Return: 241 | # memory to reserve in Mi for the kubelet 242 | get_memory_mebibytes_to_reserve() { 243 | local max_num_pods=$1 244 | memory_to_reserve=$((11 * $max_num_pods + 255)) 245 | echo $memory_to_reserve 246 | } 247 | 248 | # Calculates the amount of CPU to reserve for kubeReserved in millicores from the total number of vCPUs available on the instance. 
249 | # From the total core capacity of this worker node, we calculate the CPU resources to reserve by reserving a percentage 250 | # of the available cores in each range up to the total number of cores available on the instance. 251 | # We are using these CPU ranges from GKE (https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture#node_allocatable): 252 | # 6% of the first core 253 | # 1% of the next core (up to 2 cores) 254 | # 0.5% of the next 2 cores (up to 4 cores) 255 | # 0.25% of any cores above 4 cores 256 | # Return: 257 | # CPU resources to reserve in millicores (m) 258 | get_cpu_millicores_to_reserve() { 259 | local total_cpu_on_instance=$(($(nproc) * 1000)) 260 | local cpu_ranges=(0 1000 2000 4000 $total_cpu_on_instance) 261 | local cpu_percentage_reserved_for_ranges=(600 100 50 25) 262 | cpu_to_reserve="0" 263 | for i in ${!cpu_percentage_reserved_for_ranges[@]}; do 264 | local start_range=${cpu_ranges[$i]} 265 | local end_range=${cpu_ranges[(($i+1))]} 266 | local percentage_to_reserve_for_range=${cpu_percentage_reserved_for_ranges[$i]} 267 | cpu_to_reserve=$(($cpu_to_reserve + \ 268 | $(get_resource_to_reserve_in_range $total_cpu_on_instance $start_range $end_range $percentage_to_reserve_for_range))) 269 | done 270 | echo $cpu_to_reserve 271 | } 272 | 273 | if [ -z "$CLUSTER_NAME" ]; then 274 | echo "CLUSTER_NAME is not defined" 275 | exit 1 276 | fi 277 | 278 | 279 | TOKEN=$(get_token) 280 | AWS_DEFAULT_REGION=$(get_meta_data 'latest/dynamic/instance-identity/document' | jq .region -r) 281 | AWS_SERVICES_DOMAIN=$(get_meta_data '2018-09-24/meta-data/services/domain') 282 | 283 | MACHINE=$(uname -m) 284 | if [[ "$MACHINE" != "x86_64" && "$MACHINE" != "aarch64" ]]; then 285 | echo "Unknown machine architecture '$MACHINE'" >&2 286 | exit 1 287 | fi 288 | 289 | PAUSE_CONTAINER_ACCOUNT=$(get_pause_container_account_for_region "${AWS_DEFAULT_REGION}") 290 | PAUSE_CONTAINER_IMAGE=${PAUSE_CONTAINER_IMAGE:-$PAUSE_CONTAINER_ACCOUNT.dkr.ecr.$AWS_DEFAULT_REGION.$AWS_SERVICES_DOMAIN/eks/pause} 291 | PAUSE_CONTAINER="$PAUSE_CONTAINER_IMAGE:$PAUSE_CONTAINER_VERSION" 292 | 293 | ### kubelet kubeconfig 294 | 295 | CA_CERTIFICATE_DIRECTORY=/etc/kubernetes/pki 296 | CA_CERTIFICATE_FILE_PATH=$CA_CERTIFICATE_DIRECTORY/ca.crt 297 | mkdir -p $CA_CERTIFICATE_DIRECTORY 298 | if [[ -z "${B64_CLUSTER_CA}" ]] || [[ -z "${APISERVER_ENDPOINT}" ]]; then 299 | DESCRIBE_CLUSTER_RESULT="/tmp/describe_cluster_result.txt" 300 | 301 | # Retry the DescribeCluster API for API_RETRY_ATTEMPTS 302 | for attempt in `seq 0 $API_RETRY_ATTEMPTS`; do 303 | rc=0 304 | if [[ $attempt -gt 0 ]]; then 305 | echo "Attempt $attempt of $API_RETRY_ATTEMPTS" 306 | fi 307 | 308 | aws eks wait cluster-active \ 309 | --region=${AWS_DEFAULT_REGION} \ 310 | --name=${CLUSTER_NAME} 311 | 312 | aws eks describe-cluster \ 313 | --region=${AWS_DEFAULT_REGION} \ 314 | --name=${CLUSTER_NAME} \ 315 | --output=text \ 316 | --query 'cluster.{certificateAuthorityData: certificateAuthority.data, endpoint: endpoint, kubernetesNetworkConfig: kubernetesNetworkConfig.serviceIpv4Cidr}' > $DESCRIBE_CLUSTER_RESULT || rc=$? 
317 | if [[ $rc -eq 0 ]]; then 318 | break 319 | fi 320 | if [[ $attempt -eq $API_RETRY_ATTEMPTS ]]; then 321 | exit $rc 322 | fi 323 | jitter=$((1 + RANDOM % 10)) 324 | sleep_sec="$(( $(( 5 << $((1+$attempt)) )) + $jitter))" 325 | sleep $sleep_sec 326 | done 327 | B64_CLUSTER_CA=$(cat $DESCRIBE_CLUSTER_RESULT | awk '{print $1}') 328 | APISERVER_ENDPOINT=$(cat $DESCRIBE_CLUSTER_RESULT | awk '{print $2}') 329 | SERVICE_IPV4_CIDR=$(cat $DESCRIBE_CLUSTER_RESULT | awk '{print $3}') 330 | fi 331 | 332 | echo $B64_CLUSTER_CA | base64 -d > $CA_CERTIFICATE_FILE_PATH 333 | 334 | sed -i s,CLUSTER_NAME,$CLUSTER_NAME,g /var/lib/kubelet/kubeconfig 335 | sed -i s,MASTER_ENDPOINT,$APISERVER_ENDPOINT,g /var/lib/kubelet/kubeconfig 336 | sed -i s,AWS_REGION,$AWS_DEFAULT_REGION,g /var/lib/kubelet/kubeconfig 337 | ### kubelet.service configuration 338 | 339 | if [[ -z "${DNS_CLUSTER_IP}" ]]; then 340 | if [[ ! -z "${SERVICE_IPV4_CIDR}" ]] && [[ "${SERVICE_IPV4_CIDR}" != "None" ]] ; then 341 | #Sets the DNS Cluster IP address that would be chosen from the serviceIpv4Cidr. (x.y.z.10) 342 | DNS_CLUSTER_IP=${SERVICE_IPV4_CIDR%.*}.10 343 | else 344 | MAC=$(get_meta_data 'latest/meta-data/network/interfaces/macs/' | head -n 1 | sed 's/\/$//') 345 | TEN_RANGE=$(get_meta_data "latest/meta-data/network/interfaces/macs/$MAC/vpc-ipv4-cidr-blocks" | grep -c '^10\..*' || true ) 346 | DNS_CLUSTER_IP=10.100.0.10 347 | if [[ "$TEN_RANGE" != "0" ]]; then 348 | DNS_CLUSTER_IP=172.20.0.10 349 | fi 350 | fi 351 | else 352 | DNS_CLUSTER_IP="${DNS_CLUSTER_IP}" 353 | fi 354 | 355 | KUBELET_CONFIG=/etc/kubernetes/kubelet/kubelet-config.json 356 | echo "$(jq ".clusterDNS=[\"$DNS_CLUSTER_IP\"]" $KUBELET_CONFIG)" > $KUBELET_CONFIG 357 | 358 | INTERNAL_IP=$(get_meta_data 'latest/meta-data/local-ipv4') 359 | INSTANCE_TYPE=$(get_meta_data 'latest/meta-data/instance-type') 360 | 361 | # Sets kubeReserved and evictionHard in /etc/kubernetes/kubelet/kubelet-config.json for worker nodes. The following two function 362 | # calls calculate the CPU and memory resources to reserve for kubeReserved based on the instance type of the worker node. 363 | # Note that allocatable memory and CPU resources on worker nodes is calculated by the Kubernetes scheduler 364 | # with this formula when scheduling pods: Allocatable = Capacity - Reserved - Eviction Threshold. 365 | 366 | #calculate the max number of pods per instance type 367 | MAX_PODS_FILE="/etc/eks/eni-max-pods.txt" 368 | set +o pipefail 369 | MAX_PODS=$(cat $MAX_PODS_FILE | awk "/^${INSTANCE_TYPE:-unset}/"' { print $2 }') 370 | set -o pipefail 371 | if [ -z "$MAX_PODS" ] || [ -z "$INSTANCE_TYPE" ]; then 372 | echo "No entry for type '$INSTANCE_TYPE' in $MAX_PODS_FILE" 373 | exit 1 374 | fi 375 | 376 | # calculates the amount of each resource to reserve 377 | mebibytes_to_reserve=$(get_memory_mebibytes_to_reserve $MAX_PODS) 378 | cpu_millicores_to_reserve=$(get_cpu_millicores_to_reserve) 379 | # writes kubeReserved and evictionHard to the kubelet-config using the amount of CPU and memory to be reserved 380 | echo "$(jq '. += {"evictionHard": {"memory.available": "100Mi", "nodefs.available": "10%", "nodefs.inodesFree": "5%"}}' $KUBELET_CONFIG)" > $KUBELET_CONFIG 381 | echo "$(jq --arg mebibytes_to_reserve "${mebibytes_to_reserve}Mi" --arg cpu_millicores_to_reserve "${cpu_millicores_to_reserve}m" \ 382 | '. 
+= {kubeReserved: {"cpu": $cpu_millicores_to_reserve, "ephemeral-storage": "1Gi", "memory": $mebibytes_to_reserve}}' $KUBELET_CONFIG)" > $KUBELET_CONFIG 383 | 384 | if [[ "$USE_MAX_PODS" = "true" ]]; then 385 | echo "$(jq ".maxPods=$MAX_PODS" $KUBELET_CONFIG)" > $KUBELET_CONFIG 386 | fi 387 | 388 | mkdir -p /etc/systemd/system/kubelet.service.d 389 | 390 | cat < /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf 391 | [Service] 392 | Environment='KUBELET_ARGS=--node-ip=$INTERNAL_IP --pod-infra-container-image=$PAUSE_CONTAINER --v=2' 393 | EOF 394 | 395 | if [[ -n "$KUBELET_EXTRA_ARGS" ]]; then 396 | cat < /etc/systemd/system/kubelet.service.d/30-kubelet-extra-args.conf 397 | [Service] 398 | Environment='KUBELET_EXTRA_ARGS=$KUBELET_EXTRA_ARGS' 399 | EOF 400 | fi 401 | 402 | if [[ "$CONTAINER_RUNTIME" = "containerd" ]]; then 403 | sudo mkdir -p /etc/containerd 404 | sudo mkdir -p /etc/cni/net.d 405 | systemctl daemon-reload 406 | systemctl enable containerd 407 | systemctl start containerd 408 | elif [[ "$CONTAINER_RUNTIME" = "dockerd" ]]; then 409 | mkdir -p /etc/docker 410 | bash -c "/sbin/iptables-save > /etc/sysconfig/iptables" 411 | mv /etc/eks/iptables-restore.service /etc/systemd/system/iptables-restore.service 412 | sudo chown root:root /etc/systemd/system/iptables-restore.service 413 | systemctl daemon-reload 414 | systemctl enable iptables-restore 415 | 416 | if [[ -n "$DOCKER_CONFIG_JSON" ]]; then 417 | echo "$DOCKER_CONFIG_JSON" > /etc/docker/daemon.json 418 | fi 419 | if [[ "$ENABLE_DOCKER_BRIDGE" = "true" ]]; then 420 | # Enabling the docker bridge network. We have to disable live-restore as it 421 | # prevents docker from recreating the default bridge network on restart 422 | echo "$(jq '.bridge="docker0" | ."live-restore"=false' /etc/docker/daemon.json)" > /etc/docker/daemon.json 423 | fi 424 | systemctl daemon-reload 425 | systemctl enable docker 426 | systemctl start docker 427 | else 428 | echo "Container runtime ${CONTAINER_RUNTIME} is not supported." 429 | exit 1 430 | fi 431 | 432 | 433 | systemctl enable kubelet 434 | systemctl start kubelet 435 | 436 | # gpu boost clock 437 | if command -v nvidia-smi &>/dev/null ; then 438 | echo "nvidia-smi found" 439 | 440 | nvidia-smi -q > /tmp/nvidia-smi-check 441 | if [[ "$?" 
== "0" ]]; then 442 | sudo nvidia-smi -pm 1 # set persistence mode 443 | sudo nvidia-smi --auto-boost-default=0 444 | 445 | GPUNAME=$(nvidia-smi -L | head -n1) 446 | echo $GPUNAME 447 | 448 | # set application clock to maximum 449 | if [[ $GPUNAME == *"A100"* ]]; then 450 | nvidia-smi -ac 1215,1410 451 | elif [[ $GPUNAME == *"V100"* ]]; then 452 | nvidia-smi -ac 877,1530 453 | elif [[ $GPUNAME == *"K80"* ]]; then 454 | nvidia-smi -ac 2505,875 455 | elif [[ $GPUNAME == *"T4"* ]]; then 456 | nvidia-smi -ac 5001,1590 457 | elif [[ $GPUNAME == *"M60"* ]]; then 458 | nvidia-smi -ac 2505,1177 459 | else 460 | echo "unsupported gpu" 461 | fi 462 | else 463 | cat /tmp/nvidia-smi-check 464 | fi 465 | else 466 | echo "nvidia-smi not found" 467 | fi 468 | -------------------------------------------------------------------------------- /ami/files/gitpod/containerd-stargz-grpc.toml: -------------------------------------------------------------------------------- 1 | [cri_keychain] 2 | enable_keychain = true 3 | image_service_path = "/run/containerd/containerd.sock" 4 | -------------------------------------------------------------------------------- /ami/files/gitpod/containerd.toml: -------------------------------------------------------------------------------- 1 | # explicitly use v2 config format 2 | version = 2 3 | 4 | [proxy_plugins] 5 | # stargz is used for lazy pulling 6 | [proxy_plugins.stargz] 7 | type = "snapshot" 8 | address = "/run/containerd-stargz-grpc/containerd-stargz-grpc.sock" 9 | 10 | [plugins."io.containerd.grpc.v1.cri".containerd] 11 | # save disk space when using a single snapshotter 12 | discard_unpacked_layers = true 13 | # enable stargz snapshotter 14 | snapshotter = "stargz" 15 | # pass additional snapshotter labels to remote snapshotter 16 | disable_snapshot_annotations = false 17 | # explicit default here, as we're configuring it below 18 | default_runtime_name = "runc" 19 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] 20 | # set default runtime handler to v2, which has a per-pod shim 21 | runtime_type = "io.containerd.runc.v2" 22 | 23 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] 24 | SystemdCgroup = true 25 | 26 | # allow pulling from registries using self-signed SSL certificates 27 | [plugins."io.containerd.grpc.v1.cri".registry] 28 | config_path = "/etc/containerd/certs.d" 29 | 30 | [plugins."io.containerd.grpc.v1.cri"] 31 | max_concurrent_downloads = 20 32 | # use fixed sandbox image 33 | sandbox_image = "k8s.gcr.io/pause:3.6" 34 | # allow hugepages controller to be missing 35 | # see https://github.com/containerd/cri/pull/1501 36 | tolerate_missing_hugepages_controller = true 37 | # restrict_oom_score_adj needs to be true when running inside UserNS (rootless) 38 | restrict_oom_score_adj = false 39 | -------------------------------------------------------------------------------- /ami/files/gitpod/kubelet.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Kubelet 3 | Documentation=https://github.com/kubernetes/kubernetes 4 | After=containerd.service 5 | Requires=containerd.service 6 | 7 | [Service] 8 | ExecStartPre=/sbin/iptables -P FORWARD ACCEPT -w 5 9 | ExecStart=/usr/bin/kubelet --cloud-provider aws \ 10 | --config /etc/kubernetes/kubelet/kubelet-config.json \ 11 | --kubeconfig /var/lib/kubelet/kubeconfig \ 12 | --resolv-conf=/run/systemd/resolve/resolv.conf \ 13 | --container-runtime remote \ 14 | --container-runtime-endpoint 
unix:///run/containerd/containerd.sock \ 15 | --image-service-endpoint=unix:///run/containerd-stargz-grpc/containerd-stargz-grpc.sock \ 16 | $KUBELET_ARGS $KUBELET_EXTRA_ARGS 17 | 18 | Restart=on-failure 19 | RestartForceExitStatus=SIGPIPE 20 | RestartSec=5 21 | KillMode=process 22 | 23 | [Install] 24 | WantedBy=multi-user.target 25 | -------------------------------------------------------------------------------- /ami/files/gitpod/sysctl/11-network-security.conf: -------------------------------------------------------------------------------- 1 | # Turn on SYN-flood protections. Starting with 2.6.26, there is no loss 2 | # of TCP functionality/features under normal conditions. When flood 3 | # protections kick in under high unanswered-SYN load, the system 4 | # should remain more stable, with a trade off of some loss of TCP 5 | # functionality/features (e.g. TCP Window scaling). 6 | net.ipv4.tcp_syncookies=1 7 | 8 | # Ignore source-routed packets 9 | net.ipv4.conf.all.accept_source_route=0 10 | net.ipv4.conf.default.accept_source_route=0 11 | 12 | # Ignore ICMP redirects from non-GW hosts 13 | net.ipv4.conf.all.accept_redirects=0 14 | net.ipv4.conf.default.accept_redirects=0 15 | net.ipv4.conf.all.secure_redirects=1 16 | net.ipv4.conf.default.secure_redirects=1 17 | 18 | # Don't pass traffic between networks or act as a router 19 | net.ipv4.ip_forward=0 20 | net.ipv4.conf.all.send_redirects=0 21 | net.ipv4.conf.default.send_redirects=0 22 | 23 | # Turn on Source Address Verification in all interfaces to 24 | # prevent some spoofing attacks. 25 | net.ipv4.conf.all.rp_filter=1 26 | net.ipv4.conf.default.rp_filter=1 27 | 28 | # Ignore ICMP broadcasts to avoid participating in Smurf attacks 29 | net.ipv4.icmp_echo_ignore_broadcasts=1 30 | 31 | # Ignore bad ICMP errors 32 | net.ipv4.icmp_ignore_bogus_error_responses=1 33 | 34 | # Log spoofed, source-routed, and redirect packets 35 | net.ipv4.conf.all.log_martians=1 36 | net.ipv4.conf.default.log_martians=1 37 | 38 | # RFC 1337 fix 39 | net.ipv4.tcp_rfc1337=1 40 | 41 | # Addresses of mmap base, heap, stack and VDSO page are randomized 42 | kernel.randomize_va_space=2 43 | 44 | # Reboot the machine soon after a kernel panic. 
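# (the value is the number of seconds the kernel waits after a panic before rebooting;
# a value of 0 would disable the automatic reboot)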
45 | kernel.panic=10 46 | -------------------------------------------------------------------------------- /ami/files/gitpod/sysctl/89-gce.conf: -------------------------------------------------------------------------------- 1 | net.ipv4.conf.all.rp_filter = 1 2 | 3 | # reverse path filtering - IP spoofing protection 4 | net.ipv4.conf.default.rp_filter = 1 5 | 6 | # ignores ICMP broadcasts to avoid participating in Smurf attacks 7 | net.ipv4.icmp_echo_ignore_broadcasts = 1 8 | 9 | # ignores bad ICMP errors 10 | net.ipv4.icmp_ignore_bogus_error_responses = 1 11 | 12 | # logs spoofed, source-routed, and redirect packets 13 | net.ipv4.conf.all.log_martians = 1 14 | 15 | # log spoofed, source-routed, and redirect packets 16 | net.ipv4.conf.default.log_martians = 1 17 | 18 | # implements RFC 1337 fix 19 | net.ipv4.tcp_rfc1337 = 1 20 | 21 | # randomizes addresses of mmap base, heap, stack and VDSO page 22 | kernel.randomize_va_space = 2 23 | 24 | # provides protection from ToCToU races 25 | fs.protected_hardlinks=1 26 | 27 | # provides protection from ToCToU races 28 | fs.protected_symlinks=1 29 | 30 | # makes locating kernel addresses more difficult 31 | kernel.kptr_restrict=1 32 | 33 | # set ptrace protections 34 | kernel.yama.ptrace_scope=1 35 | 36 | # set perf only available to root 37 | kernel.perf_event_paranoid=2 38 | -------------------------------------------------------------------------------- /ami/files/gitpod/sysctl/99-defaults.conf: -------------------------------------------------------------------------------- 1 | fs.inotify.max_user_instances=8192 2 | fs.inotify.max_user_watches=12288 3 | kernel.keys.root_maxbytes=25000000 4 | kernel.keys.root_maxkeys=1000000 5 | kernel.panic=10 6 | kernel.panic_on_oops=1 7 | kernel.pid_max=4194304 8 | net.core.netdev_budget=300 9 | net.core.netdev_budget_usecs=2000 10 | net.core.netdev_max_backlog=1000 11 | net.core.optmem_max=20480 12 | net.core.rmem_default=212992 13 | net.core.rmem_max=212992 14 | net.core.somaxconn=1024 15 | net.core.wmem_default=212992 16 | net.core.wmem_max=212992 17 | net.ipv4.conf.all.accept_redirects=0 18 | net.ipv4.conf.all.forwarding=1 19 | net.ipv4.conf.all.route_localnet=1 20 | net.ipv4.conf.default.forwarding=1 21 | net.ipv4.conf.docker0.forwarding=1 22 | net.ipv4.conf.eth0.forwarding=1 23 | net.ipv4.conf.lo.forwarding=1 24 | net.ipv4.ip_forward=1 25 | net.ipv4.tcp_fin_timeout=60 26 | net.ipv4.tcp_keepalive_intvl=75 27 | net.ipv4.tcp_keepalive_probes=9 28 | net.ipv4.tcp_keepalive_time=7200 29 | net.ipv4.tcp_rmem=4096 87380 6291456 30 | net.ipv4.tcp_syn_retries=6 31 | net.ipv4.tcp_tw_reuse=0 32 | net.ipv4.tcp_wmem=4096 16384 4194304 33 | net.ipv4.udp_rmem_min=4096 34 | net.ipv4.udp_wmem_min=4096 35 | net.ipv6.conf.default.accept_ra=0 36 | net.netfilter.nf_conntrack_generic_timeout=600 37 | net.netfilter.nf_conntrack_max=524288 38 | net.netfilter.nf_conntrack_tcp_be_liberal=1 39 | net.netfilter.nf_conntrack_tcp_timeout_close_wait=3600 40 | net.netfilter.nf_conntrack_tcp_timeout_established=86400 41 | net.nf_conntrack_max=524288 42 | vm.overcommit_memory=1 43 | vm.panic_on_oom=0 44 | -------------------------------------------------------------------------------- /ami/scripts/shared/cis-eks.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o pipefail 4 | set -o nounset 5 | set -o errexit 6 | 7 | echo "1 - control plane configuration" 8 | echo "[not scored] - not applicable for worker node" 9 | 10 | echo "2 - control plane configuration" 11 | 
echo "[not scored] - not applicable for worker node" 12 | 13 | echo "3.1.1 - ensure that the kubeconfig file permissions are set to 644 or more restrictive" 14 | chmod 644 /var/lib/kubelet/kubeconfig 15 | 16 | echo "3.1.2 - ensure that the kubelet kubeconfig file ownership is set to root:root" 17 | chown root:root /var/lib/kubelet/kubeconfig 18 | 19 | echo "3.2 - kubelet" 20 | cat > /etc/kubernetes/kubelet/kubelet-config.json < /etc/modules-load.d/k8s.conf 27 | ena 28 | overlay 29 | fuse 30 | br_netfilter 31 | EOF 32 | 33 | # Disable modules 34 | cat < /etc/modprobe.d/kubernetes-blacklist.conf 35 | blacklist dccp 36 | blacklist sctp 37 | EOF 38 | 39 | # Disable cgroups2 40 | sed -i 's/GRUB_CMDLINE_LINUX="\(.*\)"/GRUB_CMDLINE_LINUX="systemd.unified_cgroup_hierarchy=0 \1"/g' /etc/default/grub 41 | update-grub 42 | 43 | # Install containerd 44 | curl -sSL https://github.com/containerd/nerdctl/releases/download/v0.14.0/nerdctl-full-0.14.0-linux-amd64.tar.gz -o - | tar -xz -C /usr/local 45 | 46 | # copy the portmap plugin to support hostport 47 | mkdir -p /opt/cni/bin 48 | ln -s /usr/local/libexec/cni/portmap /opt/cni/bin 49 | 50 | cp /usr/local/lib/systemd/system/* /lib/systemd/system/ 51 | 52 | # Configure containerd 53 | mkdir -p /etc/containerd/ 54 | cp /etc/packer/files/gitpod/containerd.toml /etc/containerd/config.toml 55 | # Enable stargz-snapshotter plugin 56 | mkdir -p /etc/containerd-stargz-grpc 57 | cp /etc/packer/files/gitpod/containerd-stargz-grpc.toml /etc/containerd-stargz-grpc/config.toml 58 | cp /etc/packer/files/gitpod/stargz-snapshotter.service /lib/systemd/system/stargz-snapshotter.service 59 | 60 | # Reload systemd 61 | systemctl daemon-reload 62 | 63 | # Start containerd and stargz 64 | systemctl enable containerd 65 | systemctl enable stargz-snapshotter 66 | 67 | echo "image-endpoint: unix:///run/containerd-stargz-grpc/containerd-stargz-grpc.sock" >> /etc/crictl.yaml 68 | 69 | systemctl start containerd 70 | systemctl start stargz-snapshotter 71 | 72 | # Prepare images airgap tgz 73 | chmod +x /etc/packer/files/gitpod/airgap.sh 74 | /etc/packer/files/gitpod/airgap.sh 75 | 76 | sleep 60 77 | systemctl stop containerd 78 | -------------------------------------------------------------------------------- /ami/scripts/shared/eks.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o pipefail 4 | set -o nounset 5 | set -o errexit 6 | 7 | source /etc/packer/files/functions.sh 8 | 9 | ################################################################################ 10 | ### Machine Architecture ####################################################### 11 | ################################################################################ 12 | ARCH=$(get_arch) 13 | 14 | # install dependencies 15 | apt-get install -y \ 16 | conntrack \ 17 | curl \ 18 | socat \ 19 | unzip \ 20 | nfs-common 21 | 22 | ################################################################################ 23 | ### iptables ################################################################### 24 | ################################################################################ 25 | install_iptables_restore 26 | 27 | ################################################################################ 28 | ### Logrotate ################################################################## 29 | ################################################################################ 30 | 31 | curl -sL -o /etc/logrotate.d/kube-proxy 
https://raw.githubusercontent.com/awslabs/amazon-eks-ami/master/files/logrotate-kube-proxy 32 | curl -sL -o /etc/logrotate.conf https://raw.githubusercontent.com/awslabs/amazon-eks-ami/master/files/logrotate.conf 33 | chown root:root /etc/logrotate.d/kube-proxy 34 | chown root:root /etc/logrotate.conf 35 | mkdir -p /var/log/journal 36 | 37 | ################################################################################ 38 | ### Kubernetes ################################################################# 39 | ################################################################################ 40 | 41 | mkdir -p /etc/kubernetes/manifests 42 | mkdir -p /var/lib/kubernetes 43 | mkdir -p /var/lib/kubelet 44 | mkdir -p /opt/cni/bin 45 | 46 | echo "Downloading binaries from: s3://$BINARY_BUCKET_NAME" 47 | S3_DOMAIN="amazonaws.com" 48 | if [ "$BINARY_BUCKET_REGION" = "cn-north-1" ] || [ "$BINARY_BUCKET_REGION" = "cn-northwest-1" ]; then 49 | S3_DOMAIN="amazonaws.com.cn" 50 | elif [ "$BINARY_BUCKET_REGION" = "us-iso-east-1" ]; then 51 | S3_DOMAIN="c2s.ic.gov" 52 | elif [ "$BINARY_BUCKET_REGION" = "us-isob-east-1" ]; then 53 | S3_DOMAIN="sc2s.sgov.gov" 54 | fi 55 | 56 | S3_URL_BASE="https://$BINARY_BUCKET_NAME.s3.$BINARY_BUCKET_REGION.$S3_DOMAIN/$KUBERNETES_VERSION/$KUBERNETES_BUILD_DATE/bin/linux/$ARCH" 57 | S3_PATH="s3://$BINARY_BUCKET_NAME/$KUBERNETES_VERSION/$KUBERNETES_BUILD_DATE/bin/linux/$ARCH" 58 | 59 | BINARIES=( 60 | kubelet 61 | aws-iam-authenticator 62 | ) 63 | 64 | for binary in ${BINARIES[*]} ; do 65 | echo "AWS cli missing - using wget to fetch binaries from s3. Note: This won't work for private bucket." 66 | curl -sL -o $binary $S3_URL_BASE/$binary 67 | curl -sL -o $binary.sha256 $S3_URL_BASE/$binary.sha256 68 | 69 | sha256sum -c $binary.sha256 70 | chmod +x $binary 71 | mv $binary /usr/bin/ 72 | done 73 | 74 | mkdir -p /etc/kubernetes/kubelet 75 | mkdir -p /etc/systemd/system/kubelet.service.d 76 | 77 | curl -sL -o /var/lib/kubelet/kubeconfig https://raw.githubusercontent.com/awslabs/amazon-eks-ami/master/files/kubelet-kubeconfig 78 | chown root:root /var/lib/kubelet/kubeconfig 79 | 80 | cp /etc/packer/files/gitpod/kubelet.service /etc/systemd/system/kubelet.service 81 | chown root:root /etc/systemd/system/kubelet.service 82 | 83 | configure_kubelet_environment 84 | 85 | systemctl daemon-reload && systemctl disable kubelet 86 | 87 | ################################################################################ 88 | ### EKS ######################################################################## 89 | ################################################################################ 90 | 91 | mkdir -p /etc/eks 92 | curl -sL -o /etc/eks/eni-max-pods.txt https://raw.githubusercontent.com/awslabs/amazon-eks-ami/master/files/eni-max-pods.txt 93 | 94 | cp /etc/packer/files/gitpod/bootstrap.sh /etc/eks/bootstrap.sh 95 | chown root:root /etc/eks/bootstrap.sh 96 | 97 | ################################################################################ 98 | ### Stuff required by "protectKernelDefaults=true" ############################# 99 | ################################################################################ 100 | 101 | cat > /etc/sysctl.d/99-amazon.conf </auth/github/callback", 14 | "settingsUrl": "hhttps://mygithub.com/settings/applications/" 15 | }, 16 | "description": "", 17 | "icon": "" 18 | } 19 | ] 20 | -------------------------------------------------------------------------------- /bin/provision.ts: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import 'source-map-support/register'; 4 | 5 | import { App } from '@aws-cdk/core'; 6 | import { ServicesStack } from '../lib/services'; 7 | import { AddonsStack } from '../lib/addons'; 8 | import { GitpodStack } from '../lib/gitpod'; 9 | import { SetupStack } from '../lib/setup'; 10 | 11 | const app = new App({}); 12 | 13 | const region = app.node.tryGetContext('region'); 14 | if (!region) { 15 | throw new Error("region is not defined."); 16 | } 17 | 18 | const domain = app.node.tryGetContext('domain'); 19 | if (!domain) { 20 | throw new Error("domain is not defined."); 21 | } 22 | 23 | const identityoidcissuer = app.node.tryGetContext('identityoidcissuer'); 24 | if (!identityoidcissuer) { 25 | throw new Error("identityoidcissuer is not defined."); 26 | } 27 | 28 | const certificateArn = app.node.tryGetContext('certificatearn'); 29 | if (!certificateArn) { 30 | throw new Error("certificateArn is not defined."); 31 | } 32 | 33 | const env = { 34 | account: process.env.CDK_DEPLOY_ACCOUNT || process.env.CDK_DEFAULT_ACCOUNT, 35 | region, 36 | }; 37 | 38 | const setup = new SetupStack(app, 'Setup', { 39 | env, 40 | identityoidcissuer, 41 | }); 42 | 43 | const addons = new AddonsStack(app, 'Addons', { 44 | env, 45 | }); 46 | addons.node.addDependency(setup); 47 | 48 | const services = new ServicesStack(app, 'Services', { 49 | env, 50 | }) 51 | services.node.addDependency(setup); 52 | 53 | const gitpod = new GitpodStack(app, 'Gitpod', { 54 | env, 55 | domain, 56 | certificateArn, 57 | }) 58 | gitpod.node.addDependency(services); 59 | gitpod.node.addDependency(addons); 60 | -------------------------------------------------------------------------------- /cdk.json: -------------------------------------------------------------------------------- 1 | { 2 | "app": "npx ts-node --prefer-ts-exts bin/provision.ts", 3 | "context": { 4 | "@aws-cdk/aws-apigateway:usagePlanKeyOrderInsensitiveId": true, 5 | "@aws-cdk/core:enableStackNameDuplicates": "true", 6 | "aws-cdk:enableDiffNoFail": "true", 7 | "@aws-cdk/core:stackRelativeExports": "true", 8 | "@aws-cdk/aws-ecr-assets:dockerIgnoreSupport": true, 9 | "@aws-cdk/aws-secretsmanager:parseOwnedSecretName": true, 10 | "@aws-cdk/aws-kms:defaultKeyPolicies": true, 11 | "@aws-cdk/aws-s3:grantWriteWithoutAcl": true, 12 | "@aws-cdk/aws-ecs-patterns:removeDefaultDesiredCount": true, 13 | "@aws-cdk/aws-rds:lowercaseDbIdentifier": true, 14 | "@aws-cdk/aws-efs:defaultEncryptionAtRest": true, 15 | "@aws-cdk/aws-lambda:recognizeVersionProps": true 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /eks-cluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: eksctl.io/v1alpha5 2 | kind: ClusterConfig 3 | metadata: 4 | # Template, please change 5 | # Please make sure you also update the definition of the variable 6 | # CLUSTERNAME= in the overrideBootstrapCommand section 7 | # and k8s.io/cluster-autoscaler/: "owned" 8 | # cluster-autoscaler will not be require additional labels in a future release. 
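  # As a worked example (using the hypothetical name "my-gitpod"): change "name: gitpod"
  # below, both "export CLUSTERNAME=gitpod" lines in the overrideBootstrapCommand blocks,
  # and both "k8s.io/cluster-autoscaler/gitpod" tags to reference "my-gitpod" instead.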
9 | # https://github.com/kubernetes/autoscaler/pull/3968 10 | name: gitpod 11 | # Template, please change 12 | region: us-west-2 13 | version: "1.21" 14 | 15 | iam: 16 | withOIDC: true 17 | 18 | serviceAccounts: 19 | - metadata: 20 | name: aws-load-balancer-controller 21 | namespace: kube-system 22 | wellKnownPolicies: 23 | awsLoadBalancerController: true 24 | - metadata: 25 | name: ebs-csi-controller-sa 26 | namespace: kube-system 27 | wellKnownPolicies: 28 | ebsCSIController: true 29 | - metadata: 30 | name: cluster-autoscaler 31 | namespace: kube-system 32 | wellKnownPolicies: 33 | autoScaler: true 34 | 35 | availabilityZones: 36 | - us-west-2a 37 | - us-west-2b 38 | - us-west-2c 39 | 40 | # By default we create a dedicated VPC for the cluster 41 | # You can use an existing VPC by supplying private and/or public subnets. Please check 42 | # https://eksctl.io/usage/vpc-networking/#use-existing-vpc-other-custom-configuration 43 | vpc: 44 | autoAllocateIPv6: false 45 | nat: 46 | # For production environments user HighlyAvailable 47 | # https://eksctl.io/usage/vpc-networking/#nat-gateway 48 | gateway: Single 49 | 50 | # Enable EKS control plane logging 51 | # https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html 52 | cloudWatch: 53 | clusterLogging: 54 | enableTypes: ["audit", "authenticator"] 55 | 56 | privateCluster: 57 | enabled: false 58 | additionalEndpointServices: 59 | - "autoscaling" 60 | - "logs" 61 | 62 | managedNodeGroups: 63 | - name: workspaces 64 | desiredCapacity: 1 65 | minSize: 1 66 | maxSize: 10 67 | # because of AWS addons 68 | disableIMDSv1: false 69 | # Please configure the size of the volume and additional features 70 | # https://eksctl.io/usage/schema/#nodeGroups-volumeType 71 | # https://aws.amazon.com/es/ebs/pricing/ 72 | volumeSize: 300 73 | volumeType: gp3 74 | volumeIOPS: 6000 75 | volumeThroughput: 500 76 | ebsOptimized: true 77 | # Use private subnets for nodes 78 | # https://eksctl.io/usage/vpc-networking/#use-private-subnets-for-initial-nodegroup 79 | privateNetworking: true 80 | ami: ami-009935ddbb32a7f3c 81 | 82 | tags: 83 | # EC2 tags required for cluster-autoscaler auto-discovery 84 | k8s.io/cluster-autoscaler/enabled: "true" 85 | k8s.io/cluster-autoscaler/gitpod: "owned" 86 | iam: 87 | attachPolicyARNs: &attachPolicyARNs 88 | - arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly 89 | - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy 90 | - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy 91 | - arn:aws:iam::aws:policy/ElasticLoadBalancingFullAccess 92 | - arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore 93 | withAddonPolicies: &withAddonPolicies 94 | albIngress: true 95 | autoScaler: true 96 | cloudWatch: true 97 | certManager: true 98 | ebs: true 99 | # Using custom AMI images require the definition of overrideBootstrapCommand 100 | # to ensure that nodes are able to join the cluster https://eksctl.io/usage/custom-ami-support/ 101 | overrideBootstrapCommand: | 102 | #!/bin/bash 103 | 104 | export CLUSTERNAME=gitpod 105 | export NODEGROUP=workspaces 106 | 107 | declare -a LABELS=( 108 | eks.amazonaws.com/nodegroup="${NODEGROUP}" 109 | gitpod.io/workload_workspace_services=true 110 | gitpod.io/workload_workspace_regular=true 111 | gitpod.io/workload_workspace_headless=true 112 | ) 113 | 114 | export KUBELET_EXTRA_ARGS="$(printf -- "--max-pods=110 --node-labels=%s" $(IFS=$','; echo "${LABELS[*]}"))" 115 | /etc/eks/bootstrap.sh ${CLUSTERNAME} 116 | 117 | spot: false 118 | # https://eksctl.io/usage/instance-selector/ 119 | 
#instanceSelector: 120 | # vCPUs: 8 121 | # memory: 64Gib 122 | # or use a custom list 123 | instanceTypes: ["m6i.xlarge", "m6i.2xlarge"] 124 | 125 | - name: services 126 | desiredCapacity: 1 127 | minSize: 1 128 | maxSize: 3 129 | # because of AWS addons 130 | disableIMDSv1: false 131 | # Please configure the size of the volume and additional features 132 | # https://eksctl.io/usage/schema/#nodeGroups-volumeType 133 | # https://aws.amazon.com/es/ebs/pricing/ 134 | volumeSize: 100 135 | volumeType: gp3 136 | volumeIOPS: 6000 137 | volumeThroughput: 500 138 | ebsOptimized: true 139 | # Use private subnets for nodes 140 | # https://eksctl.io/usage/vpc-networking/#use-private-subnets-for-initial-nodegroup 141 | privateNetworking: true 142 | ami: ami-009935ddbb32a7f3c 143 | 144 | tags: 145 | # EC2 tags required for cluster-autoscaler auto-discovery 146 | k8s.io/cluster-autoscaler/enabled: "true" 147 | k8s.io/cluster-autoscaler/gitpod: "owned" 148 | iam: 149 | attachPolicyARNs: *attachPolicyARNs 150 | withAddonPolicies: *withAddonPolicies 151 | # Using custom AMI images require the definition of overrideBootstrapCommand 152 | # to ensure that nodes are able to join the cluster https://eksctl.io/usage/custom-ami-support/ 153 | overrideBootstrapCommand: | 154 | #!/bin/bash 155 | 156 | export CLUSTERNAME=gitpod 157 | export NODEGROUP=services 158 | 159 | declare -a LABELS=( 160 | eks.amazonaws.com/nodegroup="${NODEGROUP}" 161 | gitpod.io/workload_meta=true 162 | gitpod.io/workload_ide=true 163 | ) 164 | 165 | export KUBELET_EXTRA_ARGS="$(printf -- "--max-pods=110 --node-labels=%s" $(IFS=$','; echo "${LABELS[*]}"))" 166 | /etc/eks/bootstrap.sh ${CLUSTERNAME} 167 | 168 | spot: false 169 | # https://eksctl.io/usage/instance-selector/ 170 | #instanceSelector: 171 | # vCPUs: 4 172 | # memory: 16Gib 173 | # or use a custom list 174 | instanceTypes: ["m6i.xlarge", "m6i.2xlarge"] 175 | -------------------------------------------------------------------------------- /images/gitpod-login.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gitpod-io/gitpod-eks-guide/d2b5fca8db3e20997f08cdeefc4bea4ff938dfe6/images/gitpod-login.png -------------------------------------------------------------------------------- /jest.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | testEnvironment: 'node', 3 | roots: ['/test'], 4 | testMatch: ['**/*.test.ts'], 5 | transform: { 6 | '^.+\\.tsx?$': 'ts-jest' 7 | } 8 | }; 9 | -------------------------------------------------------------------------------- /lib/addons.ts: -------------------------------------------------------------------------------- 1 | 2 | import * as cdk from '@aws-cdk/core' 3 | 4 | import { AWSLoadBalancerController } from './charts/load-balancer'; 5 | import { MetricsServer } from './charts/metrics-server'; 6 | import { CertManager } from './charts/cert-manager'; 7 | import { Jaeger } from './charts/jaeger'; 8 | import { ContainerInsights } from './charts/container-insights'; 9 | import { ClusterAutoscaler } from './charts/cluster-autoscaler'; 10 | import { ExternalDNS } from './charts/external-dns'; 11 | 12 | export class AddonsStack extends cdk.Stack { 13 | 14 | constructor(scope: cdk.Construct, id: string, props: cdk.StackProps) { 15 | super(scope, id, props) 16 | 17 | new ContainerInsights(this, 'container-insights', {}); 18 | new ClusterAutoscaler(this, 'cluster-autoscaler', {}); 19 | new 
AWSLoadBalancerController(this, 'aws-load-balancer', {}); 20 | new MetricsServer(this, 'metrics-server', {}); 21 | new CertManager(this, 'cert-manager', { 22 | baseDomain: process.env.DOMAIN, 23 | email: process.env.LETSENCRYPT_EMAIL, 24 | }); 25 | new Jaeger(this, 'jaeger', {}); 26 | new ExternalDNS(this, 'external-dns',{}); 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /lib/charts/assets/ingress.yaml: -------------------------------------------------------------------------------- 1 | 2 | apiVersion: networking.k8s.io/v1 3 | kind: Ingress 4 | metadata: 5 | name: gitpod 6 | annotations: 7 | kubernetes.io/ingress.class: "alb" 8 | alb.ingress.kubernetes.io/actions.ssl-redirect: >- 9 | { 10 | "Type": "redirect", 11 | "RedirectConfig": { 12 | "Protocol": "HTTPS", 13 | "Port": "443", 14 | "StatusCode": "HTTP_301" 15 | } 16 | } 17 | alb.ingress.kubernetes.io/tags: Purpose=Gitpod,Service=proxy 18 | alb.ingress.kubernetes.io/target-node-labels: gitpod.io/workload_workspace_services=true 19 | alb.ingress.kubernetes.io/healthcheck-protocol: HTTPS 20 | alb.ingress.kubernetes.io/backend-protocol: HTTPS 21 | alb.ingress.kubernetes.io/listen-ports: >- 22 | [{ 23 | "HTTP": 80 24 | }, { 25 | "HTTPS": 443 26 | }] 27 | alb.ingress.kubernetes.io/target-group-attributes: deregistration_delay.timeout_seconds=30 28 | alb.ingress.kubernetes.io/load-balancer-attributes: idle_timeout.timeout_seconds=3600 29 | spec: 30 | rules: 31 | - http: 32 | paths: 33 | - path: / 34 | pathType: Prefix 35 | backend: 36 | service: 37 | name: proxy 38 | port: 39 | number: 443 40 | -------------------------------------------------------------------------------- /lib/charts/assets/jaeger-gitpod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: jaegertracing.io/v1 2 | kind: Jaeger 3 | metadata: 4 | name: jaeger 5 | namespace: default 6 | spec: 7 | affinity: 8 | nodeAffinity: 9 | requiredDuringSchedulingIgnoredDuringExecution: 10 | nodeSelectorTerms: 11 | - matchExpressions: 12 | - key: gitpod.io/workload_meta 13 | operator: In 14 | values: 15 | - "true" 16 | strategy: allInOne 17 | storage: 18 | options: 19 | memory: 20 | max-traces: 500 21 | ingress: 22 | enabled: false 23 | -------------------------------------------------------------------------------- /lib/charts/cert-manager.ts: -------------------------------------------------------------------------------- 1 | import cdk = require('@aws-cdk/core'); 2 | 3 | import { createNamespace, readYamlDocument, loadYaml } from './utils'; 4 | import { KubernetesManifest } from '@aws-cdk/aws-eks'; 5 | import { importCluster } from './cluster-utils'; 6 | 7 | export interface CertManagerProps extends cdk.StackProps { 8 | hostedZoneID?: string 9 | 10 | baseDomain?: string 11 | email?: string 12 | } 13 | 14 | export class CertManager extends cdk.Construct { 15 | constructor(scope: cdk.Construct, id: string, props: CertManagerProps) { 16 | super(scope, id); 17 | 18 | const cluster = importCluster(this, process.env.CLUSTER_NAME); 19 | 20 | const namespace = 'cert-manager'; 21 | 22 | const ns = createNamespace(namespace, cluster); 23 | 24 | const serviceAccount = cluster.addServiceAccount('cert-manager', { 25 | name: 'cert-manager', 26 | namespace, 27 | }); 28 | serviceAccount.node.addDependency(ns); 29 | 30 | const helmChart = cluster.addHelmChart('CertManagerChart', { 31 | chart: 'cert-manager', 32 | release: 'cert-manager', 33 | version: 'v1.6.0', 34 | repository: 
'https://charts.jetstack.io/', 35 | namespace, 36 | createNamespace: false, 37 | wait: true, 38 | values: { 39 | installCRDs: true, 40 | serviceAccountName: serviceAccount.serviceAccountName, 41 | serviceAccount: { 42 | create: false, 43 | }, 44 | securityContext: { 45 | enabled: true, 46 | fsGroup: 1001, 47 | }, 48 | webhook: { 49 | hostNetwork: true, 50 | securePort: 10260 51 | }, 52 | } 53 | }); 54 | helmChart.node.addDependency(serviceAccount); 55 | 56 | // only create route53 issuer if the required fields are configured 57 | if (props.hostedZoneID) { 58 | if (!props.baseDomain) { 59 | throw new Error("Unexpected error: Missing baseDomain environment variable"); 60 | } 61 | if (!props.email) { 62 | throw new Error("Unexpected error: Missing email environment variable"); 63 | } 64 | 65 | const doc = readYamlDocument(__dirname + '/assets/route53-issuer.yaml'); 66 | const docArray = doc. 67 | replace(/{{email}}/g, props.email). 68 | replace(/{{baseDomain}}/g, props.baseDomain). 69 | replace(/{{hostedZoneID}}/g, props.hostedZoneID). 70 | replace(/{{region}}/g, cluster.stack.region); 71 | 72 | const issuerManifest = docArray.split("---").map(e => loadYaml(e)); 73 | const certManagerIssuer = new KubernetesManifest(cluster.stack, "cert-manager-issuer", { 74 | cluster, 75 | overwrite: true, 76 | manifest: issuerManifest, 77 | }); 78 | 79 | certManagerIssuer.node.addDependency(helmChart); 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /lib/charts/cluster-autoscaler.ts: -------------------------------------------------------------------------------- 1 | 2 | import { StackProps } from "@aws-cdk/core"; 3 | import { importCluster } from './cluster-utils'; 4 | import cdk = require('@aws-cdk/core'); 5 | 6 | const CLUSTER_AUTOSCALER = 'cluster-autoscaler'; 7 | 8 | export class ClusterAutoscaler extends cdk.Construct { 9 | constructor(scope: cdk.Construct, id: string, props: StackProps) { 10 | super(scope, id); 11 | 12 | const cluster = importCluster(this, process.env.CLUSTER_NAME); 13 | 14 | const helmChart = cluster.addHelmChart('ClusterAutoscalerChart', { 15 | chart: CLUSTER_AUTOSCALER, 16 | release: CLUSTER_AUTOSCALER, 17 | repository: 'https://kubernetes.github.io/autoscaler', 18 | namespace: 'kube-system', 19 | version: '9.10.8', 20 | wait: true, 21 | values: { 22 | autoDiscovery: { 23 | clusterName: cluster.clusterName 24 | }, 25 | awsRegion: cluster.stack.region, 26 | serviceAccount: { 27 | create: false, 28 | name: CLUSTER_AUTOSCALER, 29 | }, 30 | replicaCount: 1, 31 | extraArgs: { 32 | 'stderrthreshold': 'info', 33 | 'v': 2, 34 | 'scale-down-utilization-threshold': 0.2, 35 | 'skip-nodes-with-local-storage': false, 36 | 'skip-nodes-with-system-pods': false, 37 | 'expander': 'least-waste', 38 | 'balance-similar-node-groups': true, 39 | 'node-group-auto-discovery': `asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/${cluster.clusterName}`, 40 | 'cordon-node-before-terminating': true, 41 | } 42 | }, 43 | }); 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /lib/charts/cluster-utils.ts: -------------------------------------------------------------------------------- 1 | import * as cdk from "@aws-cdk/core"; 2 | import * as iam from "@aws-cdk/aws-iam"; 3 | import * as eks from "@aws-cdk/aws-eks"; 4 | 5 | export function importCluster(scope: cdk.Construct, clusterName: string | undefined): eks.ICluster { 6 | if (clusterName == null) { 7 | throw new Error('Cluster Name is 
not defined.'); 8 | } 9 | 10 | const oidcProviderArn = cdk.Fn.importValue("OpenIdConnectProviderArn"); 11 | const openIdConnectProvider = iam.OpenIdConnectProvider.fromOpenIdConnectProviderArn( 12 | scope, "OpenIdConnectProvider", oidcProviderArn); 13 | 14 | return eks.Cluster.fromClusterAttributes(scope, "BaseCluster", { 15 | clusterName, 16 | openIdConnectProvider, 17 | kubectlRoleArn: process.env.KUBECTL_ROLE_ARN 18 | }); 19 | } 20 | -------------------------------------------------------------------------------- /lib/charts/container-insights.ts: -------------------------------------------------------------------------------- 1 | import * as cdk from "@aws-cdk/core"; 2 | import * as eks from "@aws-cdk/aws-eks"; 3 | import * as iam from '@aws-cdk/aws-iam'; 4 | import { LogGroup, RetentionDays, ILogGroup } from '@aws-cdk/aws-logs'; 5 | import { importCluster } from './cluster-utils'; 6 | import { RemovalPolicy } from '@aws-cdk/core'; 7 | import { ServiceAccount } from "@aws-cdk/aws-eks"; 8 | import { ManagedPolicy } from '@aws-cdk/aws-iam'; 9 | 10 | export class ContainerInsights extends cdk.Construct { 11 | constructor(scope: cdk.Construct, id: string, props: cdk.StackProps) { 12 | super(scope, id); 13 | 14 | const cluster = importCluster(this, process.env.CLUSTER_NAME); 15 | 16 | const namespace = 'kube-system'; 17 | 18 | const serviceAccount = new eks.ServiceAccount(this, 'fluent-bit', { 19 | cluster, 20 | name: 'fluent-bit', 21 | namespace, 22 | }); 23 | serviceAccount.role.addManagedPolicy( 24 | iam.ManagedPolicy.fromAwsManagedPolicyName('CloudWatchAgentServerPolicy'), 25 | ); 26 | 27 | const helmChart = cluster.addHelmChart('aws-for-fluent-bit', { 28 | chart: 'aws-for-fluent-bit', 29 | release: 'aws-for-fluent-bit', 30 | repository: 'https://aws.github.io/eks-charts', 31 | namespace, 32 | version: '0.1.17', 33 | values: { 34 | serviceAccount: { 35 | create: false, 36 | name: serviceAccount.serviceAccountName, 37 | }, 38 | cloudWatch: this.parseCloudWatchOptions(`${process.env.AWS_REGION}`, serviceAccount), 39 | elasticsearch: { 40 | enabled: false 41 | }, 42 | kinesis: { 43 | enabled: false 44 | }, 45 | firehose: { 46 | enabled: false 47 | }, 48 | } 49 | }); 50 | helmChart.node.addDependency(serviceAccount); 51 | } 52 | 53 | private parseCloudWatchOptions(region: string, serviceAccount: ServiceAccount): Record { 54 | const logGroup = new LogGroup(this, 'AwsForFluentBitAddonDefaultLogGroup', { 55 | logGroupName: `/aws/eks/fluentbit/${process.env.CLUSTER_NAME}`, 56 | removalPolicy: RemovalPolicy.DESTROY, 57 | retention: RetentionDays.ONE_MONTH, 58 | }); 59 | 60 | serviceAccount.role.addManagedPolicy(ManagedPolicy.fromAwsManagedPolicyName('CloudWatchAgentServerPolicy')); 61 | 62 | return { 63 | enabled: true, 64 | region, 65 | logGroupName: logGroup.logGroupName, 66 | match: '*', 67 | autoCreateGroup: false, 68 | }; 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /lib/charts/external-dns.ts: -------------------------------------------------------------------------------- 1 | import cdk = require('@aws-cdk/core'); 2 | import * as iam from '@aws-cdk/aws-iam'; 3 | import { createNamespace } from './utils'; 4 | import { importCluster } from './cluster-utils'; 5 | 6 | const EXTERNAL_DNS_NAMESPACE = "external-dns"; 7 | 8 | export class ExternalDNS extends cdk.Construct { 9 | 10 | constructor(scope: cdk.Construct, id: string, props: cdk.StackProps) { 11 | super(scope, id); 12 | 13 | if (!process.env.ROUTE53_ZONEID) { 14 | return; 15 | } 16 
| 17 | const cluster = importCluster(this, process.env.CLUSTER_NAME); 18 | 19 | const ns = createNamespace(EXTERNAL_DNS_NAMESPACE, cluster); 20 | 21 | const serviceAccount = cluster.addServiceAccount('external-dns', { 22 | name: 'external-dns', 23 | namespace: EXTERNAL_DNS_NAMESPACE, 24 | }); 25 | serviceAccount.addToPrincipalPolicy( 26 | new iam.PolicyStatement({ 27 | effect: iam.Effect.ALLOW, 28 | actions: ['route53:ChangeResourceRecordSets'], 29 | resources: ['arn:aws:route53:::hostedzone/*'], 30 | }), 31 | ); 32 | serviceAccount.addToPrincipalPolicy( 33 | new iam.PolicyStatement({ 34 | effect: iam.Effect.ALLOW, 35 | actions: ['route53:ListHostedZones', 'route53:ListResourceRecordSets'], 36 | resources: ['*'], 37 | }), 38 | ); 39 | serviceAccount.node.addDependency(ns); 40 | 41 | const helmChart = cluster.addHelmChart('external-dns', { 42 | chart: 'external-dns', 43 | release: 'external-dns', 44 | repository: 'https://charts.bitnami.com/bitnami', 45 | namespace: EXTERNAL_DNS_NAMESPACE, 46 | version: '6.5.3', 47 | values: { 48 | podSecurityContext: { 49 | fsGroup: 65534, 50 | runAsUser: 0 51 | }, 52 | logFormat: 'json', 53 | domainFilters: [], 54 | sources: ["ingress"], 55 | policy: "upsert-only", 56 | serviceAccount: { 57 | create: false, 58 | name: serviceAccount.serviceAccountName 59 | }, 60 | provider: 'aws', 61 | aws: { 62 | region: cluster.stack.region, 63 | zoneType: "public", 64 | preferCNAME: false, 65 | evaluateTargetHealth: false, 66 | }, 67 | txtOwnerId: process.env.ROUTE53_ZONEID 68 | } 69 | }); 70 | 71 | helmChart.node.addDependency(serviceAccount); 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /lib/charts/jaeger.ts: -------------------------------------------------------------------------------- 1 | import { KubernetesManifest } from '@aws-cdk/aws-eks'; 2 | import { loadYaml, readYamlDocument } from './utils'; 3 | import { StackProps } from '@aws-cdk/core'; 4 | import { importCluster } from './cluster-utils'; 5 | import cdk = require('@aws-cdk/core'); 6 | import eks = require('@aws-cdk/aws-eks'); 7 | 8 | export class Jaeger extends cdk.Construct { 9 | constructor(scope: cdk.Construct, id: string, props: StackProps) { 10 | super(scope, id); 11 | 12 | const cluster = importCluster(this, process.env.CLUSTER_NAME); 13 | 14 | const helmChart = cluster.addHelmChart('jaeger-operator-chart', { 15 | chart: 'jaeger-operator', 16 | release: 'jaeger-operator', 17 | repository: 'https://jaegertracing.github.io/helm-charts', 18 | namespace: 'jaeger-operator', 19 | version: '2.27.0', 20 | wait: true, 21 | values: { 22 | rbac: { 23 | clusterRole: true, 24 | }, 25 | "affinity": { 26 | "nodeAffinity": { 27 | "requiredDuringSchedulingIgnoredDuringExecution": { 28 | "nodeSelectorTerms": [ 29 | { 30 | "matchExpressions": [ 31 | { 32 | "key": "gitpod.io/workload_meta", 33 | "operator": "In", 34 | "values": ["true"] 35 | } 36 | ] 37 | } 38 | ] 39 | } 40 | } 41 | } 42 | }, 43 | }); 44 | 45 | const doc = readYamlDocument(__dirname + '/assets/jaeger-gitpod.yaml'); 46 | const gitpodJaeger = new KubernetesManifest(cluster.stack, "gitpod-jaeger", { 47 | cluster, 48 | overwrite: true, 49 | manifest: [loadYaml(doc)], 50 | }); 51 | gitpodJaeger.node.addDependency(helmChart); 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /lib/charts/load-balancer.ts: -------------------------------------------------------------------------------- 1 | import { importCluster } from './cluster-utils'; 2 | import cdk = 
require('@aws-cdk/core'); 3 | 4 | const AWS_LOAD_BALANCER_CONTROLLER = 'aws-load-balancer-controller'; 5 | 6 | export class AWSLoadBalancerController extends cdk.Construct { 7 | constructor(scope: cdk.Construct, id: string, props: cdk.StackProps) { 8 | super(scope, id); 9 | 10 | const cluster = importCluster(this, process.env.CLUSTER_NAME); 11 | 12 | const helmChart = cluster.addHelmChart('AWSLoadBalancerControllerChart', { 13 | chart: AWS_LOAD_BALANCER_CONTROLLER, 14 | release: AWS_LOAD_BALANCER_CONTROLLER, 15 | repository: 'https://aws.github.io/eks-charts', 16 | namespace: 'kube-system', 17 | version: '1.4.2', 18 | wait: true, 19 | values: { 20 | replicaCount: 1, 21 | hostNetwork: true, 22 | clusterName: cluster.clusterName, 23 | serviceAccount: { 24 | create: false, 25 | name: AWS_LOAD_BALANCER_CONTROLLER, 26 | }, 27 | }, 28 | }); 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /lib/charts/metrics-server.ts: -------------------------------------------------------------------------------- 1 | import * as cdk from "@aws-cdk/core"; 2 | import { StackProps } from '@aws-cdk/core'; 3 | import { importCluster } from './cluster-utils'; 4 | 5 | export class MetricsServer extends cdk.Construct { 6 | constructor(scope: cdk.Construct, id: string, props: StackProps) { 7 | super(scope, id); 8 | 9 | const cluster = importCluster(this, process.env.CLUSTER_NAME); 10 | 11 | const helmChart = cluster.addHelmChart('MetricsServerChart', { 12 | chart: 'metrics-server', 13 | release: 'metrics-server', 14 | repository: 'https://charts.bitnami.com/bitnami', 15 | namespace: 'kube-system', 16 | version: '5.10.14', 17 | wait: true, 18 | values: { 19 | hostNetwork: true, 20 | apiService: { 21 | create: true 22 | }, 23 | extraArgs: { 24 | 'v': '2', 25 | 'kubelet-preferred-address-types': 'InternalIP, ExternalIP, Hostname' 26 | } 27 | }, 28 | }); 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /lib/charts/utils.ts: -------------------------------------------------------------------------------- 1 | import * as eks from '@aws-cdk/aws-eks'; 2 | import * as fs from 'fs'; 3 | import * as yaml from 'js-yaml'; 4 | import request from 'sync-request'; 5 | 6 | export function readYamlDocument(path: string): string { 7 | try { 8 | const doc = fs.readFileSync(path, 'utf8'); 9 | return doc; 10 | } catch (e) { 11 | console.log(e + ' for path: ' + path); 12 | throw e; 13 | } 14 | } 15 | 16 | export function loadYaml(document: string): any { 17 | return yaml.load(document); 18 | } 19 | 20 | export function loadExternalYaml(url: string): any[] { 21 | return yaml.loadAll(request('GET', url).getBody().toString()); 22 | } 23 | 24 | export function serializeYaml(document: any): string { 25 | return yaml.dump(document); 26 | } 27 | 28 | export function createNamespace(name: string, cluster: eks.ICluster): eks.KubernetesManifest { 29 | const resource = cluster.addManifest(`${name}Namespace`, { 30 | apiVersion: 'v1', 31 | kind: 'Namespace', 32 | metadata: { 33 | name: name 34 | } 35 | }); 36 | 37 | return resource; 38 | } 39 | -------------------------------------------------------------------------------- /lib/database.ts: -------------------------------------------------------------------------------- 1 | import * as ec2 from '@aws-cdk/aws-ec2'; 2 | import * as rds from '@aws-cdk/aws-rds'; 3 | import * as cdk from '@aws-cdk/core'; 4 | import { SecretValue } from '@aws-cdk/core'; 5 | import * as ssm from '@aws-cdk/aws-ssm'; 6 | 7 | 
export interface DatabaseProps extends cdk.StackProps { 8 | readonly clusterName: string; 9 | readonly vpc: ec2.IVpc; 10 | readonly databaseSubnets?: ec2.SubnetSelection; 11 | readonly instanceType?: ec2.InstanceType; 12 | readonly instanceEngine?: rds.IInstanceEngine; 13 | readonly backupRetention?: cdk.Duration; 14 | readonly username: string; 15 | } 16 | 17 | export class Database extends cdk.Stack { 18 | readonly credentials: string 19 | 20 | constructor(scope: cdk.Construct, id: string, props: DatabaseProps) { 21 | super(scope, id, props); 22 | 23 | const rdsVersion = rds.MysqlEngineVersion.VER_5_7; 24 | const parameterGroup = new rds.ParameterGroup(this, "DBParameterGroup", { 25 | engine: props.instanceEngine ?? rds.DatabaseInstanceEngine.mysql({ 26 | version: rdsVersion, 27 | }), 28 | parameters: { 29 | explicit_defaults_for_timestamp: "OFF" 30 | } 31 | }); 32 | 33 | // TODO: remove when the gitpod helm chart supports using secrets from ssm 34 | this.credentials = ssm.StringParameter.valueForStringParameter( 35 | this, `/gitpod/cluster/${props.clusterName}/region/${props.vpc.stack.region}`); 36 | 37 | const instance = new rds.DatabaseInstance(this, 'Gitpod', { 38 | vpc: props.vpc, 39 | vpcSubnets: { subnetType: ec2.SubnetType.PRIVATE_WITH_NAT }, 40 | engine: props.instanceEngine ?? rds.DatabaseInstanceEngine.mysql({ 41 | version: rdsVersion, 42 | }), 43 | storageEncrypted: true, 44 | backupRetention: props.backupRetention ?? cdk.Duration.days(7), 45 | credentials: rds.Credentials.fromPassword(props.username, SecretValue.plainText(this.credentials)), 46 | instanceType: props.instanceType ?? ec2.InstanceType.of( 47 | ec2.InstanceClass.T3, 48 | ec2.InstanceSize.MEDIUM, 49 | ), 50 | allocatedStorage: 10, 51 | // Enable multiAz for production 52 | multiAz: false, 53 | removalPolicy: cdk.RemovalPolicy.DESTROY, 54 | databaseName: 'gitpod', 55 | autoMinorVersionUpgrade: false, 56 | deletionProtection: false, 57 | parameterGroup 58 | }); 59 | 60 | // allow internally from the same security group 61 | instance.connections.allowInternally(ec2.Port.tcp(3306)); 62 | // allow from the whole vpc cidr 63 | instance.connections.allowFrom(ec2.Peer.ipv4(props.vpc.vpcCidrBlock), ec2.Port.tcp(3306)); 64 | 65 | new cdk.CfnOutput(this, "MysqlEndpoint", { 66 | value: instance.dbInstanceEndpointAddress, 67 | exportName: "MysqlEndpoint", 68 | }); 69 | new cdk.CfnOutput(this, "MysqlUsername", { 70 | value: props.username, 71 | exportName: "MysqlUsername", 72 | }); 73 | new cdk.CfnOutput(this, "MysqlPort", { 74 | value: '3306', 75 | exportName: "MysqlPort", 76 | }); 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /lib/gitpod.ts: -------------------------------------------------------------------------------- 1 | import { 2 | Construct, 3 | Stack, 4 | StackProps 5 | } from '@aws-cdk/core'; 6 | 7 | import { KubernetesManifest } from '@aws-cdk/aws-eks'; 8 | 9 | import { readYamlDocument, loadYaml } from './charts/utils'; 10 | import { importCluster } from './charts/cluster-utils'; 11 | 12 | export interface GitpodProps extends StackProps { 13 | domain: string 14 | 15 | certificateArn?: string 16 | } 17 | 18 | export class GitpodStack extends Stack { 19 | constructor(scope: Construct, id: string, props: GitpodProps) { 20 | super(scope, id, props); 21 | 22 | const cluster = importCluster(this, process.env.CLUSTER_NAME); 23 | 24 | const doc = readYamlDocument(__dirname + '/charts/assets/ingress.yaml'); 25 | const manifest = loadYaml(doc) as any; 26 | 27 | // 
configure TLS termination in the load balancer 28 | if (props.certificateArn) { 29 | manifest.metadata.annotations["alb.ingress.kubernetes.io/certificate-arn"] = props.certificateArn; 30 | manifest.metadata.annotations["alb.ingress.kubernetes.io/ssl-policy"] = "ELBSecurityPolicy-FS-1-2-Res-2020-10"; 31 | } 32 | 33 | manifest.metadata.annotations["alb.ingress.kubernetes.io/load-balancer-name"] = `${process.env.CLUSTER_NAME}-${props.env?.region}`; 34 | 35 | // if we have a route53 zone ID, enable external-dns. 36 | if (process.env.ROUTE53_ZONEID) { 37 | manifest.metadata.annotations["external-dns.alpha.kubernetes.io/hostname"] = `${props.domain}, *.${props.domain}, *.ws.${props.domain}`; 38 | } 39 | 40 | if (process.env.USE_INTERNAL_ALB && process.env.USE_INTERNAL_ALB.toLowerCase() === 'true') { 41 | manifest.metadata.annotations["alb.ingress.kubernetes.io/scheme"] = 'internal'; 42 | } else { 43 | manifest.metadata.annotations["alb.ingress.kubernetes.io/scheme"] = 'internet-facing'; 44 | } 45 | 46 | if (process.env.ALB_SUBNETS) { 47 | manifest.metadata.annotations["alb.ingress.kubernetes.io/subnets"] = `${process.env.ALB_SUBNETS}`; 48 | } 49 | 50 | const gitpodIngress = new KubernetesManifest(this, "gitpod-ingress", { 51 | cluster, 52 | overwrite: true, 53 | manifest: [manifest], 54 | }); 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /lib/registry.ts: -------------------------------------------------------------------------------- 1 | import * as cdk from '@aws-cdk/core'; 2 | import { Bucket, IBucket, BucketEncryption, BlockPublicAccess } from '@aws-cdk/aws-s3'; 3 | import { RemovalPolicy } from '@aws-cdk/core'; 4 | import * as iam from '@aws-cdk/aws-iam'; 5 | import { CfnAccessKey } from '@aws-cdk/aws-iam'; 6 | 7 | export interface RegistryProps extends cdk.StackProps { 8 | readonly clusterName: string 9 | readonly bucketName: string 10 | readonly createBucket: boolean 11 | } 12 | 13 | export class Registry extends cdk.Stack { 14 | constructor(scope: cdk.Construct, id: string, props: RegistryProps) { 15 | super(scope, id, props); 16 | 17 | let registryBucket: IBucket; 18 | 19 | if (props.createBucket) { 20 | registryBucket = new Bucket(this, "RegistryBucket", { 21 | encryption: BucketEncryption.KMS_MANAGED, 22 | bucketName: props.bucketName, 23 | publicReadAccess: false, 24 | blockPublicAccess: BlockPublicAccess.BLOCK_ALL, 25 | removalPolicy: RemovalPolicy.RETAIN 26 | }); 27 | } else { 28 | registryBucket = Bucket.fromBucketAttributes(this, 'RegistryBucket', { 29 | bucketArn: `arn:aws:s3:::${props.bucketName}`, 30 | }); 31 | } 32 | 33 | const GitpodRegistryAccess = new iam.Policy(this, 'RegistryAccess', { 34 | policyName: 'GitpodS3Access', 35 | statements: [ 36 | new iam.PolicyStatement({ 37 | resources: ['*'], 38 | actions: [ 39 | "s3:*", 40 | ], 41 | }), 42 | new iam.PolicyStatement({ 43 | resources: [`${registryBucket.bucketArn}`], 44 | actions: [ 45 | "s3:ListBucket", 46 | "s3:GetBucketLocation", 47 | "s3:ListBucketMultipartUploads" 48 | ], 49 | }), 50 | new iam.PolicyStatement({ 51 | resources: [`${registryBucket.bucketArn}/*`], 52 | actions: [ 53 | "s3:PutObject", 54 | "s3:GetObject", 55 | "s3:DeleteObject", 56 | "s3:ListMultipartUploadParts", 57 | "s3:AbortMultipartUpload" 58 | ], 59 | }), 60 | ], 61 | }); 62 | 63 | const storage = new iam.Group(this, 'RegistryStorage', { 64 | groupName: 'RegistryStorage', 65 | }); 66 | storage.attachInlinePolicy(GitpodRegistryAccess); 67 | 68 | const userName = 
`registry-storage-${props.clusterName}`.toLowerCase(); 69 | const user = new iam.User(this, 'GitpodIAMUserS3', { 70 | userName, 71 | groups: [storage] 72 | }); 73 | 74 | const accessKey = new CfnAccessKey(this, `${userName}AccessKey`, { 75 | userName, 76 | }); 77 | accessKey.node.addDependency(user); 78 | 79 | new cdk.CfnOutput(this, "AccessKeyId", { 80 | value: accessKey.ref, 81 | exportName: "AccessKeyId", 82 | }); 83 | new cdk.CfnOutput(this, "SecretAccessKey", { 84 | value: accessKey.attrSecretAccessKey, 85 | exportName: "SecretAccessKey", 86 | }); 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /lib/services.ts: -------------------------------------------------------------------------------- 1 | 2 | import * as cdk from '@aws-cdk/core' 3 | import * as ec2 from '@aws-cdk/aws-ec2' 4 | 5 | import { Database } from './database'; 6 | import { Registry } from './registry'; 7 | 8 | export class ServicesStack extends cdk.Stack { 9 | //readonly registry: Registry 10 | 11 | constructor(scope: cdk.Construct, id: string, props: cdk.StackProps) { 12 | super(scope, id, props) 13 | 14 | // search VPC created by eksctl 15 | const vpc = ec2.Vpc.fromLookup(this, 'vpc', { 16 | vpcName: `eksctl-${process.env.CLUSTER_NAME}-cluster/VPC`, 17 | isDefault: false 18 | }); 19 | 20 | // create RDS database for gitpod 21 | const database = new Database(this, 'RDS', { 22 | env: props.env, 23 | clusterName: `${process.env.CLUSTER_NAME}`, 24 | vpc, 25 | username: 'gitpod' 26 | }) 27 | database.node.addDependency(vpc); 28 | 29 | // create permissions to access S3 buckets 30 | const registry = new Registry(this, 'Registry', { 31 | env: props.env, 32 | clusterName: `${process.env.CLUSTER_NAME}`, 33 | bucketName: `${process.env.CONTAINER_REGISTRY_BUCKET}`, 34 | createBucket: process.env.CREATE_S3_BUCKET === 'true', 35 | }); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /lib/setup.ts: -------------------------------------------------------------------------------- 1 | import * as path from "path"; 2 | 3 | import * as cdk from '@aws-cdk/core' 4 | import * as ec2 from '@aws-cdk/aws-ec2' 5 | import * as iam from '@aws-cdk/aws-iam'; 6 | 7 | export interface SetupProps extends cdk.StackProps { 8 | identityoidcissuer: string 9 | } 10 | 11 | export class SetupStack extends cdk.Stack { 12 | constructor(scope: cdk.Construct, id: string, props: SetupProps) { 13 | super(scope, id, props) 14 | 15 | // search VPC created by eksctl (validation purposes) 16 | ec2.Vpc.fromLookup(this, 'vpc', { 17 | vpcName: `eksctl-${process.env.CLUSTER_NAME}-cluster/VPC`, 18 | isDefault: false 19 | }); 20 | 21 | // Extract the ID of the EKS cluster from the identityoidcissuer URL 22 | const clusterID = path.basename(props.identityoidcissuer); 23 | const oidcProviderArn = `arn:aws:iam::${process.env.ACCOUNT_ID}:oidc-provider/oidc.eks.${process.env.AWS_REGION}.amazonaws.com/id/${clusterID}`; 24 | const openIdConnectProvider = iam.OpenIdConnectProvider.fromOpenIdConnectProviderArn(this, "OpenIdConnectProvider", oidcProviderArn); 25 | 26 | new cdk.CfnOutput(this, "ClusterName", { 27 | value: `${process.env.CLUSTER_NAME}`, exportName: "ClusterName", 28 | }); 29 | new cdk.CfnOutput(this, "OpenIdConnectProviderArn", { 30 | value: openIdConnectProvider.openIdConnectProviderArn, 31 | exportName: "OpenIdConnectProviderArn", 32 | }); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /package.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "name": "gitpod-eks-guide", 3 | "version": "0.1.0", 4 | "bin": { 5 | "addons": "bin/provision.js" 6 | }, 7 | "scripts": { 8 | "build": "tsc", 9 | "watch": "tsc -w", 10 | "test": "jest", 11 | "cdk": "cdk" 12 | }, 13 | "devDependencies": { 14 | "@aws-cdk/assert": "^1.132.0", 15 | "@types/jest": "^27.0.3", 16 | "@types/js-yaml": "^4.0.5", 17 | "@types/node": "^16.11.9", 18 | "@types/uuid": "^8.3.3", 19 | "husky": "^7.0.4", 20 | "jest": "^27.3.1", 21 | "ts-jest": "^27.0.7", 22 | "ts-node": "^10.4.0", 23 | "typescript": "^4.5.2" 24 | }, 25 | "dependencies": { 26 | "@aws-cdk/aws-eks": "^1.132.0", 27 | "@aws-cdk/aws-iam": "^1.132.0", 28 | "@aws-cdk/aws-lambda-nodejs": "^1.133.0", 29 | "@aws-cdk/aws-rds": "^1.132.0", 30 | "@aws-cdk/aws-secretsmanager": "^1.132.0", 31 | "@aws-cdk/core": "^1.132.0", 32 | "@aws-sdk/client-secrets-manager": "^3.41.0", 33 | "js-yaml": "^4.1.0", 34 | "source-map-support": "^0.5.21", 35 | "sync-request": "^6.1.0", 36 | "uuid": "^8.3.2" 37 | }, 38 | "packageManager": "yarn@3.1.0", 39 | "type": "commonjs" 40 | } 41 | -------------------------------------------------------------------------------- /setup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eo pipefail 4 | 5 | DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd -P) 6 | 7 | function variables_from_context() { 8 | # Variables used by the install and uninstall steps 9 | # Generate a new kubeconfig file in the local directory 10 | KUBECONFIG=".kubeconfig" 11 | 12 | # extract details from the eksctl configuration file 13 | CLUSTER_NAME=$(yq eval '.metadata.name' "${EKSCTL_CONFIG}") 14 | AWS_REGION=$(yq eval '.metadata.region' "${EKSCTL_CONFIG}") 15 | 16 | ACCOUNT_ID=$(${AWS_CMD} sts get-caller-identity | jq -r .Account) 17 | 18 | # use the default bucket? 19 | if [ -z "${CONTAINER_REGISTRY_BUCKET}" ]; then 20 | CONTAINER_REGISTRY_BUCKET="container-registry-${CLUSTER_NAME}-${ACCOUNT_ID}" 21 | fi 22 | 23 | CREATE_S3_BUCKET="false" 24 | if ! ${AWS_CMD} s3api head-bucket --bucket "${CONTAINER_REGISTRY_BUCKET}" >/dev/null 2>&1; then 25 | CREATE_S3_BUCKET="true" 26 | fi 27 | 28 | export KUBECONFIG 29 | export CLUSTER_NAME 30 | export AWS_REGION 31 | export ACCOUNT_ID 32 | export CREATE_S3_BUCKET 33 | export CONTAINER_REGISTRY_BUCKET 34 | } 35 | 36 | function check_prerequisites() { 37 | EKSCTL_CONFIG=$1 38 | if [ ! -f "${EKSCTL_CONFIG}" ]; then 39 | echo "The eksctl configuration file ${EKSCTL_CONFIG} does not exist." 40 | exit 1 41 | else 42 | echo "Using eksctl configuration file: ${EKSCTL_CONFIG}" 43 | fi 44 | export EKSCTL_CONFIG 45 | 46 | if [ -z "${CERTIFICATE_ARN}" ]; then 47 | echo "Missing CERTIFICATE_ARN environment variable." 48 | exit 1; 49 | fi 50 | 51 | if [ -z "${DOMAIN}" ]; then 52 | echo "Missing DOMAIN environment variable." 53 | exit 1; 54 | fi 55 | 56 | AWS_CMD="aws" 57 | if [ -z "${AWS_PROFILE}" ]; then 58 | echo "Missing (optional) AWS profile." 59 | unset AWS_PROFILE 60 | else 61 | echo "Using the AWS profile: ${AWS_PROFILE}" 62 | AWS_CMD="aws --profile ${AWS_PROFILE}" 63 | fi 64 | export AWS_CMD 65 | 66 | if [ -z "${ROUTE53_ZONEID}" ]; then 67 | echo "Missing (optional) ROUTE53_ZONEID environment variable." 68 | echo "Please configure the CNAME with the URL of the load balancer manually." 69 | else 70 | echo "Using external-dns. No manual intervention required."
71 | fi 72 | } 73 | 74 | # Bootstrap AWS CDK - https://docs.aws.amazon.com/cdk/latest/guide/bootstrapping.html 75 | function ensure_aws_cdk() { 76 | pushd /tmp > /dev/null 2>&1; cdk bootstrap "aws://${ACCOUNT_ID}/${AWS_REGION}"; popd > /dev/null 2>&1 77 | } 78 | 79 | function install() { 80 | check_prerequisites "$1" 81 | variables_from_context 82 | ensure_aws_cdk 83 | 84 | # Check the certificate exists 85 | if ! ${AWS_CMD} acm describe-certificate --certificate-arn "${CERTIFICATE_ARN}" --region "${AWS_REGION}" >/dev/null 2>&1; then 86 | echo "The certificate ${CERTIFICATE_ARN} does not exist." 87 | exit 1 88 | fi 89 | 90 | if ! eksctl get cluster "${CLUSTER_NAME}" > /dev/null 2>&1; then 91 | # https://eksctl.io/usage/managing-nodegroups/ 92 | eksctl create cluster --config-file "${EKSCTL_CONFIG}" --without-nodegroup --kubeconfig "${KUBECONFIG}" 93 | else 94 | ${AWS_CMD} eks update-kubeconfig --name "${CLUSTER_NAME}" --region "${AWS_REGION}" 95 | fi 96 | 97 | # Disable default AWS CNI provider. 98 | # The reason for this change is related to the number of pods we can have on EC2 instances 99 | # https://github.com/awslabs/amazon-eks-ami/blob/master/files/eni-max-pods.txt 100 | # https://docs.aws.amazon.com/eks/latest/userguide/pod-networking.html 101 | kubectl patch ds -n kube-system aws-node -p '{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": "true"}}}}}' 102 | # Install Calico. 103 | kubectl apply -f https://docs.projectcalico.org/manifests/calico-vxlan.yaml 104 | 105 | # Create secret with container registry credentials 106 | if [ -n "${IMAGE_PULL_SECRET_FILE}" ] && [ -f "${IMAGE_PULL_SECRET_FILE}" ]; then 107 | kubectl create secret generic gitpod-image-pull-secret \ 108 | --from-file=.dockerconfigjson="${IMAGE_PULL_SECRET_FILE}" \ 109 | --type=kubernetes.io/dockerconfigjson >/dev/null 2>&1 || true 110 | fi 111 | 112 | if ${AWS_CMD} iam get-role --role-name "${CLUSTER_NAME}-region-${AWS_REGION}-role-eksadmin" > /dev/null 2>&1; then 113 | KUBECTL_ROLE_ARN=$(${AWS_CMD} iam get-role --role-name "${CLUSTER_NAME}-region-${AWS_REGION}-role-eksadmin" | jq -r .Role.Arn) 114 | else 115 | echo "Creating Role for EKS access" 116 | # Create IAM role and mapping to Kubernetes user and groups. 117 | POLICY=$(echo -n '{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":"arn:aws:iam::'; echo -n "$ACCOUNT_ID"; echo -n ':root"},"Action":"sts:AssumeRole","Condition":{}}]}') 118 | KUBECTL_ROLE_ARN=$(${AWS_CMD} iam create-role \ 119 | --role-name "${CLUSTER_NAME}-region-${AWS_REGION}-role-eksadmin" \ 120 | --description "Kubernetes role (for AWS IAM Authenticator for Kubernetes)." \ 121 | --assume-role-policy-document "$POLICY" \ 122 | --output text \ 123 | --query 'Role.Arn') 124 | fi 125 | export KUBECTL_ROLE_ARN 126 | 127 | # check if the identity mapping already exists 128 | # Manage IAM users and roles https://eksctl.io/usage/iam-identity-mappings/ 129 | if !
eksctl get iamidentitymapping --cluster "${CLUSTER_NAME}" --arn "${KUBECTL_ROLE_ARN}" > /dev/null 2>&1; then 130 | echo "Creating mapping from IAM role ${KUBECTL_ROLE_ARN}" 131 | eksctl create iamidentitymapping \ 132 | --cluster "${CLUSTER_NAME}" \ 133 | --arn "${KUBECTL_ROLE_ARN}" \ 134 | --username eksadmin \ 135 | --group system:masters 136 | fi 137 | 138 | # Create cluster nodes defined in the configuration file 139 | eksctl create nodegroup --config-file="${EKSCTL_CONFIG}" 140 | 141 | # Restart tigera-operator 142 | kubectl delete pod -n tigera-operator -l k8s-app=tigera-operator > /dev/null 2>&1 143 | 144 | MYSQL_GITPOD_USERNAME="gitpod" 145 | MYSQL_GITPOD_PASSWORD=$(openssl rand -hex 18) 146 | MYSQL_GITPOD_SECRET="mysql-gitpod-token" 147 | MYSQL_GITPOD_ENCRYPTION_KEY='[{"name":"general","version":1,"primary":true,"material":"4uGh1q8y2DYryJwrVMHs0kWXJlqvHWWt/KJuNi04edI="}]' 148 | SECRET_STORAGE="object-storage-gitpod-token" 149 | 150 | # generated password cannot exceed 41 characters (RDS limitation) 151 | SSM_KEY="/gitpod/cluster/${CLUSTER_NAME}/region/${AWS_REGION}" 152 | ${AWS_CMD} ssm put-parameter \ 153 | --overwrite \ 154 | --name "${SSM_KEY}" \ 155 | --type String \ 156 | --value "${MYSQL_GITPOD_PASSWORD}" \ 157 | --region "${AWS_REGION}" > /dev/null 2>&1 158 | 159 | # deploy CDK stacks 160 | cdk deploy \ 161 | --context clusterName="${CLUSTER_NAME}" \ 162 | --context region="${AWS_REGION}" \ 163 | --context domain="${DOMAIN}" \ 164 | --context certificatearn="${CERTIFICATE_ARN}" \ 165 | --context identityoidcissuer="$(${AWS_CMD} eks describe-cluster --name "${CLUSTER_NAME}" --query "cluster.identity.oidc.issuer" --output text --region "${AWS_REGION}")" \ 166 | --require-approval never \ 167 | --outputs-file cdk-outputs.json \ 168 | --all 169 | 170 | output_config 171 | } 172 | 173 | function output_config() { 174 | 175 | MYSQL_HOST=$(jq -r '. | to_entries[] | select(.key | startswith("ServicesRDS")).value.MysqlEndpoint ' < cdk-outputs.json) 176 | S3_ACCESS_KEY=$(jq -r '. | to_entries[] | select(.key | startswith("ServicesRegistry")).value.AccessKeyId ' < cdk-outputs.json) 177 | S3_SECRET_KEY=$(jq -r '. | to_entries[] | select(.key | startswith("ServicesRegistry")).value.SecretAccessKey ' < cdk-outputs.json) 178 | 179 | cat << EOF 180 | 181 | ========================== 182 | 🎉🥳🔥🧡🚀 183 | 184 | Your cloud infrastructure is ready to install Gitpod. Please visit 185 | https://www.gitpod.io/docs/self-hosted/latest/getting-started#step-4-install-gitpod 186 | for your next steps. 187 | 188 | Passwords may change on subsequent runs of this guide. 189 | 190 | ================= 191 | Config Parameters 192 | ================= 193 | 194 | Domain Name: ${DOMAIN} 195 | 196 | Database 197 | ======== 198 | Host: ${MYSQL_HOST} 199 | Username: ${MYSQL_GITPOD_USERNAME} 200 | Password: ${MYSQL_GITPOD_PASSWORD} 201 | Port: 3306 202 | 203 | Container Registry Storage 204 | ========================== 205 | S3 BUCKET NAME: ${CONTAINER_REGISTRY_BUCKET} 206 | S3 ACCESS KEY: ${S3_ACCESS_KEY} 207 | S3 SECRET KEY: ${S3_SECRET_KEY} 208 | 209 | TLS Certificates 210 | ================ 211 | Issuer name: gitpod-selfsigned-issuer 212 | Issuer type: Issuer 213 | 214 | 215 | Once Gitpod is installed and the DNS records are updated, run the following commands: 216 | 217 | # remove shiftfs-module-loader container.
218 | # TODO: remove once the container is removed from the installer 219 | kubectl patch daemonset ws-daemon --type json -p='[{"op": "remove", "path": "/spec/template/spec/initContainers/3"}]' 220 | 221 | # Use the following URL for DNS 222 | kubectl get ingress gitpod -o json | jq -r .status.loadBalancer.ingress[0].hostname 223 | EOF 224 | } 225 | 226 | 227 | function uninstall() { 228 | check_prerequisites "$1" 229 | variables_from_context 230 | 231 | read -p "Are you sure you want to delete: Gitpod, Services/Registry, Services/RDS, Services, Addons, Setup (y/n)? " -n 1 -r 232 | if [[ $REPLY =~ ^[Yy]$ ]]; then 233 | if ! ${AWS_CMD} eks describe-cluster --name "${CLUSTER_NAME}" --region "${AWS_REGION}" > /dev/null; then 234 | exit 1 235 | fi 236 | 237 | KUBECTL_ROLE_ARN=$(${AWS_CMD} iam get-role --role-name "${CLUSTER_NAME}-region-${AWS_REGION}-role-eksadmin" | jq -r .Role.Arn) 238 | export KUBECTL_ROLE_ARN 239 | 240 | SSM_KEY="/gitpod/cluster/${CLUSTER_NAME}/region/${AWS_REGION}" 241 | 242 | cdk destroy \ 243 | --context clusterName="${CLUSTER_NAME}" \ 244 | --context region="${AWS_REGION}" \ 245 | --context domain="${DOMAIN}" \ 246 | --context certificatearn="${CERTIFICATE_ARN}" \ 247 | --context identityoidcissuer="$(${AWS_CMD} eks describe-cluster --name "${CLUSTER_NAME}" --query "cluster.identity.oidc.issuer" --output text --region "${AWS_REGION}")" \ 248 | --require-approval never \ 249 | --force \ 250 | --all \ 251 | && cdk context --clear \ 252 | && eksctl delete cluster "${CLUSTER_NAME}" \ 253 | && ${AWS_CMD} ssm delete-parameter --name "${SSM_KEY}" --region "${AWS_REGION}" 254 | fi 255 | } 256 | 257 | function auth() { 258 | AUTHPROVIDERS_CONFIG=${1:-"auth-providers-patch.yaml"} 259 | if [ ! -f "${AUTHPROVIDERS_CONFIG}" ]; then 260 | echo "The auth provider configuration file ${AUTHPROVIDERS_CONFIG} does not exist."
261 | exit 1 262 | else 263 | echo "Using the auth providers configuration file: ${AUTHPROVIDERS_CONFIG}" 264 | fi 265 | 266 | # Patch the configuration with the user auth provider/s 267 | kubectl --kubeconfig .kubeconfig patch configmap auth-providers-config --type merge --patch "$(cat "${AUTHPROVIDERS_CONFIG}")" 268 | # Restart the server component 269 | kubectl --kubeconfig .kubeconfig rollout restart deployment/server 270 | } 271 | 272 | function main() { 273 | if [[ $# -ne 1 ]]; then 274 | echo "Usage: $0 [--install|--uninstall|--auth]" 275 | exit 1 276 | fi 277 | 278 | case $1 in 279 | '--install') 280 | install "eks-cluster.yaml" 281 | ;; 282 | '--uninstall') 283 | uninstall "eks-cluster.yaml" 284 | ;; 285 | '--auth') 286 | auth "auth-providers-patch.yaml" 287 | ;; 288 | *) 289 | echo "Unknown command: $1" 290 | echo "Usage: $0 [--install|--uninstall|--auth]" 291 | ;; 292 | esac 293 | echo "done" 294 | } 295 | 296 | main "$@" 297 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ESNext", 4 | "module": "commonjs", 5 | "lib": ["ESNext"], 6 | "moduleResolution":"Node", 7 | "declaration": true, 8 | "strict": true, 9 | "noImplicitAny": true, 10 | "strictNullChecks": true, 11 | "noImplicitThis": true, 12 | "alwaysStrict": true, 13 | "noUnusedLocals": false, 14 | "noUnusedParameters": false, 15 | "noImplicitReturns": true, 16 | "noFallthroughCasesInSwitch": false, 17 | "inlineSourceMap": true, 18 | "inlineSources": true, 19 | "experimentalDecorators": true, 20 | "strictPropertyInitialization": false, 21 | "typeRoots": ["./node_modules/@types"], 22 | "esModuleInterop": true, 23 | }, 24 | "exclude": ["cdk.out"], 25 | "ts-node": { 26 | "transpileOnly": true, 27 | "compilerOptions": { 28 | "module": "commonjs" 29 | } 30 | }, 31 | } 32 | --------------------------------------------------------------------------------