├── .gitignore ├── LICENSE ├── README.md ├── TROUBLESHOOTING.md ├── demo.md ├── docs └── images │ └── overview.png ├── eksctl.yaml ├── examples ├── spark │ └── pi.yaml └── template-generation │ └── data-on-eks.yaml ├── packages ├── argo-workflows-sso-config │ ├── base │ │ ├── kustomization.yaml │ │ └── sa-admin.yaml │ └── dev │ │ └── kustomization.yaml ├── argo-workflows-templates │ ├── base │ │ ├── cluster-workflow-data-on-eks-cleanup.yaml │ │ ├── cluster-workflow-data-on-eks.yaml │ │ ├── cluster-workflow-spark-rbac.yaml │ │ ├── kustomization.yaml │ │ ├── sa-backstage-scaffolder.yaml │ │ └── sa-data-on-eks.yaml │ └── dev │ │ └── kustomization.yaml ├── argo-workflows │ └── dev │ │ ├── values-no-sso.yaml │ │ └── values.yaml ├── argocd │ ├── base │ │ ├── install.yaml │ │ └── kustomization.yaml │ └── dev │ │ ├── appproject-cnoe.yaml │ │ ├── appproject-demo.yaml │ │ ├── argocd-cmd-params-cm.yaml │ │ ├── cm-argocd-cm.yaml │ │ ├── cm-argocd-rbac-cm.yaml │ │ ├── kustomization.yaml │ │ └── service-argogrpc.yaml ├── backstage │ ├── base │ │ ├── install-backstage.yaml │ │ ├── install-postgresql.yaml │ │ └── kustomization.yaml │ └── dev │ │ ├── cm-backstage-config.yaml │ │ ├── kustomization.yaml │ │ ├── patches │ │ └── deployment-backstage.yaml │ │ ├── sa-backstage.yaml │ │ ├── secret-k8s-config.yaml │ │ └── user-rbac.yaml ├── cert-manager │ ├── base │ │ └── crds.yaml │ └── dev │ │ └── values.yaml ├── crossplane-compositions │ ├── base │ │ ├── kustomization.yaml │ │ ├── provider-aws-config.yaml │ │ └── provider-aws.yaml │ └── dev │ │ └── kustomization.yaml ├── crossplane │ ├── base │ │ ├── kustomization.yaml │ │ ├── provider-aws-config.yaml │ │ └── provider-aws.yaml │ └── dev │ │ ├── kustomization.yaml │ │ └── values.yaml ├── external-dns │ └── dev │ │ └── values.yaml ├── external-secrets │ └── dev │ │ └── values.yaml ├── ingress-nginx │ └── dev │ │ └── values.yaml └── keycloak │ ├── base │ ├── install.yaml │ └── kustomization.yaml │ ├── dev-external-secrets │ ├── external-secrets.yaml │ └── kustomization.yaml │ └── dev │ ├── cm-config.yaml │ ├── kustomization.yaml │ ├── ns.yaml │ ├── patches │ ├── deployment.yaml │ └── service.yaml │ ├── postgres.yaml │ └── service-admin.yaml ├── setups ├── argocd │ ├── application-set.yaml │ ├── github-secret.yaml │ ├── install-sso.sh │ ├── install.sh │ ├── secret-argocd-secret.yaml │ └── uninstall.sh ├── config.yaml ├── install.sh ├── uninstall.sh └── utils.sh └── terraform ├── argo-workflows.tf ├── argocd-ingress.tf ├── aws-load-balancer.tf ├── backstage.tf ├── cert-manager.tf ├── crossplane.tf ├── data.tf ├── external-dns.tf ├── external-secrets.tf ├── ingress-nginx.tf ├── keycloak.tf ├── main.tf ├── scripts ├── argo-workflows │ ├── config-payloads │ │ └── client-payload.json │ ├── install.sh │ ├── secret-sso.yaml │ └── uninstall.sh ├── backstage │ ├── config-payloads │ │ └── client-payload.json │ ├── install.sh │ ├── secret-env-var.yaml │ ├── secret-integrations.yaml │ └── uninstall.sh ├── crossplane │ └── uninstall.sh └── keycloak │ ├── config-payloads │ ├── client-scope-groups-payload.json │ ├── group-admin-payload.json │ ├── group-base-user-payload.json │ ├── group-mapper-payload.json │ ├── realm-payload.json │ ├── user-password.json │ ├── user-user1.json │ └── user-user2.json │ ├── install.sh │ └── uninstall.sh ├── templates ├── argocd-apps │ ├── argo-workflows-sso-config.yaml │ ├── argo-workflows-templates.yaml │ ├── argo-workflows.yaml │ ├── aws-load-balancer.yaml │ ├── backstage.yaml │ ├── cert-manager.yaml │ ├── crossplane-compositions.yaml │ ├── 
crossplane-provider.yaml │ ├── crossplane.yaml │ ├── external-dns.yaml │ ├── external-secrets.yaml │ ├── ingress-nginx.yaml │ └── keycloak.yaml └── manifests │ ├── cluster-issuer.yaml │ ├── crossplane-aws-controller-config.yaml │ ├── ingress-argo-workflows.yaml │ ├── ingress-argocd.yaml │ ├── ingress-backstage.yaml │ ├── ingress-keycloak.yaml │ └── keycloak-secret-store.yaml ├── variables.tf └── versions.tf /.gitignore: -------------------------------------------------------------------------------- 1 | private 2 | .DS_Store 3 | .idea 4 | .build 5 | 6 | # Local .terraform directories 7 | **/.terraform/* 8 | 9 | # Terraform lockfile 10 | .terraform.lock.hcl 11 | 12 | # .tfstate files 13 | *.tfstate 14 | *.tfstate.* 15 | *.tfplan 16 | 17 | terraform-apply.log 18 | 19 | # Crash log files 20 | crash.log 21 | 22 | # Exclude all .tfvars files, which are likely to contain sentitive data, such as 23 | # password, private keys, and other secrets. These should not be part of version 24 | # control as they are data points which are potentially sensitive and subject 25 | # to change depending on the environment. 26 | *.tfvars 27 | *.tfvars.json 28 | # Ignore override files as they are usually used to override resources locally and so 29 | # are not checked in 30 | override.tf 31 | override.tf.json 32 | *_override.tf 33 | *_override.tf.json 34 | 35 | # Ignore CLI configuration files 36 | .terraformrc 37 | terraform.rc 38 | 39 | # Locals 40 | kubeconfig* 41 | kube-config* 42 | local_tf_state/ 43 | .vscode 44 | .gitallowed 45 | site 46 | .env* 47 | 48 | # Checks 49 | .tfsec 50 | 51 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 
40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | 204 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | > [!CAUTION] 3 | > The current version of the AWS reference implementation is no longer maintained. A new version is currently work in progress. Find more information about the new version in issue [#49](https://github.com/cnoe-io/reference-implementation-aws/issues/49). 
4 | 
5 | ![overview](docs/images/overview.png)
6 | 
7 | > **_NOTE:_** Applications deployed in this repository are not meant or configured for production.
8 | 
9 | 
10 | # Installation
11 | 
12 | - The installation script must be used with an EKS cluster because we use IRSA to talk to AWS services.
13 | - Components are installed as ArgoCD Applications.
14 | - Files under the `/packages` directory are meant to be usable without any modifications. This means certain configuration options, such as the domain name, must be passed from outside of this directory, e.g. via ArgoCD's Helm parameters.
15 | 
16 | ## Basic installation flow
17 | 
18 | The installation process follows this pattern.
19 | 
20 | 1. Create a GitHub App for Backstage integration.
21 | 2. Install ArgoCD and configure it to be able to monitor your GitHub Organization.
22 | 3. Run Terraform. Terraform is responsible for:
23 |     - Managing AWS resources necessary for the Kubernetes operators to function. Mostly IAM Roles.
24 |     - Installing components as ArgoCD applications, passing IAM role information where necessary.
25 |     - Applying Kubernetes manifests such as secrets and ingress where information cannot easily be passed to ArgoCD.
26 |     - Running all of the above in order, because installation order matters for many of these components. For example, Keycloak must be installed and ready before Backstage can be installed and configured.
27 | 
28 | ```mermaid
29 | ---
30 | title: Installation Process
31 | ---
32 | erDiagram
33 |     "Local Machine" ||--o{ "ArgoCD" : "1. installs"
34 |     "Local Machine" ||--o{ "Terraform" : "2. invokes"
35 |     "Terraform" ||--o{ "AWS Resources" : "3. creates"
36 |     "Terraform" ||--o{ "ArgoCD" : "4. creates ArgoCD Apps"
37 |     "ArgoCD" ||--o{ "This Repo" : "pulls manifests"
38 |     "ArgoCD" ||--o{ "Components" : "installs to the cluster"
39 | ```
40 | 
41 | This installation pattern, where some Kubernetes manifests are handled in Terraform while others are handled in a GitOps manner, may not be suitable for many organizations. If you can be certain about parameters such as domain name and certificate handling, it is better to use a GitOps approach where this information is committed to a repository. It is handled this way here to allow different organizations to customize the installation without forking this repository and committing organization-specific information to it.
42 | 
43 | ## Secret handling
44 | 
45 | Secrets are currently handled outside of the repository and set via bash scripts. Secrets such as the GitHub token and TLS private keys are stored in the `${REPO_ROOT}/private` directory.
46 | 
47 | We may be able to use sealed secrets with a full GitOps approach in the future.
48 | 
49 | ## Requirements
50 | 
51 | - GitHub **Organization** (free to create)
52 | - An existing EKS cluster (version 1.27+)
53 | - AWS CLI (2.13+)
54 | - eksctl (0.167.0+)
55 | - Kubectl CLI (1.27+)
56 | - jq
57 | - git
58 | - yq
59 | - curl
60 | - kustomize
61 | - node + npm (if you choose to create the GitHub App via the CLI)
62 | 
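A quick, optional way to confirm these tools are available on your machine before starting (this loop only checks that the binaries are on your `PATH`; it does not verify the minimum versions listed above):

```bash
# Check that each required CLI is installed.
for tool in aws eksctl kubectl jq git yq curl kustomize node npm; do
  if command -v "$tool" >/dev/null 2>&1; then
    echo "found:   $tool"
  else
    echo "missing: $tool"
  fi
done

# Spot-check the version-sensitive tools against the minimums listed above.
aws --version
eksctl version
kubectl version --client
```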
63 | ## Create GitHub Apps for your GitHub Organization
64 | 
65 | A GitHub App is used to enable integration between Backstage and GitHub.
66 | This enables integration actions such as automatically importing Backstage configuration, for example Organization information and templates.
67 | 
68 | We strongly encourage you to create a **dedicated GitHub organization**. If you don't have an organization for this purpose, please follow [this link](https://docs.github.com/en/organizations/collaborating-with-groups-in-organizations/creating-a-new-organization-from-scratch) to create one.
69 | 
70 | There are two ways to create the GitHub integration with Backstage: you can use the Backstage CLI, or create it manually. See [this page](https://backstage.io/docs/integrations/github/github-apps) for more information on creating one manually. Once the app is created, place the generated credentials file under the private directory with the name `github-integration.yaml`.
71 | 
72 | To create one with the CLI, follow the steps below. If you are using the CLI to create the GitHub App, make sure to select the third option in the permissions prompt if your GitHub App needs write access to create GitHub repositories for your Backstage templates.
73 | 
74 | ```bash
75 | npx '@backstage/cli' create-github-app ${GITHUB_ORG_NAME}
76 | # If prompted, select all for permissions or select the permissions listed on this page: https://backstage.io/docs/integrations/github/github-apps#app-permissions
77 | # In the browser window, allow access to all repositories, then install the app.
78 | 
79 | ? Select permissions [required] (these can be changed later but then require approvals in all installations) (Press <space> to select, <a> to toggle all, <i> to invert selection,
80 | and <enter> to proceed)
81 | ◉ Read access to content (required by Software Catalog to ingest data from repositories)
82 | ◉ Read access to members (required by Software Catalog to ingest GitHub teams)
83 | ❯◯ Read and Write to content and actions (required by Software Templates to create new repositories)
84 | 
85 | # move it to a "private" location.
86 | mkdir -p private
87 | GITHUB_APP_FILE=$(ls github-app-* | head -n1)
88 | mv ${GITHUB_APP_FILE} private/github-integration.yaml
89 | ```
90 | 
91 | **The file created above contains credentials. Handle it with care.**
92 | 
93 | The rest of the installation process assumes the GitHub App credentials are available at `private/github-integration.yaml`.
94 | 
95 | If you want to delete the GitHub application, follow [these steps](https://docs.github.com/en/apps/maintaining-github-apps/deleting-a-github-app).
96 | 
97 | ## Create a GitHub token
98 | 
99 | A GitHub token is needed by ArgoCD to get information about repositories under your Organization.
100 | 
101 | The following permissions are needed:
102 | - Repository access for all repositories
103 | - Read-only access to: Administration, Contents, and Metadata.
104 | Get your GitHub personal access token from: https://github.com/settings/tokens?type=beta
105 | 
106 | Once you have your token, save it under the private directory with the name `github-token`. For example:
107 | 
108 | ```bash
109 | # From the root of this repository.
110 | $ mkdir -p private
111 | $ vim private/github-token # paste your token
112 | # example output
113 | $ cat private/github-token
114 | github_pat_ABCDEDFEINDK....
115 | ```
116 | 
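At this point the `private` directory should contain both files referenced above. A quick way to confirm before moving on (the output shown is illustrative):

```bash
# Both files must exist and be non-empty before running the installer.
ls -1 private/
# github-integration.yaml
# github-token
test -s private/github-integration.yaml && test -s private/github-token && echo "private directory looks complete"
```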
117 | ## Install
118 | 
119 | Follow these steps to get started.
120 | 
121 | 1. Create the GitHub App and GitHub token as described above.
122 | 2. Create a new EKS cluster. We do not include an EKS cluster in the installation module because cluster requirements vary between organizations, and the focus of this repository is the integration of different projects. If you prefer, you can create a new basic cluster with the included [`eksctl.yaml`](./eksctl.yaml) file:
123 |    ```eksctl create cluster -f eksctl.yaml```
124 |    You can get eksctl from [this link](https://eksctl.io/).
125 | 3. If you don't have a public registered Route53 zone, [register a Route53 domain](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/domain-register.html) (be sure to use Route53 as the DNS service for the domain). We **strongly encourage creating a dedicated subdomain** for this. If you'd rather manage DNS yourself, you can set `enable_dns_management` to `false` in the config file.
126 | 4. Get the hosted zone ID and put it in the config file.
127 |    ```bash
128 |    aws route53 list-hosted-zones-by-name --dns-name <your-domain> --query 'HostedZones[0].Id' --output text | cut -d'/' -f3
129 |    # in the setups/config.yaml file, update the zone id.
130 |    hosted_zone_id: ZO020111111
131 |    ```
132 | 5. Update the [`setups/config.yaml`](setups/config.yaml) file with your own values.
133 | 6. Run `setups/install.sh` and follow the prompts. See the section below about monitoring installation progress.
134 | 7. Once installation completes, navigate to `backstage.<domain>` and log in as `user1`. The password is available as a secret. You may need to wait for DNS propagation to complete before you can log in. This may take ~10 minutes.
135 |    ```bash
136 |    kubectl get secrets -n keycloak keycloak-user-config -o go-template='{{range $k,$v := .data}}{{printf "%s: " $k}}{{if not $v}}{{$v}}{{else}}{{$v | base64decode}}{{end}}{{"\n"}}{{end}}'
137 |    ```
138 | 
139 | 
140 | ### Monitoring installation progress
141 | 
142 | Components are installed as ArgoCD Applications. You can monitor installation progress from the ArgoCD UI.
143 | 
144 | ```bash
145 | # Get the admin password
146 | kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d
147 | 
148 | kubectl port-forward svc/argocd-server -n argocd 8081:80
149 | ```
150 | 
151 | Go to [`http://localhost:8081`](http://localhost:8081) and log in with the username `admin` and the password obtained above. In the UI you can look at the resources created, their logs, and events.
152 | 
153 | 
154 | ### If you installed it without automatic DNS configuration
155 | 
156 | If you set `enable_dns_management: false`, you are responsible for updating DNS records, and external-dns is not installed. You have to set the following DNS records:
157 | - `backstage.<domain>`
158 | - `keycloak.<domain>`
159 | - `argo.<domain>`
160 | - `argocd.<domain>`
161 | 
162 | Point these records to the value returned by the following command.
163 | 
164 | ```bash
165 | kubectl get svc -n ingress-nginx ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'
166 | ```
167 | 
168 | ### If you installed it without Cert Manager
169 | 
170 | If you set `MANAGED_CERT=false`, you are responsible for managing TLS certs, and cert-manager is not installed. You must [create TLS secrets accordingly](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls).
171 | 
172 | Run the following command to find where to create the secrets.
173 | 
174 | ```bash
175 | output=$(kubectl get ingress --all-namespaces -o json | jq -r '.items[] | "\(.metadata.namespace) \(.spec.rules[].host) \(.spec.tls[].secretName)"')
176 | echo -e "Namespace \t Hostname \t TLS Secret Name"
177 | echo -e "$output"
178 | ```
179 | 
180 | The secret format should be something like:
181 | 
182 | ```yaml
183 | apiVersion: v1
184 | kind: Secret
185 | metadata:
186 |   name: backstage.<domain>
187 |   namespace: backstage
188 | data:
189 |   tls.crt: <base64 encoded certificate>
190 |   tls.key: <base64 encoded private key>
191 | type: kubernetes.io/tls
192 | 
193 | ```
194 | 
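If you have the certificate and key as local files, one way to create such a secret is with `kubectl create secret tls`. The secret name, namespace, and file paths below are placeholders; use the values reported by the command above:

```bash
# Create a TLS secret for the backstage ingress from local certificate files.
kubectl create secret tls "backstage.<domain>" \
  --namespace backstage \
  --cert=./certs/backstage.crt \
  --key=./certs/backstage.key
```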
195 | ## What was created?
196 | 
197 | The following components are installed if you chose the full installation option.
198 | 
199 | | Name | Version |
200 | |---|---|
201 | | argo-workflows | v3.4.8 |
202 | | argocd | v2.7.6 |
203 | | aws-load-balancer-controller | v2.5.3 |
204 | | backstage | v1.16.0 |
205 | | cert-manager | v1.12.2 |
206 | | crossplane | v1.12.2 |
207 | | external-dns | v0.13.5 |
208 | | ingress-nginx | v1.8.0 |
209 | | keycloak | v22.0.0 |
210 | | external-secrets | v0.9.2 |
211 | 
212 | ### Things created outside of the cluster
213 | 
214 | If the full installation is done, you should have these DNS entries available. They all point to the Network Load Balancer.
215 | 
216 | - `backstage.<domain>`
217 | - `argo.<domain>`
218 | - `keycloak.<domain>`
219 | 
220 | You can confirm these by querying a public DNS resolver.
221 | 
222 | ```bash
223 | dig A backstage.<domain> @1.1.1.1
224 | 
225 | kubectl get svc -n ingress-nginx
226 | ```
227 | 
228 | A Network Load Balancer is also created. It is managed by the AWS Load Balancer Controller and points to the ingress-nginx pods, which are responsible for routing requests to the correct backends. As a result, HTTPS endpoints are created with valid certificates.
229 | 
230 | ```bash
231 | openssl s_client -showcerts -servername backstage.<domain> -connect backstage.<domain>:443 <<< "Q"
232 | curl https://backstage.<domain>
233 | ```
234 | 
235 | ## How to access the Backstage instance?
236 | 
237 | When you open a browser window and go to `https://backstage.<domain>`, you should be prompted to log in.
238 | Two users are created during the installation process: `user1` and `user2`. Their passwords are available in the keycloak namespace.
239 | 
240 | ```bash
241 | kubectl get secrets -n keycloak keycloak-user-config -o go-template='{{range $k,$v := .data}}{{printf "%s: " $k}}{{if not $v}}{{$v}}{{else}}{{$v | base64decode}}{{end}}{{"\n"}}{{end}}'
242 | ```
243 | 
244 | ## Uninstall
245 | 1. Run `setups/uninstall.sh` and follow the prompts.
246 | 2. Remove the GitHub App from your Organization by following [these steps](https://docs.github.com/en/apps/maintaining-github-apps/deleting-a-github-app).
247 | 3. Remove the token from your GitHub Organization by following [these steps](https://docs.github.com/en/organizations/managing-programmatic-access-to-your-organization/reviewing-and-revoking-personal-access-tokens-in-your-organization).
248 | 4. Remove the created GitHub Organization.
249 | 
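After running the uninstall script, you can check whether any ArgoCD applications remain (this assumes ArgoCD itself is still present on the cluster at that point):

```bash
# Anything still listed here may indicate resources that could not be cleaned up.
kubectl get applications -n argocd
kubectl get applicationsets -n argocd
```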
250 | <details>
251 | <summary>Uninstall details</summary>
252 | 
253 | ### Resources deleted
254 | 
255 | Currently, resources created by applications are not deleted. For example, if you have Spark jobs running, they are not deleted and may block deletion of the spark-operator app.
256 | 
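Before uninstalling, you can check for leftover Spark resources that would block deletion (the `SparkApplication` kind comes from the spark-operator, as used in `examples/spark/pi.yaml`):

```bash
# List any Spark applications still present in the cluster.
kubectl get sparkapplications --all-namespaces

# Delete them before running the uninstall script, e.g.:
# kubectl delete sparkapplication <name> -n <namespace>
```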
257 | </details>
258 | 
259 | ## What can you do in Backstage?
260 | See [this doc](./demo.md) for demos!
261 | 
262 | ## Possible issues
263 | 
264 | ### Cert-manager
265 | - By default, it uses the http-01 challenge. If you'd prefer using dns-01, you can update the ingress files. TODO AUTOMATE THIS
266 | - You may get events like `Get "http://<domain>/.well-known/acme-challenge/09yldI6tVRvtWVPyMfwCwsYdOCEGGVWhmb1PWzXwhXI": dial tcp: lookup <domain> on 10.100.0.10:53: no such host`. This is due to DNS propagation delay. It may take ~10 minutes.
267 | 
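While waiting, you can check the state of the certificates and the pending ACME challenges:

```bash
# Certificates and their readiness.
kubectl get certificates --all-namespaces

# Pending HTTP-01 challenges; describe them to see the failure reason.
kubectl get challenges --all-namespaces
kubectl describe challenge -n argo
```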
268 | ## Troubleshooting
269 | 
270 | See [the troubleshooting doc](TROUBLESHOOTING.md) for more information.
271 | 
272 | ## Creation Order notes
273 | <details>
274 | <summary>Click to expand</summary>
275 | 
276 | ## Things created outside of the cluster with Keycloak SSO enabled
277 | 
278 | - Route53 records. Route53 hosted zones are not created. You must also register the domain if you want to be able to access it through public DNS. These records are managed by the external-dns controller.
279 | 
280 | - AWS Network Load Balancer. This is just the entrance to the Kubernetes cluster. It points to the default installation of ingress-nginx and is managed by the AWS Load Balancer Controller.
281 | 
282 | - TLS certificates issued by Let's Encrypt. These are managed by cert-manager based on values in the Ingress objects. They use the production issuer, which means we must be very careful with how many certificates we request and how often we request them. The uninstall scripts back up certificates to the `private` directory to avoid re-issuing certificates.
283 | 
284 | These resources are controlled by Kubernetes controllers and thus should be deleted through those controllers.
285 | 
286 | ### Keycloak SSO with DNS and TLS certificates
287 | 
288 | If using Keycloak SSO with fully automated DNS and certificate management, the installation order must be:
289 | 
290 | 1. aws-load-balancer-controller
291 | 2. ingress-nginx
292 | 3. cert-manager
293 | 4. external-dns
294 | 5. keycloak
295 | 6. The rest of the components
296 | 
297 | 
298 | ### Keycloak SSO with manual DNS and TLS certificates
299 | 
300 | If using Keycloak SSO but managing DNS records and certificates manually:
301 | 
302 | 1. aws-load-balancer-controller
303 | 2. ingress-nginx
304 | 3. The rest of the components, minus cert-manager and external-dns
305 | 
306 | In this case, you can issue your own certs and provide them as TLS secrets as specified in the `spec.tls[0].secretName` field of the Ingress objects.
307 | You can also let the NLB or ALB terminate TLS instead, using the load balancer controller. This is not currently covered, but it is possible.
308 | 
309 | ### No SSO
310 | 
311 | Without SSO, there is no particular installation order; eventual consistency works.
312 | 
313 | </details>
314 | 
-------------------------------------------------------------------------------- /TROUBLESHOOTING.md: --------------------------------------------------------------------------------
1 | 
2 | # Troubleshooting steps
3 | 
4 | All applications are deployed as ArgoCD applications. The best way to troubleshoot is to navigate to the ArgoCD UI and look at the logs and events for each application.
5 | 
6 | ```bash
7 | # Get the admin ArgoCD password
8 | kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d
9 | # Port forward to 8081. 8080 could be in use by the install / uninstall scripts.
10 | kubectl port-forward svc/argocd-server -n argocd 8081:80
11 | 
12 | # Go to http://localhost:8081
13 | ```
14 | 
15 | 
16 | # Common issues
17 | 
18 | ## Argo Workflows
19 | 
20 | ### Argo Workflows controller stuck in a crash loop
21 | 
22 | You may see an error message like:
23 | 
24 | ```
25 | Error: Get "https://keycloak.<domain>/realms/cnoe/.well-known/openid-configuration": dial tcp: lookup keycloak.<domain> on 10.100.0.10:53: no such host
26 | ```
27 | 
28 | This is due to DNS propagation delay in the cluster. Once DNS entries are propagated (may take ~10 min), pods should start running.
29 | 
30 | ## Backstage
31 | 
32 | ### Backstage pod stuck in a crash loop
33 | 
34 | You may see an error message like:
35 | 
36 | ```
37 | Error: getaddrinfo ENOTFOUND keycloak.a2.mccloman.people.aws.dev
38 |     at GetAddrInfoReqWrap.onlookup [as oncomplete] (node:dns:107:26) {
39 |   errno: -3008,
40 |   code: 'ENOTFOUND',
41 |   syscall: 'getaddrinfo',
42 |   hostname: 'keycloak.<domain>'
43 | 
44 | }
45 | ```
46 | This is due to DNS propagation delay in the cluster. Once DNS entries are propagated (may take ~10 min), pods should start running.
47 | 
48 | 
49 | ## Certificates
50 | 
51 | General steps are [outlined here](https://cert-manager.io/docs/troubleshooting/).
52 | 
53 | ### Certificates not issued
54 | You may see something like this:
55 | 
56 | ```bash
57 | $ kubectl -n argo get certificate
58 | NAME                      READY   SECRET                    AGE
59 | argo-workflows-prod-tls   FALSE   argo-workflows-prod-tls   3m52s
60 | 
61 | $ kubectl -n argo get challenge
62 | NAME                                                  STATE     DOMAIN          AGE
63 | argo-workflows-prod-tls-qxfjq-1305584735-1533108683   pending   argo.<domain>   91s
64 | ```
65 | 
66 | If you describe the challenge, you may see something like this.
67 | 
68 | ```
69 | Reason: Waiting for HTTP-01 challenge propagation: failed to perform self check GET request 'http://argo.DOMAIN_NAME/.well-known/acme-challenge/6AQ5cRc7J6FNQ9xGOBDI5_G1lHsNM5J5ivbS3iSHd3c': Get "http://argo.DOMAIN_NAME/.well-known/acme-challenge/6AQ5cRc7J6FNQ9xGOBDI5_G1lHsNM5J5ivbS3iSHd3c": dial tcp: lookup argo.DOMAIN_NAME on 10.100.0.10:53: no such host
70 | ```
71 | This is due to DNS propagation delay in the cluster. Once DNS entries are propagated (may take ~10 min), certs should be issued.
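While waiting, you can watch for the record to become resolvable from a public resolver (replace `argo.<domain>` with the host shown in the challenge):

```bash
# Re-runs the lookup every 10 seconds; output stays empty until the record propagates.
watch -n 10 dig +short A argo.<domain> @1.1.1.1
```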
72 | 
73 | 
-------------------------------------------------------------------------------- /demo.md: --------------------------------------------------------------------------------
1 | ## Backstage template generation
2 | 
3 | Using the CNOE CLI, you can generate Backstage template input fields from Terraform modules, Kubernetes CRDs, and Crossplane XRDs.
4 | 
5 | ### Terraform (imperative)
6 | 
7 | For example, to generate input fields from a Terraform module in the Data on EKS repository:
8 | 
9 | ```bash
10 | git clone https://github.com/awslabs/data-on-eks.git /tmp/data-on-eks
11 | 
12 | cnoe template tf -i /tmp/data-on-eks/analytics/terraform/spark-k8s-operator -o examples/template-generation -t examples/template-generation/data-on-eks.yaml -p '.spec.parameters[0].properties.tfVars'
13 | 
14 | ```
15 | The above command takes the Terraform module available at `/tmp/data-on-eks/analytics/terraform/spark-k8s-operator`, then inserts the generated Backstage fields into a partially configured template at `examples/template-generation/data-on-eks.yaml`. This partially configured template contains fields that are common to all modules in the Data on EKS repository.
16 | 
17 | You can view what fields are generated by using `diff` and `yq`:
18 | 
19 | ```bash
20 | diff <(yq -P 'sort_keys(..)' -o=props examples/template-generation/spark-k8s-operator.yaml) <(yq -P 'sort_keys(..)' -o=props examples/template-generation/data-on-eks.yaml)
21 | ```
22 | 
23 | The rest of this document assumes you've used the spark-k8s-operator module to generate input fields.
24 | 
25 | Once the template is hydrated, it is ready to use. You can push it to a private git repository within your organization or push it to a public repository.
26 | You can then register the template in the Backstage UI (`Create...` > `Register Existing Component`) or update your Backstage configuration. The main configuration file is a ConfigMap in the backstage namespace with the name `backstage-config`.
27 | 
28 | You can use the template to create an EKS cluster optimized for data workloads using Terraform. The example uses a custom Argo Workflows template to run the install script, add an IAM role to the admin group in the cluster, create a Backstage service account, and then register the cluster with Backstage for use.
29 | 
30 | The Argo Workflows run is associated with the Backstage entity, so you can check its status within Backstage. If you'd like to see detailed information about the run, you can click on it to go to the Argo Workflows UI.
31 | 
32 | Once the new cluster is ready, you can submit a Spark job to the new cluster through Backstage.
33 | 
34 | ### Terraform (declarative)
35 | 
36 | You can also use the Terraform controller or the Crossplane Terraform provider to deploy Terraform modules. Examples of this will be included in the future.
37 | 
38 | 
39 | ### Crossplane and ACK
40 | 
41 | When using Crossplane, the bulk of the work is done by Crossplane providers. You can use GitOps tools like ArgoCD to deploy these manifests to the cluster using Backstage scaffolder templates. Examples of this will be included in the future.
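As a rough sketch of what that could look like, an ArgoCD Application can track a Git path containing the Crossplane claims produced by a scaffolder template. The repository URL, path, and application name below are placeholders, not part of this repository; the `demo` AppProject is the one defined under `packages/argocd/dev/appproject-demo.yaml`:

```bash
# Apply an ArgoCD Application that continuously syncs a Git path of Crossplane claims.
kubectl apply -f - <<'EOF'
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: my-crossplane-claims            # placeholder name
  namespace: argocd
spec:
  project: demo
  source:
    repoURL: https://github.com/<your-org>/<claims-repo>.git   # placeholder repository
    targetRevision: main
    path: claims
  destination:
    server: https://kubernetes.default.svc
    namespace: demo
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
EOF
```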
42 | 43 | 44 | -------------------------------------------------------------------------------- /docs/images/overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnoe-io/reference-implementation-aws/2c571cb70d083c357e8e056d33e5f4fb625eec60/docs/images/overview.png -------------------------------------------------------------------------------- /eksctl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: eksctl.io/v1alpha5 2 | kind: ClusterConfig 3 | metadata: 4 | name: cnoe-ref-impl 5 | region: us-west-2 6 | version: "1.28" 7 | managedNodeGroups: 8 | - name: managed-ng-1 9 | instanceType: m5.large 10 | minSize: 3 11 | maxSize: 6 12 | desiredCapacity: 4 13 | volumeSize: 100 14 | ssh: 15 | allow: false 16 | iam: 17 | withAddonPolicies: 18 | autoScaler: true 19 | labels: 20 | role: general-purpose 21 | iam: 22 | withOIDC: true 23 | addons: 24 | - name: aws-ebs-csi-driver 25 | version: "v1.28.0-eksbuild.1" 26 | attachPolicyARNs: 27 | - arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy -------------------------------------------------------------------------------- /examples/spark/pi.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: sparkoperator.k8s.io/v1beta2 2 | kind: SparkApplication 3 | metadata: 4 | name: pyspark-pi-karpenter 5 | spec: 6 | type: Python 7 | pythonVersion: "3" 8 | mode: cluster 9 | image: public.ecr.aws/r1l5w1y9/spark-operator:3.2.1-hadoop-3.3.1-java-11-scala-2.12-python-3.8-latest 10 | imagePullPolicy: Always 11 | mainApplicationFile: local:///opt/spark/examples/src/main/python/pi.py 12 | sparkVersion: 3.1.1 13 | restartPolicy: 14 | type: OnFailure 15 | onFailureRetries: 1 16 | onFailureRetryInterval: 10 17 | onSubmissionFailureRetries: 5 18 | onSubmissionFailureRetryInterval: 20 19 | driver: 20 | cores: 1 21 | coreLimit: 1200m 22 | memory: 512m 23 | labels: 24 | version: 3.1.1 25 | serviceAccount: spark 26 | nodeSelector: 27 | NodeGroupType: spark-spot-ca 28 | tolerations: 29 | - key: spark-spot-ca 30 | operator: Exists 31 | effect: NoSchedule 32 | executor: 33 | cores: 1 34 | instances: 2 35 | memory: 512m 36 | serviceAccount: spark 37 | labels: 38 | version: 3.1.1 39 | nodeSelector: 40 | NodeGroupType: spark-spot-ca 41 | tolerations: 42 | - key: spark-spot-ca 43 | operator: Exists 44 | effect: NoSchedule 45 | 46 | -------------------------------------------------------------------------------- /examples/template-generation/data-on-eks.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scaffolder.backstage.io/v1beta3 2 | kind: Template 3 | metadata: 4 | description: Creates a EKS cluster with Spark Operator + extra enabled through Terraform 5 | name: data-on-eks-spark 6 | title: Data on EKS - Spark Operator 7 | spec: 8 | owner: guest 9 | type: service 10 | parameters: 11 | - title: Terraform config options 12 | properties: 13 | tfVars: # this field is to be generated. 14 | title: Terraform variables 15 | type: object 16 | - title: Configuration Options 17 | properties: 18 | name: 19 | title: name of this entry 20 | type: string 21 | namespace: 22 | title: namespace within the kubernetes cluster to deploy this 23 | type: string 24 | default: data-on-eks 25 | adminRoleName: # update the aws-config cm to give admin access to this group. 
26 | title: Admin Role Name 27 | description: Name of the role to give the administrative rights on the EKS cluster. 28 | default: Admin 29 | type: string 30 | clusterName: 31 | title: Cluster to run 32 | description: The cluster to run this workflow in. 33 | type: string 34 | ui:field: KubernetesClusterPicker 35 | repoUrl: # need a place to store this entity information. 36 | title: Repository Location 37 | type: string 38 | ui:field: RepoUrlPicker 39 | ui:options: 40 | allowedHosts: 41 | - github.com 42 | steps: 43 | - id: create-repo 44 | name: Create Repository 45 | action: github:repo:create 46 | input: 47 | repoUrl: ${{ parameters.repoUrl }} 48 | - id: fetch-base 49 | name: Fetch Base 50 | action: fetch:template 51 | input: 52 | url: ./data-on-eks 53 | values: 54 | name: ${{ parameters.name }} 55 | namespace: ${{ parameters.namespace }} 56 | tfVars: ${{ parameters.tfVars }} 57 | adminRoleName: ${{parameters.adminRoleName}} 58 | clusterName: ${{ parameters.clusterName }} 59 | - id: init-repo 60 | name: Initialize Repository 61 | action: github:repo:push 62 | input: 63 | repoUrl: ${{ parameters.repoUrl }} 64 | defaultBranch: main 65 | - id: wait 66 | name: Waiting for the repo to be ready 67 | action: "roadiehq:utils:sleep" 68 | input: 69 | amount: 5 70 | - id: register 71 | name: Register 72 | action: catalog:register 73 | input: 74 | repoContentsUrl: ${{ steps['init-repo'].output.repoContentsUrl }} 75 | catalogInfoPath: '/catalog-info.yaml' 76 | - id: apply-cm 77 | name: Apply TF variables CM 78 | action: cnoe:kubernetes:apply 79 | input: 80 | manifestPath: cm.yaml # this contains tfvar json file. 81 | namespaced: true 82 | clusterName: ${{ parameters.clusterName }} 83 | - id: apply-workflow 84 | name: Start TF workflow 85 | action: cnoe:kubernetes:apply 86 | input: 87 | manifestPath: workflow.yaml # initiates an argo workflows run. 
88 | namespaced: true 89 | clusterName: ${{ parameters.clusterName }} 90 | output: 91 | links: 92 | - title: Open in catalog 93 | icon: catalog 94 | entityRef: ${{ steps['register'].output.entityRef }} 95 | -------------------------------------------------------------------------------- /packages/argo-workflows-sso-config/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | namespace: argo 2 | resources: 3 | - sa-admin.yaml -------------------------------------------------------------------------------- /packages/argo-workflows-sso-config/base/sa-admin.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Used by users in the admin group 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: admin 7 | namespace: argo 8 | annotations: 9 | workflows.argoproj.io/rbac-rule: "'admin' in groups" 10 | workflows.argoproj.io/rbac-rule-precedence: "10" 11 | --- 12 | apiVersion: rbac.authorization.k8s.io/v1 13 | kind: ClusterRoleBinding 14 | metadata: 15 | name: argo-admin 16 | roleRef: 17 | apiGroup: rbac.authorization.k8s.io 18 | kind: ClusterRole 19 | name: cluster-admin 20 | subjects: 21 | - kind: ServiceAccount 22 | name: admin 23 | namespace: argo 24 | --- 25 | apiVersion: v1 26 | kind: Secret 27 | metadata: 28 | name: admin.service-account-token 29 | annotations: 30 | kubernetes.io/service-account.name: admin 31 | namespace: argo 32 | type: kubernetes.io/service-account-token 33 | -------------------------------------------------------------------------------- /packages/argo-workflows-sso-config/dev/kustomization.yaml: -------------------------------------------------------------------------------- 1 | namespace: argo 2 | resources: 3 | - ../base/ 4 | -------------------------------------------------------------------------------- /packages/argo-workflows-templates/base/cluster-workflow-data-on-eks-cleanup.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: ClusterWorkflowTemplate 3 | metadata: 4 | annotations: 5 | workflows.argoproj.io/description: Runs Data on EKS Blueprints. 
6 | name: terraform-data-on-eks-cleanup 7 | spec: 8 | activeDeadlineSeconds: 3600 9 | ttlStrategy: 10 | secondsAfterCompletion: 86400 11 | secondsAfterSuccess: 43200 12 | secondsAfterFailure: 86400 13 | serviceAccountName: data-on-eks 14 | entrypoint: main 15 | arguments: 16 | parameters: 17 | - name: backstage-entity-name 18 | - name: script-location 19 | - name: admin-role 20 | volumes: 21 | - name: tf-state 22 | secret: 23 | secretName: "{{workflow.parameters.backstage-entity-name}}-tf-state" 24 | templates: 25 | - name: main 26 | steps: 27 | - - name: remove-resources 28 | template: remove-resources 29 | - - name: cleanup-tf 30 | template: cleanup-tf 31 | - name: remove-resources 32 | inputs: 33 | artifacts: 34 | - name: go-template 35 | path: /tmp/go-template 36 | raw: 37 | data: | 38 | {{- range .items -}} 39 | {{- if not .metadata.ownerReferences -}} 40 | {{- if eq .spec.type "LoadBalancer" -}} 41 | {{.metadata.name}} {{.metadata.namespace}}{{"\n"}} 42 | {{- end }} 43 | {{- end -}} 44 | {{- end -}} 45 | script: 46 | image: public.ecr.aws/m8u6z8z4/manabu-test:tf-manager-v0.0.15 47 | volumeMounts: 48 | - name: tf-state 49 | mountPath: /var/run/tf 50 | command: 51 | - bash 52 | source: | 53 | set -e -o pipefail 54 | set +x 55 | echo 'restoring tfstate from secret' 56 | gunzip -c /var/run/tf/tfstate > /tmp/tfstate 57 | cluster_arn=$(jq -r '.resources[] | select(.module == "module.eks" and .type == "aws_eks_cluster")| .instances[0].attributes.arn' /tmp/tfstate) 58 | if [[ -z "${cluster_arn// }" ]]; then 59 | exit 0 60 | fi 61 | cluster_name=$(echo $cluster_arn | cut -d '/' -f 2) 62 | region=$(echo $cluster_arn | cut -d ':' -f 4) 63 | mkdir ~/.kube/ 64 | aws eks update-kubeconfig --name $cluster_name --region $region 65 | echo 'removing kubernetes services with loadbalancers' 66 | lbs=$(kubectl get svc -A -o go-template-file=/tmp/go-template) 67 | echo $lbs | while IFS= read -r line; do 68 | if [[ -z "${line// }" ]]; then 69 | continue 70 | fi 71 | name=$(echo ${line} | cut -f1 -d" "); 72 | namespace=$(echo ${line} | cut -f2 -d" "); 73 | echo "delete $name in $namespace"; 74 | kubectl delete svc $name -n $namespace; 75 | done 76 | 77 | echo 'removing cluster information from backstage' 78 | rm ~/.kube/config 79 | 80 | kubectl -n backstage get secrets k8s-config -o yaml | yq '.data."k8s-config.yaml"' | base64 -d > /tmp/config 81 | data=$(yq eval "del(.clusters[] | select(.name == \"$cluster_name\"))" /tmp/config | base64) 82 | kubectl -n backstage get secrets k8s-config -o yaml | yq ".data.\"k8s-config.yaml\" = \"$data\"" | kubectl apply -f - 83 | kubectl -n backstage rollout restart deployment backstage 84 | 85 | - name: cleanup-tf 86 | retryStrategy: 87 | limit: "2" 88 | retryPolicy: "Always" 89 | inputs: 90 | artifacts: 91 | - name: data-on-eks 92 | path: /src/data-on-eks 93 | git: 94 | repo: https://github.com/awslabs/data-on-eks.git 95 | revision: main 96 | script: 97 | image: public.ecr.aws/m8u6z8z4/manabu-test:tf-manager-v0.0.15 98 | volumeMounts: 99 | - mountPath: /var/run/tf 100 | name: tf-state 101 | command: 102 | - bash 103 | source: | 104 | set -e -o pipefail 105 | cd /src/data-on-eks/{{workflow.parameters.script-location}} 106 | echo 'getting tf state from secrets' 107 | gunzip -c /var/run/tf/tfstate > terraform.tfstate 108 | cluster_arn=$(jq -r '.resources[] | select(.module == "module.eks" and .type == "aws_eks_cluster")| .instances[0].attributes.arn' terraform.tfstate) 109 | region=$(echo $cluster_arn | cut -d '/' -f 4) 110 | echo 'running cleanup script' 111 | terraform 
init 112 | set +e +o pipefail 113 | printf "$region\n" | ./cleanup.sh 114 | status=$? 115 | gzip -k -c terraform.tfstate | base64 -w 0 > tfstate 116 | kubectl get secret -n {{workflow.namespace}} {{workflow.parameters.backstage-entity-name}}-tf-state -o yaml > secret.yaml 117 | data=$(cat tfstate) yq -i ".data.tfstate = env(data)" secret.yaml 118 | kubectl apply -f secret.yaml 119 | exit $status 120 | -------------------------------------------------------------------------------- /packages/argo-workflows-templates/base/cluster-workflow-data-on-eks.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: ClusterWorkflowTemplate 3 | metadata: 4 | annotations: 5 | workflows.argoproj.io/description: Runs Data on EKS Blueprints. 6 | name: terraform-data-on-eks 7 | spec: 8 | activeDeadlineSeconds: 3600 9 | ttlStrategy: 10 | secondsAfterCompletion: 86400 11 | secondsAfterSuccess: 43200 12 | secondsAfterFailure: 86400 13 | serviceAccountName: data-on-eks 14 | entrypoint: main 15 | arguments: 16 | parameters: 17 | - name: backstage-entity-name 18 | - name: script-location 19 | - name: admin-role 20 | templates: 21 | - name: main 22 | steps: 23 | - - name: run 24 | template: run 25 | - - name: restart-backstage 26 | template: restart-backstage 27 | - - name: cleanup 28 | template: cleanup 29 | - name: run 30 | inputs: 31 | artifacts: 32 | - name: data-on-eks 33 | path: /src/data-on-eks 34 | git: 35 | repo: https://github.com/awslabs/data-on-eks.git 36 | revision: main 37 | volumes: 38 | - name: tf-cm 39 | configMap: 40 | name: "{{workflow.parameters.backstage-entity-name}}-tf-cm" 41 | container: 42 | image: public.ecr.aws/cnoe-io/misc:tf-manager-v0.0.1 43 | tty: true 44 | stdin: true 45 | volumeMounts: 46 | - mountPath: /var/run/tf 47 | name: tf-cm 48 | env: 49 | - name: BACKSTAGE_ENT_NAME 50 | value: "{{workflow.parameters.backstage-entity-name}}" 51 | - name: SCRIPT_LOCATION 52 | value: "/src/data-on-eks/{{workflow.parameters.script-location}}" 53 | - name: ADMIN_ROLE 54 | value: "{{workflow.parameters.admin-role}}" 55 | - name: TFVAR_LOCATION 56 | value: /var/run/tf/terraform.tfvars.json 57 | - name: restart-backstage 58 | script: 59 | image: public.ecr.aws/cnoe-io/misc:tf-manager-v0.0.1 60 | command: 61 | - bash 62 | source: | 63 | kubectl -n backstage rollout restart deployment backstage 64 | - name: cleanup 65 | resource: 66 | action: delete 67 | manifest: | 68 | apiVersion: v1 69 | kind: ConfigMap 70 | metadata: 71 | name: "{{workflow.parameters.backstage-entity-name}}-tf-cm" 72 | namespace: "{{workflow.namespace}}" 73 | -------------------------------------------------------------------------------- /packages/argo-workflows-templates/base/cluster-workflow-spark-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: ClusterWorkflowTemplate 3 | metadata: 4 | annotations: 5 | workflows.argoproj.io/description: Applies RBAC configurations 6 | name: apply-rbac-spark 7 | spec: 8 | activeDeadlineSeconds: 3600 9 | ttlStrategy: 10 | secondsAfterCompletion: 86400 11 | secondsAfterSuccess: 43200 12 | secondsAfterFailure: 86400 13 | serviceAccountName: data-on-eks 14 | entrypoint: main 15 | templates: 16 | - name: main 17 | steps: 18 | - - name: apply-sa 19 | template: apply-sa 20 | - - name: apply-role 21 | template: apply-role 22 | - - name: apply-role-binding 23 | template: apply-role-binding 24 | - name: apply-sa 25 | resource: 26 | action: 
apply 27 | manifest: | 28 | apiVersion: v1 29 | kind: ServiceAccount 30 | metadata: 31 | name: spark 32 | namespace: "{{workflow.namespace}}" 33 | - name: apply-role 34 | resource: 35 | action: apply 36 | manifest: | 37 | apiVersion: rbac.authorization.k8s.io/v1 38 | kind: Role 39 | metadata: 40 | name: spark-role 41 | namespace: "{{workflow.namespace}}" 42 | rules: 43 | - apiGroups: 44 | - "" 45 | resources: 46 | - pods 47 | verbs: 48 | - '*' 49 | - apiGroups: 50 | - "" 51 | resources: 52 | - services 53 | verbs: 54 | - '*' 55 | - apiGroups: 56 | - "" 57 | resources: 58 | - configmaps 59 | verbs: 60 | - '*' 61 | - name: apply-role-binding 62 | resource: 63 | action: apply 64 | manifest: | 65 | apiVersion: rbac.authorization.k8s.io/v1 66 | kind: RoleBinding 67 | metadata: 68 | name: spark-role-binding 69 | namespace: "{{workflow.namespace}}" 70 | roleRef: 71 | apiGroup: rbac.authorization.k8s.io 72 | kind: Role 73 | name: spark-role 74 | subjects: 75 | - kind: ServiceAccount 76 | name: spark 77 | namespace: "{{workflow.namespace}}" 78 | -------------------------------------------------------------------------------- /packages/argo-workflows-templates/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - sa-data-on-eks.yaml 3 | - cluster-workflow-data-on-eks.yaml 4 | - cluster-workflow-data-on-eks-cleanup.yaml 5 | - cluster-workflow-spark-rbac.yaml 6 | - sa-backstage-scaffolder.yaml 7 | -------------------------------------------------------------------------------- /packages/argo-workflows-templates/base/sa-backstage-scaffolder.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: backstage-scaffolder 5 | namespace: "argo" 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | kind: ClusterRole 9 | metadata: 10 | name: backstage-scaffolder-argo-workflows 11 | rules: 12 | - apiGroups: [""] 13 | resources: ["serviceaccounts", "roles", "rolebindings"] 14 | verbs: ["*"] 15 | --- 16 | apiVersion: rbac.authorization.k8s.io/v1 17 | kind: ClusterRoleBinding 18 | metadata: 19 | name: backstage-scaffolder-argo-workflows 20 | subjects: 21 | - kind: ServiceAccount 22 | name: backstage-scaffolder 23 | namespace: argo 24 | roleRef: 25 | kind: ClusterRole 26 | name: backstage-namespace-access 27 | apiGroup: rbac.authorization.k8s.io 28 | -------------------------------------------------------------------------------- /packages/argo-workflows-templates/base/sa-data-on-eks.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: data-on-eks 5 | labels: 6 | app: data-on-eks 7 | --- 8 | kind: Role 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | metadata: 11 | name: data-on-eks 12 | namespace: data-on-eks 13 | labels: 14 | app: data-on-eks 15 | rules: 16 | - apiGroups: [""] 17 | resources: ["secrets", "configmaps"] 18 | verbs: ["get", "list", "watch", "patch", "create", "update", "delete"] 19 | --- 20 | kind: RoleBinding 21 | apiVersion: rbac.authorization.k8s.io/v1 22 | metadata: 23 | name: data-on-eks 24 | namespace: data-on-eks 25 | labels: 26 | app: data-on-eks 27 | subjects: 28 | - kind: ServiceAccount 29 | name: data-on-eks 30 | roleRef: 31 | kind: Role 32 | name: data-on-eks 33 | apiGroup: rbac.authorization.k8s.io 34 | 35 | # allow for updating backstage config 36 | --- 37 | kind: Role 38 | apiVersion: rbac.authorization.k8s.io/v1 39 | 
metadata: 40 | name: data-on-eks 41 | namespace: backstage 42 | labels: 43 | app: data-on-eks 44 | rules: 45 | - apiGroups: [""] 46 | resources: ["secrets"] 47 | verbs: ["get", "list", "watch", "patch", "update"] 48 | - apiGroups: [ "apps" ] 49 | resources: [ "deployments", "replicasets", "pods" ] 50 | verbs: [ "get", "patch" ] 51 | --- 52 | kind: RoleBinding 53 | apiVersion: rbac.authorization.k8s.io/v1 54 | metadata: 55 | name: data-on-eks 56 | namespace: backstage 57 | labels: 58 | app: data-on-eks 59 | subjects: 60 | - kind: ServiceAccount 61 | name: data-on-eks 62 | namespace: data-on-eks 63 | roleRef: 64 | kind: Role 65 | name: data-on-eks 66 | apiGroup: rbac.authorization.k8s.io 67 | -------------------------------------------------------------------------------- /packages/argo-workflows-templates/dev/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ../base/ 3 | -------------------------------------------------------------------------------- /packages/argo-workflows/dev/values-no-sso.yaml: -------------------------------------------------------------------------------- 1 | workflow: 2 | serviceAccount: 3 | create: false 4 | rbac: 5 | create: false 6 | server: 7 | extraArgs: 8 | - --auth-mode=client 9 | -------------------------------------------------------------------------------- /packages/argo-workflows/dev/values.yaml: -------------------------------------------------------------------------------- 1 | workflow: 2 | serviceAccount: 3 | create: false 4 | rbac: 5 | create: false 6 | server: 7 | sso: 8 | enabled: true 9 | clientId: 10 | name: keycloak-oidc 11 | key: client-id 12 | clientSecret: 13 | name: keycloak-oidc 14 | key: secret-key 15 | scopes: 16 | - openid 17 | - profile 18 | - email 19 | - groups 20 | rbac: 21 | enabled: true 22 | extraArgs: 23 | - --auth-mode=client 24 | - --auth-mode=sso 25 | -------------------------------------------------------------------------------- /packages/argocd/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | namespace: argocd 2 | resources: 3 | - install.yaml 4 | -------------------------------------------------------------------------------- /packages/argocd/dev/appproject-cnoe.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: AppProject 3 | metadata: 4 | name: cnoe 5 | namespace: argocd 6 | # Finalizer that ensures that project is not deleted until it is not referenced by any application 7 | finalizers: 8 | - resources-finalizer.argocd.argoproj.io 9 | spec: 10 | description: For the CNOE project 11 | sourceRepos: 12 | - '*' 13 | clusterResourceWhitelist: 14 | - group: '*' 15 | kind: '*' 16 | destinations: 17 | - namespace: cert-manager 18 | server: https://kubernetes.default.svc 19 | - namespace: ingress-nginx 20 | server: https://kubernetes.default.svc 21 | - namespace: aws-load-balancer-controller 22 | server: https://kubernetes.default.svc 23 | - namespace: external-dns 24 | server: https://kubernetes.default.svc 25 | - namespace: external-secrets 26 | server: https://kubernetes.default.svc 27 | - namespace: keycloak 28 | server: https://kubernetes.default.svc 29 | - namespace: argo 30 | server: https://kubernetes.default.svc 31 | - namespace: cnoe-workflows 32 | server: https://kubernetes.default.svc 33 | - namespace: spark-operator 34 | server: https://kubernetes.default.svc 35 | - namespace: crossplane-system 36 | 
server: https://kubernetes.default.svc 37 | - namespace: backstage 38 | server: https://kubernetes.default.svc 39 | - namespace: data-on-eks 40 | server: https://kubernetes.default.svc 41 | - namespace: data-on-eks 42 | server: https://kubernetes.default.svc 43 | -------------------------------------------------------------------------------- /packages/argocd/dev/appproject-demo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: AppProject 3 | metadata: 4 | name: demo 5 | namespace: argocd 6 | finalizers: 7 | - resources-finalizer.argocd.argoproj.io 8 | spec: 9 | description: For the demo 10 | sourceRepos: 11 | - '*' 12 | clusterResourceWhitelist: 13 | - group: '*' 14 | kind: '*' 15 | destinations: 16 | - namespace: demo 17 | server: https://kubernetes.default.svc 18 | -------------------------------------------------------------------------------- /packages/argocd/dev/argocd-cmd-params-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: argocd-cmd-params-cm 5 | labels: 6 | app.kubernetes.io/name: argocd-cmd-params-cm 7 | app.kubernetes.io/part-of: argocd 8 | data: 9 | server.insecure: "true" 10 | -------------------------------------------------------------------------------- /packages/argocd/dev/cm-argocd-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: argocd-cm 6 | app.kubernetes.io/part-of: argocd 7 | name: argocd-cm 8 | data: 9 | accounts.backstage: apiKey 10 | accounts.backstage.enabled: "true" 11 | application.resourceTrackingMethod: annotation 12 | resource.exclusions: | 13 | - kinds: 14 | - ProviderConfigUsage 15 | apiGroups: 16 | - "*" 17 | resource.customizations: | 18 | "awsblueprints.io/*": 19 | health.lua: | 20 | health_status = { 21 | status = "Progressing", 22 | message = "Provisioning ..." 23 | } 24 | 25 | if obj.status == nil or obj.status.conditions == nil then 26 | return health_status 27 | end 28 | 29 | for i, condition in ipairs(obj.status.conditions) do 30 | if condition.type == "Ready" then 31 | if condition.status == "True" then 32 | health_status.status = "Healthy" 33 | health_status.message = "Resource is up-to-date." 34 | return health_status 35 | end 36 | end 37 | 38 | if condition.type == "LastAsyncOperation" then 39 | if condition.status == "False" then 40 | health_status.status = "Degraded" 41 | health_status.message = condition.message 42 | return health_status 43 | end 44 | end 45 | 46 | if condition.type == "Synced" then 47 | if condition.status == "False" then 48 | health_status.status = "Degraded" 49 | health_status.message = condition.message 50 | return health_status 51 | end 52 | end 53 | end 54 | return health_status 55 | "*.aws.upbound.io/*": 56 | health.lua: | 57 | health_status = { 58 | status = "Progressing", 59 | message = "Provisioning ..." 60 | } 61 | 62 | if obj.status == nil or obj.status.conditions == nil then 63 | return health_status 64 | end 65 | 66 | for i, condition in ipairs(obj.status.conditions) do 67 | if condition.type == "Ready" then 68 | if condition.status == "True" then 69 | health_status.status = "Healthy" 70 | health_status.message = "Resource is up-to-date." 
71 | return health_status 72 | end 73 | end 74 | 75 | if condition.type == "LastAsyncOperation" then 76 | if condition.status == "False" then 77 | health_status.status = "Degraded" 78 | health_status.message = condition.message 79 | return health_status 80 | end 81 | end 82 | 83 | if condition.type == "Synced" then 84 | if condition.status == "False" then 85 | health_status.status = "Degraded" 86 | health_status.message = condition.message 87 | return health_status 88 | end 89 | end 90 | end 91 | 92 | return health_status 93 | "*.aws.crossplane.io/*": 94 | health.lua: | 95 | health_status = { 96 | status = "Progressing", 97 | message = "Provisioning ..." 98 | } 99 | 100 | if obj.status == nil or obj.status.conditions == nil then 101 | return health_status 102 | end 103 | 104 | for i, condition in ipairs(obj.status.conditions) do 105 | if condition.type == "Ready" then 106 | if condition.status == "True" then 107 | health_status.status = "Healthy" 108 | health_status.message = "Resource is up-to-date." 109 | return health_status 110 | end 111 | end 112 | 113 | if condition.type == "LastAsyncOperation" then 114 | if condition.status == "False" then 115 | health_status.status = "Degraded" 116 | health_status.message = condition.message 117 | return health_status 118 | end 119 | end 120 | 121 | if condition.type == "Synced" then 122 | if condition.status == "False" then 123 | health_status.status = "Degraded" 124 | health_status.message = condition.message 125 | return health_status 126 | end 127 | end 128 | end 129 | return health_status 130 | -------------------------------------------------------------------------------- /packages/argocd/dev/cm-argocd-rbac-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: argocd-rbac-cm 5 | data: 6 | policy.csv: | 7 | g, superuser, role:admin 8 | g, backstage, role:readonly 9 | -------------------------------------------------------------------------------- /packages/argocd/dev/kustomization.yaml: -------------------------------------------------------------------------------- 1 | namespace: argocd 2 | resources: 3 | - ../base/ 4 | - service-argogrpc.yaml 5 | - appproject-cnoe.yaml 6 | - appproject-demo.yaml 7 | patchesStrategicMerge: 8 | - cm-argocd-cm.yaml 9 | - argocd-cmd-params-cm.yaml 10 | - cm-argocd-rbac-cm.yaml 11 | images: 12 | - name: quay.io/argoproj/argocd:v2.7.6 13 | newTag: v2.7.6 14 | -------------------------------------------------------------------------------- /packages/argocd/dev/service-argogrpc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | alb.ingress.kubernetes.io/backend-protocol-version: HTTP2 6 | labels: 7 | app: argogrpc 8 | name: argogrpc 9 | namespace: argocd 10 | spec: 11 | ports: 12 | - name: "8080" 13 | port: 8080 14 | protocol: TCP 15 | targetPort: 8080 16 | selector: 17 | app.kubernetes.io/name: argocd-server 18 | sessionAffinity: None 19 | type: ClusterIP 20 | -------------------------------------------------------------------------------- /packages/backstage/base/install-backstage.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: backstage 5 | namespace: backstage 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: backstage 11 | template: 12 | metadata: 13 | labels: 14 | app: backstage 15 | 
spec: 16 | containers: 17 | - name: backstage 18 | image: abc/abc:abc 19 | ports: 20 | - name: http 21 | containerPort: 7007 22 | envFrom: 23 | - secretRef: 24 | name: postgresql-config 25 | --- 26 | apiVersion: v1 27 | kind: Service 28 | metadata: 29 | name: backstage 30 | namespace: backstage 31 | spec: 32 | selector: 33 | app: backstage 34 | ports: 35 | - name: http 36 | port: 7007 37 | targetPort: http 38 | --- 39 | apiVersion: v1 40 | kind: Namespace 41 | metadata: 42 | name: backstage 43 | -------------------------------------------------------------------------------- /packages/backstage/base/install-postgresql.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: postgresql 5 | namespace: backstage 6 | labels: 7 | app: postgresql 8 | spec: 9 | storageClassName: gp2 10 | capacity: 11 | accessModes: 12 | - ReadWriteOnce 13 | resources: 14 | requests: 15 | storage: 1Gi 16 | --- 17 | apiVersion: apps/v1 18 | kind: StatefulSet 19 | metadata: 20 | name: postgresql 21 | namespace: backstage 22 | labels: 23 | app: postgresql 24 | spec: 25 | serviceName: service-postgresql 26 | replicas: 1 27 | selector: 28 | matchLabels: 29 | app: postgresql 30 | template: 31 | metadata: 32 | labels: 33 | app: postgresql 34 | spec: 35 | containers: 36 | - name: postgres 37 | resources: 38 | limits: 39 | memory: 500Mi 40 | requests: 41 | cpu: 100m 42 | memory: 300Mi 43 | image: docker.io/library/postgres:15.3-alpine3.18 44 | envFrom: 45 | - secretRef: 46 | name: postgresql-config 47 | ports: 48 | - containerPort: 5432 49 | name: postgresdb 50 | volumeMounts: 51 | - name: data 52 | mountPath: /var/lib/postgresql/data 53 | subPath: postgress 54 | volumes: 55 | - name: data 56 | persistentVolumeClaim: 57 | claimName: postgresql 58 | --- 59 | apiVersion: v1 60 | kind: Service 61 | metadata: 62 | name: postgresql 63 | namespace: backstage 64 | labels: 65 | app: postgresql 66 | spec: 67 | ports: 68 | - port: 5432 69 | name: postgres 70 | clusterIP: None 71 | selector: 72 | app: postgresql 73 | -------------------------------------------------------------------------------- /packages/backstage/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - install-postgresql.yaml 3 | - install-backstage.yaml 4 | -------------------------------------------------------------------------------- /packages/backstage/dev/cm-backstage-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: backstage 6 | name: backstage-config 7 | data: 8 | app-config.yaml: | 9 | app: 10 | title: CNOE Backstage 11 | baseUrl: ${BACKSTAGE_FRONTEND_URL} 12 | organization: 13 | name: CNOE 14 | backend: 15 | # Used for enabling authentication, secret is shared by all backend plugins 16 | # See https://backstage.io/docs/tutorials/backend-to-backend-auth for 17 | # information on the format 18 | # auth: 19 | # keys: 20 | # - secret: ${BACKEND_SECRET} 21 | baseUrl: ${BACKSTAGE_FRONTEND_URL} 22 | listen: 23 | port: 7007 24 | # Uncomment the following host directive to bind to specific interfaces 25 | # host: 127.0.0.1 26 | csp: 27 | connect-src: ["'self'", 'http:', 'https:'] 28 | # Content-Security-Policy directives follow the Helmet format: https://helmetjs.github.io/#reference 29 | # Default Helmet Content-Security-Policy values can be removed by 
setting the key to false 30 | cors: 31 | origin: ${BACKSTAGE_FRONTEND_URL} 32 | methods: [GET, HEAD, PATCH, POST, PUT, DELETE] 33 | credentials: true 34 | database: 35 | client: pg 36 | connection: 37 | host: ${POSTGRES_HOST} 38 | port: ${POSTGRES_PORT} 39 | user: ${POSTGRES_USER} 40 | password: ${POSTGRES_PASSWORD} 41 | cache: 42 | store: memory 43 | # workingDirectory: /tmp # Use this to configure a working directory for the scaffolder, defaults to the OS temp-dir 44 | 45 | integrations: 46 | github: 47 | - host: github.com 48 | apps: 49 | - $include: github-integration.yaml 50 | # - host: github.com 51 | # # This is a Personal Access Token or PAT from GitHub. You can find out how to generate this token, and more information 52 | # # about setting up the GitHub integration here: https://backstage.io/docs/getting-started/configuration#setting-up-a-github-integration 53 | # token: ${GITHUB_TOKEN} 54 | ### Example for how to add your GitHub Enterprise instance using the API: 55 | # - host: ghe.example.net 56 | # apiBaseUrl: https://ghe.example.net/api/v3 57 | # token: ${GHE_TOKEN} 58 | 59 | proxy: 60 | '/argo-workflows/api': 61 | target: ${ARGO_WORKFLOWS_URL} 62 | changeOrigin: true 63 | secure: true 64 | headers: 65 | Authorization: 66 | $env: ARGO_WORKFLOWS_AUTH_TOKEN 67 | '/argocd/api': 68 | target: ${ARGO_CD_URL} 69 | changeOrigin: true 70 | headers: 71 | Cookie: 72 | $env: ARGOCD_AUTH_TOKEN 73 | 74 | # Reference documentation http://backstage.io/docs/features/techdocs/configuration 75 | # Note: After experimenting with basic setup, use CI/CD to generate docs 76 | # and an external cloud storage when deploying TechDocs for production use-case. 77 | # https://backstage.io/docs/features/techdocs/how-to-guides#how-to-migrate-from-techdocs-basic-to-recommended-deployment-approach 78 | techdocs: 79 | builder: 'local' # Alternatives - 'external' 80 | generator: 81 | runIn: 'docker' # Alternatives - 'local' 82 | publisher: 83 | type: 'local' # Alternatives - 'googleGcs' or 'awsS3'. Read documentation for using alternatives. 84 | 85 | auth: 86 | environment: development 87 | session: 88 | secret: MW2sV-sIPngEl26vAzatV-6VqfsgAx4bPIz7PuE_2Lk= 89 | providers: 90 | keycloak-oidc: 91 | development: 92 | metadataUrl: ${KEYCLOAK_NAME_METADATA} 93 | clientId: backstage 94 | clientSecret: ${KEYCLOAK_CLIENT_SECRET} 95 | scope: 'openid profile email groups' 96 | prompt: auto 97 | 98 | scaffolder: 99 | # see https://backstage.io/docs/features/software-templates/configuration for software template options 100 | 101 | catalog: 102 | import: 103 | entityFilename: catalog-info.yaml 104 | pullRequestBranchName: backstage-integration 105 | rules: 106 | - allow: [Component, System, API, Resource, Location, Template] 107 | locations: 108 | # Examples from a public GitHub repository. 
109 | - type: url 110 | target: https://github.com/awslabs/backstage-templates-on-eks/blob/main/catalog-info.yaml 111 | ## Uncomment these lines to add an example org 112 | # - type: url 113 | # target: https://github.com/backstage/backstage/blob/master/packages/catalog-model/examples/acme-corp.yaml 114 | # rules: 115 | # - allow: [User, Group] 116 | kubernetes: 117 | serviceLocatorMethod: 118 | type: 'multiTenant' 119 | clusterLocatorMethods: 120 | - $include: k8s-config.yaml 121 | argoWorkflows: 122 | baseUrl: ${ARGO_WORKFLOWS_URL} 123 | -------------------------------------------------------------------------------- /packages/backstage/dev/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ../base 3 | - sa-backstage.yaml 4 | - cm-backstage-config.yaml 5 | - secret-k8s-config.yaml 6 | patchesStrategicMerge: 7 | - patches/deployment-backstage.yaml 8 | -------------------------------------------------------------------------------- /packages/backstage/dev/patches/deployment-backstage.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: backstage 5 | namespace: backstage 6 | spec: 7 | template: 8 | spec: 9 | serviceAccountName: backstage 10 | volumes: 11 | - name: backstage-config 12 | projected: 13 | sources: 14 | - secret: 15 | name: integrations 16 | items: 17 | - key: github-integration.yaml 18 | path: github-integration.yaml 19 | - configMap: 20 | name: backstage-config 21 | items: 22 | - key: app-config.yaml 23 | path: app-config.yaml 24 | - secret: 25 | name: k8s-config 26 | items: 27 | - key: k8s-config.yaml 28 | path: k8s-config.yaml 29 | containers: 30 | - name: backstage 31 | image: public.ecr.aws/cnoe-io/backstage:v0.0.2 32 | command: 33 | - node 34 | - packages/backend 35 | - --config 36 | - config/app-config.yaml 37 | volumeMounts: 38 | - name: backstage-config 39 | mountPath: "/app/config" 40 | readOnly: true 41 | envFrom: 42 | - secretRef: 43 | name: backstage-env-vars 44 | -------------------------------------------------------------------------------- /packages/backstage/dev/sa-backstage.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: backstage 5 | namespace: backstage 6 | 7 | --- 8 | kind: ClusterRole 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | metadata: 11 | name: read-all 12 | rules: 13 | - apiGroups: ["*"] 14 | resources: ["*"] 15 | verbs: ["get", "list", "watch"] 16 | --- 17 | kind: ClusterRoleBinding 18 | apiVersion: rbac.authorization.k8s.io/v1 19 | metadata: 20 | name: backstage-read-all 21 | subjects: 22 | - kind: ServiceAccount 23 | name: backstage 24 | namespace: backstage 25 | roleRef: 26 | kind: ClusterRole 27 | name: read-all 28 | apiGroup: rbac.authorization.k8s.io 29 | --- 30 | # These are needed for the argo workflow plugin to create workflows during scaffolding. 
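# The ClusterRole below is intentionally narrow: it grants create only on argoproj.io Workflows
# and on core ConfigMaps, which is the minimum the Backstage scaffolder needs to submit
# workflow runs during scaffolding.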
31 | apiVersion: rbac.authorization.k8s.io/v1 32 | kind: ClusterRole 33 | metadata: 34 | name: backstage-argo-worfklows 35 | rules: 36 | - apiGroups: ["argoproj.io"] 37 | resources: ["workflows"] 38 | verbs: ["create"] 39 | - apiGroups: [""] 40 | resources: ["configmaps"] 41 | verbs: ["create"] 42 | --- 43 | apiVersion: rbac.authorization.k8s.io/v1 44 | kind: ClusterRoleBinding 45 | metadata: 46 | name: backstage-argo-worfklows 47 | roleRef: 48 | apiGroup: rbac.authorization.k8s.io 49 | kind: ClusterRole 50 | name: backstage-argo-worfklows 51 | subjects: 52 | - kind: ServiceAccount 53 | name: backstage 54 | namespace: backstage 55 | -------------------------------------------------------------------------------- /packages/backstage/dev/secret-k8s-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: k8s-config 5 | namespace: backstage 6 | stringData: 7 | k8s-config.yaml: | 8 | type: 'config' 9 | clusters: 10 | - url: https://kubernetes.default.svc.cluster.local 11 | name: local 12 | authProvider: 'serviceAccount' 13 | skipTLSVerify: true 14 | skipMetricsLookup: true 15 | serviceAccountToken: 16 | $file: /var/run/secrets/kubernetes.io/serviceaccount/token 17 | caData: 18 | $file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 19 | -------------------------------------------------------------------------------- /packages/backstage/dev/user-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: keycloak-superuser-group 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-admin 9 | subjects: 10 | - apiGroup: rbac.authorization.k8s.io 11 | kind: Group 12 | name: superuser 13 | 14 | --- 15 | kind: ClusterRole 16 | apiVersion: rbac.authorization.k8s.io/v1 17 | metadata: 18 | name: keycloak-write-workflows 19 | rules: 20 | - apiGroups: ["argoproj.io"] 21 | resources: ["workflows"] 22 | verbs: ["*"] 23 | 24 | --- 25 | apiVersion: rbac.authorization.k8s.io/v1 26 | kind: ClusterRoleBinding 27 | metadata: 28 | name: keycloak-backstage-users-group 29 | roleRef: 30 | apiGroup: rbac.authorization.k8s.io 31 | kind: ClusterRole 32 | name: keycloak-write-workflows 33 | subjects: 34 | - apiGroup: rbac.authorization.k8s.io 35 | kind: Group 36 | name: backstage-users 37 | -------------------------------------------------------------------------------- /packages/cert-manager/dev/values.yaml: -------------------------------------------------------------------------------- 1 | installCRDs: true 2 | global: 3 | leaderElection: 4 | namespace: cert-manager 5 | -------------------------------------------------------------------------------- /packages/crossplane-compositions/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | namespace: crossplane-system 2 | resources: 3 | - https://github.com/awslabs/crossplane-on-eks/compositions/upbound-aws-provider/s3 4 | -------------------------------------------------------------------------------- /packages/crossplane-compositions/base/provider-aws-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: aws.upbound.io/v1beta1 2 | kind: ProviderConfig 3 | metadata: 4 | name: provider-aws-config 5 | spec: 6 | credentials: 7 | source: IRSA 8 | 
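# For reference, a managed resource consumes this ProviderConfig via spec.providerConfigRef.
# The sketch below is illustrative only and is not applied by this kustomization; the bucket
# name and region are hypothetical, and it assumes the provider-aws-s3 package installed by
# provider-aws.yaml in this directory:
#
# apiVersion: s3.aws.upbound.io/v1beta1
# kind: Bucket
# metadata:
#   name: example-cnoe-bucket
# spec:
#   forProvider:
#     region: us-west-2
#   providerConfigRef:
#     name: provider-aws-config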
-------------------------------------------------------------------------------- /packages/crossplane-compositions/base/provider-aws.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: pkg.crossplane.io/v1 2 | kind: Provider 3 | metadata: 4 | name: provider-aws-s3 5 | spec: 6 | package: xpkg.upbound.io/upbound/provider-aws-s3:v0.41.0 7 | controllerConfigRef: 8 | name: provider-aws-config 9 | -------------------------------------------------------------------------------- /packages/crossplane-compositions/dev/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ../base 3 | -------------------------------------------------------------------------------- /packages/crossplane/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | namespace: crossplane-system 2 | resources: 3 | - provider-aws.yaml 4 | - provider-aws-config.yaml 5 | -------------------------------------------------------------------------------- /packages/crossplane/base/provider-aws-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: aws.upbound.io/v1beta1 2 | kind: ProviderConfig 3 | metadata: 4 | name: provider-aws-config 5 | annotations: 6 | argocd.argoproj.io/sync-wave: "20" 7 | argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true 8 | spec: 9 | credentials: 10 | source: IRSA 11 | -------------------------------------------------------------------------------- /packages/crossplane/base/provider-aws.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: pkg.crossplane.io/v1 2 | kind: Provider 3 | metadata: 4 | annotations: 5 | argocd.argoproj.io/sync-wave: "0" 6 | name: provider-family-aws 7 | spec: 8 | package: xpkg.upbound.io/upbound/provider-family-aws:v0.41.0 9 | 10 | --- 11 | apiVersion: pkg.crossplane.io/v1 12 | kind: Provider 13 | metadata: 14 | name: provider-aws-s3 15 | annotations: 16 | argocd.argoproj.io/sync-wave: "10" 17 | spec: 18 | package: xpkg.upbound.io/upbound/provider-aws-s3:v0.41.0 19 | controllerConfigRef: 20 | name: provider-aws-config 21 | -------------------------------------------------------------------------------- /packages/crossplane/dev/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ../base 3 | -------------------------------------------------------------------------------- /packages/crossplane/dev/values.yaml: -------------------------------------------------------------------------------- 1 | args: 2 | - --debug 3 | - --enable-environment-configs 4 | -------------------------------------------------------------------------------- /packages/external-dns/dev/values.yaml: -------------------------------------------------------------------------------- 1 | sources: 2 | - ingress 3 | provider: aws 4 | txtOwnerId: cnoe-external-dns 5 | registry: txt 6 | interval: 2m 7 | policy: upsert-only 8 | extraArgs: 9 | - --aws-zone-type=public 10 | - --aws-zones-cache-duration=1h -------------------------------------------------------------------------------- /packages/external-secrets/dev/values.yaml: -------------------------------------------------------------------------------- 1 | installCRDs: true 2 | -------------------------------------------------------------------------------- /packages/ingress-nginx/dev/values.yaml: 
-------------------------------------------------------------------------------- 1 | controller: 2 | config: 3 | hsts: 'false' 4 | service: 5 | type: LoadBalancer 6 | annotations: 7 | service.beta.kubernetes.io/aws-load-balancer-name: cnoe 8 | service.beta.kubernetes.io/aws-load-balancer-type: external 9 | service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing 10 | service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip 11 | loadBalancerClass: service.k8s.aws/nlb 12 | targetPorts: 13 | https: 'https' 14 | -------------------------------------------------------------------------------- /packages/keycloak/base/install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: keycloak 5 | labels: 6 | app: keycloak 7 | spec: 8 | ports: 9 | - name: http 10 | port: 8080 11 | targetPort: 8080 12 | selector: 13 | app: keycloak 14 | type: LoadBalancer 15 | --- 16 | apiVersion: apps/v1 17 | kind: Deployment 18 | metadata: 19 | name: keycloak 20 | labels: 21 | app: keycloak 22 | spec: 23 | replicas: 1 24 | selector: 25 | matchLabels: 26 | app: keycloak 27 | template: 28 | metadata: 29 | labels: 30 | app: keycloak 31 | spec: 32 | containers: 33 | - name: keycloak 34 | image: quay.io/keycloak/keycloak:22.0.0 35 | args: ["start-dev"] 36 | env: 37 | - name: KEYCLOAK_ADMIN 38 | value: "admin" 39 | - name: KEYCLOAK_ADMIN_PASSWORD 40 | value: "admin" 41 | - name: KC_PROXY 42 | value: "edge" 43 | ports: 44 | - name: http 45 | containerPort: 8080 46 | readinessProbe: 47 | httpGet: 48 | path: /realms/master 49 | port: 8080 50 | -------------------------------------------------------------------------------- /packages/keycloak/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - install.yaml -------------------------------------------------------------------------------- /packages/keycloak/dev-external-secrets/external-secrets.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: external-secrets.io/v1beta1 3 | kind: ExternalSecret 4 | metadata: 5 | name: keycloak-config 6 | namespace: keycloak 7 | spec: 8 | refreshInterval: 5m 9 | secretStoreRef: 10 | name: keycloak 11 | kind: SecretStore 12 | target: 13 | name: keycloak-config 14 | creationPolicy: Owner 15 | data: 16 | - secretKey: KC_HOSTNAME 17 | remoteRef: 18 | key: cnoe/keycloak/config 19 | property: KC_HOSTNAME 20 | - secretKey: KEYCLOAK_ADMIN_PASSWORD 21 | remoteRef: 22 | key: cnoe/keycloak/config 23 | property: KEYCLOAK_ADMIN_PASSWORD 24 | 25 | --- 26 | apiVersion: external-secrets.io/v1beta1 27 | kind: ExternalSecret 28 | metadata: 29 | name: postgresql-config 30 | namespace: keycloak 31 | spec: 32 | refreshInterval: 5m 33 | secretStoreRef: 34 | name: keycloak 35 | kind: SecretStore 36 | target: 37 | name: postgresql-config 38 | creationPolicy: Owner 39 | data: 40 | - secretKey: POSTGRES_DB 41 | remoteRef: 42 | key: cnoe/keycloak/config 43 | property: POSTGRES_DB 44 | - secretKey: POSTGRES_PASSWORD 45 | remoteRef: 46 | key: cnoe/keycloak/config 47 | property: POSTGRES_PASSWORD 48 | - secretKey: POSTGRES_USER 49 | remoteRef: 50 | key: cnoe/keycloak/config 51 | property: POSTGRES_USER 52 | --- 53 | apiVersion: external-secrets.io/v1beta1 54 | kind: ExternalSecret 55 | metadata: 56 | name: keycloak-user-config 57 | namespace: keycloak 58 | spec: 59 | refreshInterval: 5m 60 | secretStoreRef: 61 | name: keycloak 62 | 
kind: SecretStore 63 | target: 64 | name: keycloak-user-config 65 | creationPolicy: Owner 66 | data: 67 | - secretKey: user1-password 68 | remoteRef: 69 | key: cnoe/keycloak/config 70 | property: user1-password 71 | -------------------------------------------------------------------------------- /packages/keycloak/dev-external-secrets/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ../dev 3 | - external-secrets.yaml 4 | -------------------------------------------------------------------------------- /packages/keycloak/dev/cm-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: keycloak-config 5 | data: 6 | keycloak.conf: | 7 | # Database 8 | # The database vendor. 9 | db=postgres 10 | 11 | # The username of the database user. 12 | db-username=keycloak 13 | db-url-host=postgresql.keycloak 14 | 15 | # Observability 16 | 17 | # If the server should expose healthcheck endpoints. 18 | #health-enabled=true 19 | 20 | # If the server should expose metrics endpoints. 21 | #metrics-enabled=true 22 | 23 | # The proxy address forwarding mode if the server is behind a reverse proxy. 24 | proxy=edge 25 | 26 | hostname-strict-backchannel=true 27 | -------------------------------------------------------------------------------- /packages/keycloak/dev/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ../base 3 | - ns.yaml 4 | - cm-config.yaml 5 | - postgres.yaml 6 | namespace: keycloak 7 | patchesStrategicMerge: 8 | - patches/service.yaml 9 | - patches/deployment.yaml 10 | patchesJson6902: 11 | - target: 12 | version: v1 13 | kind: Deployment 14 | group: apps 15 | name: keycloak 16 | namespace: keycloak 17 | patch: |- 18 | - op: remove 19 | path: /spec/template/spec/containers/0/env/2 20 | -------------------------------------------------------------------------------- /packages/keycloak/dev/ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: keycloak 5 | -------------------------------------------------------------------------------- /packages/keycloak/dev/patches/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: keycloak 5 | labels: 6 | app: keycloak 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: keycloak 12 | template: 13 | metadata: 14 | labels: 15 | app: keycloak 16 | spec: 17 | volumes: 18 | - name: keycloak-config 19 | configMap: 20 | name: keycloak-config 21 | containers: 22 | - name: keycloak 23 | env: 24 | - name: KEYCLOAK_ADMIN 25 | value: 'cnoe-admin' 26 | - name: KC_DB_PASSWORD 27 | valueFrom: 28 | secretKeyRef: 29 | name: postgresql-config 30 | key: POSTGRES_PASSWORD 31 | envFrom: 32 | - secretRef: 33 | name: keycloak-config 34 | args: 35 | - start 36 | volumeMounts: 37 | - name: keycloak-config 38 | mountPath: "/opt/keycloak/conf" 39 | readOnly: true 40 | -------------------------------------------------------------------------------- /packages/keycloak/dev/patches/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: keycloak 5 | spec: 6 | type: ClusterIP 7 | 
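# Strategic-merge patch: the base Service (packages/keycloak/base/install.yaml) exposes Keycloak
# as a LoadBalancer; this overlay switches it to ClusterIP, presumably so Keycloak is reached
# only through the cluster ingress rather than its own load balancer.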
-------------------------------------------------------------------------------- /packages/keycloak/dev/postgres.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: postgresql 5 | namespace: keycloak 6 | labels: 7 | app: postgresql 8 | spec: 9 | storageClassName: gp2 10 | capacity: 11 | accessModes: 12 | - ReadWriteOnce 13 | resources: 14 | requests: 15 | storage: 1Gi 16 | --- 17 | apiVersion: apps/v1 18 | kind: StatefulSet 19 | metadata: 20 | name: postgresql 21 | namespace: keycloak 22 | labels: 23 | app: postgresql 24 | spec: 25 | serviceName: service-postgresql 26 | replicas: 1 27 | selector: 28 | matchLabels: 29 | app: postgresql 30 | template: 31 | metadata: 32 | labels: 33 | app: postgresql 34 | spec: 35 | containers: 36 | - name: postgres 37 | resources: 38 | limits: 39 | memory: 500Mi 40 | requests: 41 | cpu: 100m 42 | memory: 300Mi 43 | image: docker.io/library/postgres:15.3-alpine3.18 44 | envFrom: 45 | - secretRef: 46 | name: postgresql-config 47 | ports: 48 | - containerPort: 5432 49 | name: postgresdb 50 | volumeMounts: 51 | - name: data 52 | mountPath: /var/lib/postgresql/data 53 | subPath: postgress 54 | volumes: 55 | - name: data 56 | persistentVolumeClaim: 57 | claimName: postgresql 58 | 59 | --- 60 | apiVersion: v1 61 | kind: Service 62 | metadata: 63 | name: postgresql 64 | namespace: keycloak 65 | labels: 66 | app: postgresql 67 | spec: 68 | ports: 69 | - port: 5432 70 | name: postgres 71 | clusterIP: None 72 | selector: 73 | app: postgresql 74 | -------------------------------------------------------------------------------- /packages/keycloak/dev/service-admin.yaml: -------------------------------------------------------------------------------- 1 | # apiVersion: v1 2 | # kind: Service 3 | # metadata: 4 | # name: keycloak-admin 5 | # labels: 6 | # app: keycloak 7 | # spec: 8 | # ports: 9 | # - name: http 10 | # port: 8080 11 | # targetPort: 8080 12 | # selector: 13 | # app: keycloak 14 | # type: ClusterIP 15 | # --- 16 | # apiVersion: v1 17 | # kind: Service 18 | # metadata: 19 | # name: keycloak-dummy 20 | # labels: 21 | # app: keycloak 22 | # spec: 23 | # ports: 24 | # - name: dummy 25 | # port: 8081 26 | # targetPort: 8081 27 | # selector: 28 | # app: keycloak 29 | # type: ClusterIP -------------------------------------------------------------------------------- /setups/argocd/application-set.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: ApplicationSet 3 | metadata: 4 | name: demo 5 | namespace: argocd 6 | spec: 7 | generators: 8 | - scmProvider: 9 | cloneProtocol: https 10 | filters: 11 | - repositoryMatch: ^demo 12 | pathsExist: [kustomize/dev/kustomization.yaml] 13 | github: 14 | allBranches: false 15 | organization: ${GITHUB_ORG_NAME} 16 | tokenRef: 17 | key: password 18 | secretName: github-token 19 | requeueAfterSeconds: 180 20 | template: 21 | metadata: 22 | name: '{{ repository }}' 23 | spec: 24 | destination: 25 | namespace: demo 26 | server: https://kubernetes.default.svc 27 | project: demo 28 | source: 29 | path: kustomize/dev 30 | repoURL: '{{ url }}' 31 | targetRevision: HEAD 32 | syncPolicy: 33 | automated: {} 34 | syncOptions: 35 | - CreateNamespace=true 36 | 37 | -------------------------------------------------------------------------------- /setups/argocd/github-secret.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: github-token 5 | namespace: argocd 6 | labels: 7 | argocd.argoproj.io/secret-type: repo-creds 8 | stringData: 9 | url: $GITHUB_URL 10 | username: unused 11 | password: $GITHUB_TOKEN 12 | -------------------------------------------------------------------------------- /setups/argocd/install-sso.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnoe-io/reference-implementation-aws/2c571cb70d083c357e8e056d33e5f4fb625eec60/setups/argocd/install-sso.sh -------------------------------------------------------------------------------- /setups/argocd/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -o pipefail 3 | 4 | REPO_ROOT=$(git rev-parse --show-toplevel) 5 | 6 | if [ -f "${REPO_ROOT}/private/github-token" ]; then 7 | GITHUB_TOKEN=$(cat ${REPO_ROOT}/private/github-token | tr -d '\n') 8 | else 9 | echo 'To get started grant the following permissions: 10 | - Repository access for all repositories 11 | - Read-only access to: Administration, Contents, and Metadata. 12 | Get your GitHub personal access token from: https://github.com/settings/tokens?type=beta' 13 | echo "Enter your token. e.g. github_pat_abcde: " 14 | read -s GITHUB_TOKEN 15 | fi 16 | 17 | 18 | if [[ -z "${GITHUB_URL}" ]]; then 19 | read -p "Enter GitHub repository URL e.g. https://github.com/cnoe-io/reference-implementation-aws : " GITHUB_URL 20 | export GITHUB_URL 21 | fi 22 | 23 | export GITHUB_TOKEN 24 | 25 | echo 'creating secret for ArgoCD in your cluster...' 26 | kubectl create ns argocd || true 27 | envsubst < github-secret.yaml | kubectl apply -f - 28 | 29 | echo 'creating Argo CD resources' 30 | cd ${REPO_ROOT} 31 | retry_count=0 32 | max_retries=2 33 | 34 | set +e 35 | while [ $retry_count -le $max_retries ]; do 36 | kustomize build packages/argocd/dev | kubectl apply -f - 37 | if [ $? -eq 0 ]; then 38 | break 39 | fi 40 | echo "An error occurred. Retrying in 5 seconds" 41 | sleep 5 42 | ((retry_count++)) 43 | done 44 | 45 | if [ $? 
-ne 0 ]; then 46 | echo 'could not install argocd in your cluster' 47 | exit 1 48 | fi 49 | 50 | set -e 51 | echo 'waiting for ArgoCD to be ready' 52 | kubectl -n argocd rollout status --watch --timeout=300s statefulset/argocd-application-controller 53 | kubectl -n argocd rollout status --watch --timeout=300s deployment/argocd-server 54 | 55 | cd - 56 | -------------------------------------------------------------------------------- /setups/argocd/secret-argocd-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: argocd-secret 5 | namespace: argocd 6 | type: Opaque 7 | stringData: 8 | oidc.keycloak.clientSecret: ${KEYCLOAK_CLIENT_SECRET} 9 | -------------------------------------------------------------------------------- /setups/argocd/uninstall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -o pipefail 3 | 4 | REPO_ROOT=$(git rev-parse --show-toplevel) 5 | kustomize build ${REPO_ROOT}/packages/argocd/dev | kubectl delete -f - 6 | 7 | kubectl delete ns argocd || true 8 | -------------------------------------------------------------------------------- /setups/config.yaml: -------------------------------------------------------------------------------- 1 | # This is the GitHub URL where Kubernetes manifests are stored. 2 | # If you forked this repo, you will need to update this. 3 | repo_url: "https://github.com/cnoe-io/reference-implementation-aws" 4 | # Tags to apply to AWS resources 5 | tags: 6 | env: "dev" 7 | project: "cnoe" 8 | region: "us-west-2" 9 | # The name of the EKS cluster you are installing this under. 10 | cluster_name: "cnoe-ref-impl" 11 | # Set this to false if you want to manage DNS somewhere else, e.g. manually. 12 | enable_dns_management: true 13 | # If using external DNS, specify the Route53 hosted zone ID. Required if enable_dns_management is set to true. 14 | hosted_zone_id: Z0REPLACEME 15 | # If external DNS is not used, this value must be provided. 16 | domain_name: subdomain.domain.root 17 | 18 | # If set to true, we will store secrets in AWS Secrets Manager, then sync them to the cluster using External Secrets Operator. 19 | enable_external_secret: true 20 | -------------------------------------------------------------------------------- /setups/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -o pipefail 3 | REPO_ROOT=$(git rev-parse --show-toplevel) 4 | 5 | source ${REPO_ROOT}/setups/utils.sh 6 | 7 | echo -e "${GREEN}Installing with the following options: ${NC}" 8 | echo -e "${GREEN}----------------------------------------------------${NC}" 9 | yq '... comments=""' ${REPO_ROOT}/setups/config.yaml 10 | echo -e "${GREEN}----------------------------------------------------${NC}" 11 | echo -e "${PURPLE}\nTargets:${NC}" 12 | echo "Kubernetes cluster: $(kubectl config current-context)" 13 | echo "AWS profile (if set): ${AWS_PROFILE}" 14 | echo "AWS account number: $(aws sts get-caller-identity --query "Account" --output text)" 15 | 16 | echo -e "${GREEN}\nAre you sure you want to continue?${NC}" 17 | read -p '(yes/no): ' response 18 | if [[ ! "$response" =~ ^[Yy][Ee][Ss]$ ]]; then 19 | echo 'exiting.' 20 | exit 0 21 | fi 22 | 23 | export GITHUB_URL=$(yq '.repo_url' ./setups/config.yaml) 24 | 25 | # Set up ArgoCD. We will use ArgoCD to install all components.
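# setups/argocd/install.sh prompts for a GitHub token if private/github-token is missing,
# creates the repo-creds secret, applies the packages/argocd/dev kustomize overlay (retrying
# a couple of times), and waits for the argocd-server Deployment and the
# argocd-application-controller StatefulSet to finish rolling out.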
26 | cd "${REPO_ROOT}/setups/argocd/" 27 | ./install.sh 28 | cd - 29 | 30 | # The rest of the steps are defined as a Terraform module. Parse the config to JSON and use it as the Terraform variable file. This is done because JSON doesn't allow you to easily place comments. 31 | cd "${REPO_ROOT}/terraform/" 32 | yq -o json '.' ../setups/config.yaml > terraform.tfvars.json 33 | terraform init -upgrade 34 | terraform apply -auto-approve 35 | -------------------------------------------------------------------------------- /setups/uninstall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -o pipefail 3 | 4 | REPO_ROOT=$(git rev-parse --show-toplevel) 5 | SETUP_DIR="${REPO_ROOT}/setups" 6 | TF_DIR="${REPO_ROOT}/terraform" 7 | source ${REPO_ROOT}/setups/utils.sh 8 | 9 | cd ${SETUP_DIR} 10 | 11 | echo -e "${PURPLE}\nTargets:${NC}" 12 | echo "Kubernetes cluster: $(kubectl config current-context)" 13 | echo "AWS profile (if set): ${AWS_PROFILE}" 14 | echo "AWS account number: $(aws sts get-caller-identity --query "Account" --output text)" 15 | 16 | echo -e "${RED}\nAre you sure you want to continue?${NC}" 17 | read -p '(yes/no): ' response 18 | if [[ ! "$response" =~ ^[Yy][Ee][Ss]$ ]]; then 19 | echo 'exiting.' 20 | exit 0 21 | fi 22 | 23 | cd "${TF_DIR}" 24 | terraform destroy 25 | 26 | cd "${SETUP_DIR}/argocd/" 27 | ./uninstall.sh 28 | cd - 29 | -------------------------------------------------------------------------------- /setups/utils.sh: -------------------------------------------------------------------------------- 1 | set -e 2 | export RED='\033[0;31m' 3 | export GREEN='\033[0;32m' 4 | export PURPLE='\033[0;35m' 5 | export NC='\033[0m' 6 | 7 | check_command() { 8 | command -v "$1" >/dev/null 2>&1 9 | } 10 | 11 | # Validation 12 | clis=("aws" "kubectl" "jq" "kustomize" "curl" "yq") 13 | for cli in "${clis[@]}"; do 14 | if check_command "$cli"; then 15 | continue 16 | else 17 | echo -e "${RED}$cli is not installed. Please install it to continue.${NC}" 18 | exit 4 19 | fi 20 | done 21 | 22 | DEFAULT_KUBECONFIG_FILE="$HOME/.kube/config" 23 | # Check if the default kubeconfig file exists 24 | if [ ! -f "${DEFAULT_KUBECONFIG_FILE}" ]; then 25 | echo "${DEFAULT_KUBECONFIG_FILE} kubeconfig file does not exist. Exiting..." 26 | exit 1 27 | fi 28 | 29 | if [ "$( grep -v "^$\|^ *$" -c "${DEFAULT_KUBECONFIG_FILE}" )" -eq "0" ]; then 30 | echo -e "${RED}Error: ${DEFAULT_KUBECONFIG_FILE} kubeconfig file does not exist or is empty.${NC}" 31 | echo -e "${PURPLE}Info: Please configure a valid kubeconfig file or set the KUBECONFIG environment variable.${NC}" 32 | exit 1 33 | fi 34 | 35 | kubectl cluster-info > /dev/null 36 | if [ $? -ne 0 ]; then 37 | echo "Could not get cluster info. Ensure kubectl is configured correctly" 38 | exit 1 39 | fi 40 | 41 | minor=$(kubectl version --client=true -o yaml | yq '.clientVersion.minor') 42 | if [[ ${minor} -lt "27" ]]; then 43 | echo -e "${RED} ${minor} this kubectl version is not supported. 
Please upgrade to 1.27+ ${NC}" 44 | exit 5 45 | fi 46 | -------------------------------------------------------------------------------- /terraform/argo-workflows.tf: -------------------------------------------------------------------------------- 1 | #--------------------------------------------------------------- 2 | # Setups to run Data on EKS demo 3 | #--------------------------------------------------------------- 4 | module "data_on_eks_runner_role" { 5 | source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" 6 | version = "~> 5.14" 7 | 8 | role_policy_arns = { 9 | policy = "arn:aws:iam::aws:policy/AdministratorAccess" 10 | } 11 | role_name_prefix = "cnoe-external-dns" 12 | oidc_providers = { 13 | main = { 14 | provider_arn = data.aws_iam_openid_connect_provider.eks_oidc.arn 15 | namespace_service_accounts = ["data-on-eks:data-on-eks"] 16 | } 17 | } 18 | tags = var.tags 19 | } 20 | 21 | resource "kubernetes_manifest" "namespace_data_on_eks" { 22 | manifest = { 23 | "apiVersion" = "v1" 24 | "kind" = "Namespace" 25 | "metadata" = { 26 | "name" = "data-on-eks" 27 | } 28 | } 29 | } 30 | 31 | resource "kubernetes_manifest" "serviceaccount_data_on_eks" { 32 | depends_on = [ 33 | kubernetes_manifest.namespace_data_on_eks 34 | ] 35 | manifest = { 36 | "apiVersion" = "v1" 37 | "kind" = "ServiceAccount" 38 | "metadata" = { 39 | "annotations" = { 40 | "eks.amazonaws.com/role-arn" = tostring(module.data_on_eks_runner_role.iam_role_arn) 41 | } 42 | "labels" = { 43 | "app" = "data-on-eks" 44 | } 45 | "name" = "data-on-eks" 46 | "namespace" = "data-on-eks" 47 | } 48 | } 49 | } 50 | 51 | 52 | #--------------------------------------------------------------- 53 | # Argo Workflows 54 | #--------------------------------------------------------------- 55 | 56 | resource "kubernetes_manifest" "namespace_argo_workflows" { 57 | manifest = { 58 | "apiVersion" = "v1" 59 | "kind" = "Namespace" 60 | "metadata" = { 61 | "name" = "argo" 62 | } 63 | } 64 | } 65 | 66 | resource "terraform_data" "argo_workflows_keycloak_setup" { 67 | depends_on = [ 68 | kubectl_manifest.application_argocd_keycloak, 69 | kubernetes_manifest.namespace_argo_workflows 70 | ] 71 | 72 | provisioner "local-exec" { 73 | command = "./install.sh" 74 | 75 | working_dir = "${path.module}/scripts/argo-workflows" 76 | environment = { 77 | "ARGO_WORKFLOWS_REDIRECT_URL" = "${local.argo_redirect_url}" 78 | } 79 | interpreter = ["/bin/bash", "-c"] 80 | } 81 | 82 | provisioner "local-exec" { 83 | when = destroy 84 | 85 | command = "./uninstall.sh" 86 | working_dir = "${path.module}/scripts/argo-workflows" 87 | interpreter = ["/bin/bash", "-c"] 88 | } 89 | } 90 | 91 | resource "kubectl_manifest" "application_argocd_argo_workflows" { 92 | depends_on = [ 93 | terraform_data.argo_workflows_keycloak_setup 94 | ] 95 | 96 | yaml_body = templatefile("${path.module}/templates/argocd-apps/argo-workflows.yaml", { 97 | GITHUB_URL = local.repo_url 98 | KEYCLOAK_CNOE_URL = local.kc_cnoe_url 99 | ARGO_REDIRECT_URL = local.argo_redirect_url 100 | } 101 | ) 102 | } 103 | 104 | resource "kubectl_manifest" "application_argocd_argo_workflows_templates" { 105 | depends_on = [ 106 | terraform_data.argo_workflows_keycloak_setup 107 | ] 108 | 109 | yaml_body = templatefile("${path.module}/templates/argocd-apps/argo-workflows-templates.yaml", { 110 | GITHUB_URL = local.repo_url 111 | } 112 | ) 113 | } 114 | 115 | resource "kubectl_manifest" "application_argocd_argo_workflows_sso_config" { 116 | depends_on = [ 117 | 
terraform_data.argo_workflows_keycloak_setup 118 | ] 119 | 120 | yaml_body = templatefile("${path.module}/templates/argocd-apps/argo-workflows-sso-config.yaml", { 121 | GITHUB_URL = local.repo_url 122 | } 123 | ) 124 | } 125 | 126 | resource "kubectl_manifest" "ingress_argo_workflows" { 127 | depends_on = [ 128 | kubectl_manifest.application_argocd_argo_workflows, 129 | ] 130 | 131 | yaml_body = templatefile("${path.module}/templates/manifests/ingress-argo-workflows.yaml", { 132 | ARGO_WORKFLOWS_DOMAIN_NAME = local.argo_domain_name 133 | } 134 | ) 135 | } 136 | -------------------------------------------------------------------------------- /terraform/argocd-ingress.tf: -------------------------------------------------------------------------------- 1 | resource "kubectl_manifest" "ingress_argocd" { 2 | yaml_body = templatefile("${path.module}/templates/manifests/ingress-argocd.yaml", { 3 | ARGOCD_DOMAIN_NAME = local.argocd_domain_name 4 | } 5 | ) 6 | } 7 | -------------------------------------------------------------------------------- /terraform/aws-load-balancer.tf: -------------------------------------------------------------------------------- 1 | module "aws_load_balancer_role" { 2 | source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" 3 | version = "~> 5.14" 4 | 5 | role_name_prefix = "cnoe-aws-load-balancer-controller-" 6 | 7 | attach_load_balancer_controller_policy = true 8 | 9 | oidc_providers = { 10 | main = { 11 | provider_arn = data.aws_iam_openid_connect_provider.eks_oidc.arn 12 | namespace_service_accounts = ["aws-load-balancer-controller:aws-load-balancer-controller"] 13 | } 14 | } 15 | tags = var.tags 16 | } 17 | 18 | resource "kubectl_manifest" "application_argocd_aws_load_balancer_controller" { 19 | depends_on = [ module.aws_load_balancer_role ] 20 | yaml_body = templatefile("${path.module}/templates/argocd-apps/aws-load-balancer.yaml", { 21 | CLUSTER_NAME = local.cluster_name 22 | ROLE_ARN = module.aws_load_balancer_role.iam_role_arn 23 | } 24 | ) 25 | 26 | provisioner "local-exec" { 27 | command = "kubectl wait --for=jsonpath=.status.health.status=Healthy -n argocd application/aws-load-balancer-controller" 28 | 29 | interpreter = ["/bin/bash", "-c"] 30 | } 31 | 32 | provisioner "local-exec" { 33 | when = destroy 34 | 35 | command = "kubectl wait --for=delete svc ingress-nginx-controller -n ingress-nginx --timeout=300s" 36 | 37 | interpreter = ["/bin/bash", "-c"] 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /terraform/backstage.tf: -------------------------------------------------------------------------------- 1 | resource "random_password" "backstage_postgres_password" { 2 | length = 48 3 | special = true 4 | override_special = "!#" 5 | } 6 | 7 | resource "kubernetes_manifest" "namespace_backstage" { 8 | manifest = { 9 | "apiVersion" = "v1" 10 | "kind" = "Namespace" 11 | "metadata" = { 12 | "name" = "backstage" 13 | } 14 | } 15 | } 16 | 17 | resource "kubernetes_manifest" "secret_backstage_postgresql_config" { 18 | depends_on = [ 19 | kubernetes_manifest.namespace_backstage 20 | ] 21 | 22 | manifest = { 23 | "apiVersion" = "v1" 24 | "kind" = "Secret" 25 | "metadata" = { 26 | "name" = "postgresql-config" 27 | "namespace" = "backstage" 28 | } 29 | "data" = { 30 | "POSTGRES_DB" = "${base64encode("backstage")}" 31 | "POSTGRES_PASSWORD" = "${base64encode(random_password.backstage_postgres_password.result)}" 32 | "POSTGRES_USER" = "${base64encode("backstage")}" 33 | } 34 | } 35 | } 36 | 37 
| resource "terraform_data" "backstage_keycloak_setup" { 38 | depends_on = [ 39 | kubectl_manifest.application_argocd_keycloak, 40 | kubernetes_manifest.namespace_backstage 41 | ] 42 | 43 | provisioner "local-exec" { 44 | command = "./install.sh ${random_password.backstage_postgres_password.result} ${local.backstage_domain_name} ${local.kc_domain_name} ${local.argo_domain_name}" 45 | 46 | working_dir = "${path.module}/scripts/backstage" 47 | interpreter = ["/bin/bash", "-c"] 48 | } 49 | 50 | provisioner "local-exec" { 51 | when = destroy 52 | 53 | command = "./uninstall.sh" 54 | 55 | working_dir = "${path.module}/scripts/backstage" 56 | interpreter = ["/bin/bash", "-c"] 57 | } 58 | } 59 | 60 | resource "kubectl_manifest" "application_argocd_backstage" { 61 | depends_on = [ 62 | terraform_data.backstage_keycloak_setup 63 | ] 64 | 65 | yaml_body = templatefile("${path.module}/templates/argocd-apps/backstage.yaml", { 66 | GITHUB_URL = local.repo_url 67 | } 68 | ) 69 | } 70 | 71 | resource "kubectl_manifest" "ingress_backstage" { 72 | depends_on = [ 73 | kubectl_manifest.application_argocd_backstage, 74 | ] 75 | 76 | yaml_body = templatefile("${path.module}/templates/manifests/ingress-backstage.yaml", { 77 | BACKSTAGE_DOMAIN_NAME = local.backstage_domain_name 78 | } 79 | ) 80 | } 81 | -------------------------------------------------------------------------------- /terraform/cert-manager.tf: -------------------------------------------------------------------------------- 1 | resource "kubectl_manifest" "application_argocd_cert_manager" { 2 | yaml_body = templatefile("${path.module}/templates/argocd-apps/cert-manager.yaml", { 3 | REPO_URL = local.repo_url 4 | }) 5 | } 6 | 7 | resource "terraform_data" "wait_for_cert_manager" { 8 | provisioner "local-exec" { 9 | command = "kubectl wait --for=jsonpath=.status.health.status=Healthy -n argocd application/cert-manager && kubectl wait --for=jsonpath=.status.sync.status=Synced --timeout=300s -n argocd application/cert-manager" 10 | } 11 | 12 | depends_on = [kubectl_manifest.application_argocd_cert_manager] 13 | } 14 | 15 | resource "kubectl_manifest" "cluster_issuer_prod" { 16 | depends_on = [ 17 | terraform_data.wait_for_cert_manager, 18 | kubectl_manifest.application_argocd_ingress_nginx 19 | ] 20 | yaml_body = templatefile("${path.module}/templates/manifests/cluster-issuer.yaml", { 21 | REPO_URL = local.repo_url 22 | }) 23 | } 24 | -------------------------------------------------------------------------------- /terraform/crossplane.tf: -------------------------------------------------------------------------------- 1 | module "crossplane_aws_provider_role" { 2 | source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" 3 | version = "~> 5.14" 4 | 5 | role_name_prefix = "cnoe-crossplane-provider-aws" 6 | role_policy_arns = { 7 | policy = "arn:aws:iam::aws:policy/AdministratorAccess" 8 | } 9 | 10 | assume_role_condition_test = "StringLike" 11 | oidc_providers = { 12 | main = { 13 | provider_arn = data.aws_iam_openid_connect_provider.eks_oidc.arn 14 | namespace_service_accounts = ["crossplane-system:provider-aws*"] 15 | } 16 | } 17 | tags = var.tags 18 | } 19 | 20 | resource "kubectl_manifest" "application_argocd_crossplane" { 21 | yaml_body = templatefile("${path.module}/templates/argocd-apps/crossplane.yaml", { 22 | GITHUB_URL = local.repo_url 23 | } 24 | ) 25 | 26 | provisioner "local-exec" { 27 | command = "kubectl wait --for=jsonpath=.status.health.status=Healthy -n argocd application/crossplane --timeout=300s && 
kubectl wait --for=jsonpath=.status.sync.status=Synced --timeout=300s -n argocd application/crossplane" 28 | 29 | interpreter = ["/bin/bash", "-c"] 30 | } 31 | 32 | provisioner "local-exec" { 33 | when = destroy 34 | 35 | command = "./uninstall.sh" 36 | working_dir = "${path.module}/scripts/crossplane" 37 | interpreter = ["/bin/bash", "-c"] 38 | } 39 | } 40 | 41 | resource "kubectl_manifest" "crossplane_provider_controller_config" { 42 | depends_on = [ 43 | kubectl_manifest.application_argocd_crossplane, 44 | ] 45 | yaml_body = templatefile("${path.module}/templates/manifests/crossplane-aws-controller-config.yaml", { 46 | ROLE_ARN = module.crossplane_aws_provider_role.iam_role_arn 47 | } 48 | ) 49 | } 50 | 51 | resource "kubectl_manifest" "application_argocd_crossplane_provider" { 52 | depends_on = [ 53 | kubectl_manifest.application_argocd_crossplane, 54 | ] 55 | yaml_body = templatefile("${path.module}/templates/argocd-apps/crossplane-provider.yaml", { 56 | GITHUB_URL = local.repo_url 57 | } 58 | ) 59 | } 60 | 61 | resource "kubectl_manifest" "application_argocd_crossplane_compositions" { 62 | depends_on = [ 63 | kubectl_manifest.application_argocd_crossplane, 64 | ] 65 | yaml_body = templatefile("${path.module}/templates/argocd-apps/crossplane-compositions.yaml", { 66 | GITHUB_URL = local.repo_url 67 | } 68 | ) 69 | } 70 | -------------------------------------------------------------------------------- /terraform/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_eks_cluster" "target" { 2 | name = var.cluster_name 3 | } 4 | 5 | data "aws_iam_openid_connect_provider" "eks_oidc" { 6 | url = data.aws_eks_cluster.target.identity[0].oidc[0].issuer 7 | } 8 | 9 | data "aws_route53_zone" "selected" { 10 | count = local.dns_count 11 | zone_id = local.hosted_zone_id 12 | } 13 | 14 | data "aws_caller_identity" "current" {} 15 | -------------------------------------------------------------------------------- /terraform/external-dns.tf: -------------------------------------------------------------------------------- 1 | 2 | resource "aws_iam_policy" "external-dns" { 3 | count = local.dns_count 4 | 5 | name_prefix = "cnoe-external-dns-" 6 | description = "For use with External DNS Controller" 7 | policy = jsonencode( 8 | { 9 | "Version": "2012-10-17", 10 | "Statement": [ 11 | { 12 | "Effect": "Allow", 13 | "Action": [ 14 | "route53:ChangeResourceRecordSets", 15 | "route53:ListResourceRecordSets", 16 | "route53:ListTagsForResource" 17 | ], 18 | "Resource": [ 19 | "arn:aws:route53:::hostedzone/${local.hosted_zone_id}" 20 | ] 21 | }, 22 | { 23 | "Effect": "Allow", 24 | "Action": [ 25 | "route53:ListHostedZones" 26 | ], 27 | "Resource": [ 28 | "*" 29 | ] 30 | } 31 | ] 32 | } 33 | ) 34 | } 35 | 36 | resource "aws_iam_role_policy_attachment" "external_dns_role_attach" { 37 | count = local.dns_count 38 | 39 | role = module.external_dns_role[0].iam_role_name 40 | policy_arn = aws_iam_policy.external-dns[0].arn 41 | } 42 | 43 | module "external_dns_role" { 44 | source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" 45 | version = "~> 5.14" 46 | count = local.dns_count 47 | 48 | role_name_prefix = "cnoe-external-dns" 49 | oidc_providers = { 50 | main = { 51 | provider_arn = data.aws_iam_openid_connect_provider.eks_oidc.arn 52 | namespace_service_accounts = ["external-dns:external-dns"] 53 | } 54 | } 55 | tags = var.tags 56 | } 57 | 58 | resource "kubectl_manifest" "application_argocd_external_dns" { 59 | yaml_body = 
templatefile("${path.module}/templates/argocd-apps/external-dns.yaml", { 60 | GITHUB_URL = local.repo_url 61 | ROLE_ARN = module.external_dns_role[0].iam_role_arn 62 | DOMAIN_NAME = data.aws_route53_zone.selected[0].name 63 | } 64 | ) 65 | 66 | provisioner "local-exec" { 67 | command = "kubectl wait --for=jsonpath=.status.health.status=Healthy --timeout=300s -n argocd application/external-dns" 68 | 69 | interpreter = ["/bin/bash", "-c"] 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /terraform/external-secrets.tf: -------------------------------------------------------------------------------- 1 | resource "kubectl_manifest" "application_argocd_external_secrets" { 2 | yaml_body = templatefile("${path.module}/templates/argocd-apps/external-secrets.yaml", { 3 | GITHUB_URL = local.repo_url 4 | } 5 | ) 6 | 7 | provisioner "local-exec" { 8 | command = "kubectl wait --for=jsonpath=.status.health.status=Healthy --timeout=300s -n argocd application/external-secrets" 9 | 10 | interpreter = ["/bin/bash", "-c"] 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /terraform/ingress-nginx.tf: -------------------------------------------------------------------------------- 1 | resource "kubectl_manifest" "application_argocd_ingress_nginx" { 2 | depends_on = [ 3 | kubectl_manifest.application_argocd_aws_load_balancer_controller 4 | ] 5 | yaml_body = templatefile("${path.module}/templates/argocd-apps/ingress-nginx.yaml", { 6 | GITHUB_URL = local.repo_url 7 | } 8 | ) 9 | 10 | provisioner "local-exec" { 11 | command = "kubectl wait --for=jsonpath=.status.health.status=Healthy --timeout=300s -n argocd application/ingress-nginx" 12 | 13 | interpreter = ["/bin/bash", "-c"] 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /terraform/keycloak.tf: -------------------------------------------------------------------------------- 1 | 2 | #--------------------------------------------------------------- 3 | # External Secrets for Keycloak if enabled 4 | #--------------------------------------------------------------- 5 | resource "aws_iam_policy" "external-secrets" { 6 | count = local.secret_count 7 | 8 | name_prefix = "cnoe-external-secrets-" 9 | description = "For use with External Secrets Controller for Keycloak" 10 | policy = jsonencode( 11 | { 12 | "Version": "2012-10-17", 13 | "Statement": [ 14 | { 15 | "Effect": "Allow", 16 | "Action": [ 17 | "secretsmanager:GetResourcePolicy", 18 | "secretsmanager:GetSecretValue", 19 | "secretsmanager:DescribeSecret", 20 | "secretsmanager:ListSecretVersionIds" 21 | ], 22 | "Resource": [ 23 | "arn:aws:secretsmanager:${var.region}:${data.aws_caller_identity.current.account_id}:secret:cnoe/keycloak/*" 24 | ] 25 | } 26 | ] 27 | } 28 | ) 29 | } 30 | 31 | module "external_secrets_role_keycloak" { 32 | source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" 33 | version = "~> 5.14" 34 | count = local.secret_count 35 | 36 | role_name_prefix = "cnoe-external-secrets-" 37 | 38 | oidc_providers = { 39 | main = { 40 | provider_arn = data.aws_iam_openid_connect_provider.eks_oidc.arn 41 | namespace_service_accounts = ["keycloak:external-secret-keycloak"] 42 | } 43 | } 44 | tags = var.tags 45 | } 46 | 47 | resource "aws_iam_role_policy_attachment" "external_secrets_role_attach" { 48 | count = local.dns_count 49 | 50 | role = module.external_secrets_role_keycloak[0].iam_role_name 51 | policy_arn = 
aws_iam_policy.external-secrets[0].arn 52 | } 53 | 54 | # should use gitops really. 55 | resource "kubernetes_manifest" "namespace_keycloak" { 56 | count = local.secret_count 57 | 58 | manifest = { 59 | "apiVersion" = "v1" 60 | "kind" = "Namespace" 61 | "metadata" = { 62 | "name" = "keycloak" 63 | } 64 | } 65 | } 66 | 67 | resource "kubernetes_manifest" "serviceaccount_external_secret_keycloak" { 68 | count = local.secret_count 69 | depends_on = [ 70 | kubernetes_manifest.namespace_keycloak, 71 | kubectl_manifest.application_argocd_external_secrets 72 | ] 73 | 74 | manifest = { 75 | "apiVersion" = "v1" 76 | "kind" = "ServiceAccount" 77 | "metadata" = { 78 | "annotations" = { 79 | "eks.amazonaws.com/role-arn" = tostring(module.external_secrets_role_keycloak[0].iam_role_arn) 80 | } 81 | "name" = "external-secret-keycloak" 82 | "namespace" = "keycloak" 83 | } 84 | } 85 | } 86 | 87 | resource "aws_secretsmanager_secret" "keycloak_config" { 88 | count = local.secret_count 89 | 90 | description = "for use with cnoe keycloak installation" 91 | name = "cnoe/keycloak/config" 92 | recovery_window_in_days = 0 93 | } 94 | 95 | resource "aws_secretsmanager_secret_version" "keycloak_config" { 96 | count = local.secret_count 97 | 98 | secret_id = aws_secretsmanager_secret.keycloak_config[0].id 99 | secret_string = jsonencode({ 100 | KC_HOSTNAME = local.kc_domain_name 101 | KEYCLOAK_ADMIN_PASSWORD = random_password.keycloak_admin_password.result 102 | POSTGRES_PASSWORD = random_password.keycloak_postgres_password.result 103 | POSTGRES_DB = "keycloak" 104 | POSTGRES_USER = "keycloak" 105 | "user1-password" = random_password.keycloak_user_password.result 106 | }) 107 | } 108 | 109 | resource "kubectl_manifest" "keycloak_secret_store" { 110 | depends_on = [ 111 | kubectl_manifest.application_argocd_aws_load_balancer_controller, 112 | kubectl_manifest.application_argocd_external_secrets, 113 | kubernetes_manifest.serviceaccount_external_secret_keycloak 114 | ] 115 | 116 | yaml_body = templatefile("${path.module}/templates/manifests/keycloak-secret-store.yaml", { 117 | REGION = local.region 118 | } 119 | ) 120 | } 121 | 122 | #--------------------------------------------------------------- 123 | # Keycloak secrets if external secrets is not enabled 124 | #--------------------------------------------------------------- 125 | 126 | resource "kubernetes_manifest" "secret_keycloak_keycloak_config" { 127 | count = local.secret_count == 1 ? 0 : 1 128 | 129 | manifest = { 130 | "apiVersion" = "v1" 131 | "kind" = "Secret" 132 | "metadata" = { 133 | "name" = "keycloak-config" 134 | "namespace" = "keycloak" 135 | } 136 | "data" = { 137 | "KC_HOSTNAME" = "${base64encode(local.kc_domain_name)}" 138 | "KEYCLOAK_ADMIN_PASSWORD" = "${base64encode(random_password.keycloak_admin_password.result)}" 139 | } 140 | } 141 | } 142 | 143 | resource "kubernetes_manifest" "secret_keycloak_postgresql_config" { 144 | count = local.secret_count == 1 ? 0 : 1 145 | 146 | manifest = { 147 | "apiVersion" = "v1" 148 | "kind" = "Secret" 149 | "metadata" = { 150 | "name" = "postgresql-config" 151 | "namespace" = "keycloak" 152 | } 153 | "data" = { 154 | "POSTGRES_DB" = "${base64encode("keycloak")}" 155 | "POSTGRES_PASSWORD" = "${base64encode(random_password.keycloak_postgres_password.result)}" 156 | "POSTGRES_USER" = "${base64encode("keycloak")}" 157 | } 158 | } 159 | } 160 | 161 | resource "kubernetes_manifest" "secret_keycloak_keycloak_user_config" { 162 | count = local.secret_count == 1 ? 
0 : 1 163 | 164 | manifest = { 165 | "apiVersion" = "v1" 166 | "kind" = "Secret" 167 | "metadata" = { 168 | "name" = "keycloak-user-config" 169 | "namespace" = "keycloak" 170 | } 171 | "data" = { 172 | "user1-password" = "${base64encode(random_password.keycloak_user_password.result)}" 173 | } 174 | } 175 | } 176 | 177 | #--------------------------------------------------------------- 178 | # Keycloak passwords 179 | #--------------------------------------------------------------- 180 | 181 | resource "random_password" "keycloak_admin_password" { 182 | length = 48 183 | special = false 184 | override_special = "!#?" 185 | } 186 | 187 | resource "random_password" "keycloak_user_password" { 188 | length = 48 189 | special = false 190 | override_special = "!#?" 191 | } 192 | 193 | resource "random_password" "keycloak_postgres_password" { 194 | length = 48 195 | special = false 196 | override_special = "!#?" 197 | } 198 | 199 | #--------------------------------------------------------------- 200 | # Keycloak installation 201 | #--------------------------------------------------------------- 202 | 203 | resource "kubectl_manifest" "application_argocd_keycloak" { 204 | depends_on = [ 205 | kubectl_manifest.keycloak_secret_store, 206 | kubectl_manifest.application_argocd_ingress_nginx 207 | ] 208 | 209 | yaml_body = templatefile("${path.module}/templates/argocd-apps/keycloak.yaml", { 210 | GITHUB_URL = local.repo_url 211 | PATH = "${local.secret_count == 1 ? "packages/keycloak/dev-external-secrets/" : "packages/keycloak/dev/"}" 212 | } 213 | ) 214 | 215 | provisioner "local-exec" { 216 | command = "./install.sh '${random_password.keycloak_user_password.result}' '${random_password.keycloak_admin_password.result}'" 217 | 218 | working_dir = "${path.module}/scripts/keycloak" 219 | interpreter = ["/bin/bash", "-c"] 220 | } 221 | provisioner "local-exec" { 222 | when = destroy 223 | command = "./uninstall.sh" 224 | 225 | working_dir = "${path.module}/scripts/keycloak" 226 | interpreter = ["/bin/bash", "-c"] 227 | } 228 | } 229 | 230 | resource "kubectl_manifest" "ingress_keycloak" { 231 | depends_on = [ 232 | kubectl_manifest.application_argocd_keycloak, 233 | ] 234 | 235 | yaml_body = templatefile("${path.module}/templates/manifests/ingress-keycloak.yaml", { 236 | KEYCLOAK_DOMAIN_NAME = local.kc_domain_name 237 | } 238 | ) 239 | } 240 | -------------------------------------------------------------------------------- /terraform/main.tf: -------------------------------------------------------------------------------- 1 | 2 | locals { 3 | repo_url = trimsuffix(var.repo_url, "/") 4 | region = var.region 5 | tags = var.tags 6 | cluster_name = var.cluster_name 7 | hosted_zone_id = var.hosted_zone_id 8 | dns_count = var.enable_dns_management ? 1 : 0 9 | secret_count = var.enable_external_secret ? 1 : 0 10 | 11 | domain_name = var.enable_dns_management ? 
"${trimsuffix(data.aws_route53_zone.selected[0].name, ".")}" : "${var.domain_name}" 12 | kc_domain_name = "keycloak.${local.domain_name}" 13 | kc_cnoe_url = "https://${local.kc_domain_name}/realms/cnoe" 14 | argo_domain_name = "argo.${local.domain_name}" 15 | argo_redirect_url = "https://${local.argo_domain_name}/oauth2/callback" 16 | argocd_domain_name = "argocd.${local.domain_name}" 17 | backstage_domain_name = "backstage.${local.domain_name}" 18 | } 19 | 20 | 21 | provider "aws" { 22 | region = local.region 23 | default_tags { 24 | tags = local.tags 25 | } 26 | } 27 | 28 | provider "kubernetes" { 29 | config_path = "~/.kube/config" 30 | } 31 | 32 | provider "helm" { 33 | kubernetes { 34 | config_path = "~/.kube/config" 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /terraform/scripts/argo-workflows/config-payloads/client-payload.json: -------------------------------------------------------------------------------- 1 | { 2 | "protocol": "openid-connect", 3 | "clientId": "argo-workflows", 4 | "name": "Argo Workflows Client", 5 | "description": "Used for Argo Workflows SSO", 6 | "publicClient": false, 7 | "authorizationServicesEnabled": false, 8 | "serviceAccountsEnabled": false, 9 | "implicitFlowEnabled": false, 10 | "directAccessGrantsEnabled": true, 11 | "standardFlowEnabled": true, 12 | "frontchannelLogout": true, 13 | "attributes": { 14 | "saml_idp_initiated_sso_url_name": "", 15 | "oauth2.device.authorization.grant.enabled": false, 16 | "oidc.ciba.grant.enabled": false 17 | }, 18 | "alwaysDisplayInConsole": false, 19 | "rootUrl": "", 20 | "baseUrl": "", 21 | "redirectUris": [ 22 | "${ARGO_WORKFLOWS_REDIRECT_URL}" 23 | ], 24 | "webOrigins": [ 25 | "/*" 26 | ] 27 | } 28 | -------------------------------------------------------------------------------- /terraform/scripts/argo-workflows/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -o pipefail 3 | 4 | REPO_ROOT=$(git rev-parse --show-toplevel) 5 | 6 | kubectl wait --for=jsonpath=.status.health.status=Healthy -n argocd application/keycloak 7 | kubectl wait --for=condition=ready pod -l app=keycloak -n keycloak --timeout=30s 8 | echo "Creating keycloak client for Argo Workflows" 9 | 10 | ADMIN_PASSWORD=$(kubectl get secret -n keycloak keycloak-config -o go-template='{{index .data "KEYCLOAK_ADMIN_PASSWORD" | base64decode}}') 11 | kubectl port-forward -n keycloak svc/keycloak 8090:8080 > /dev/null 2>&1 & 12 | pid=$! 13 | trap '{ 14 | rm config-payloads/*-to-be-applied.json || true 15 | kill $pid 16 | }' EXIT 17 | echo "waiting for port forward to be ready" 18 | while ! 
nc -vz localhost 8090 > /dev/null 2>&1 ; do 19 | sleep 2 20 | done 21 | 22 | KEYCLOAK_TOKEN=$(curl -sS --fail-with-body -X POST -H "Content-Type: application/x-www-form-urlencoded" \ 23 | --data-urlencode "username=cnoe-admin" \ 24 | --data-urlencode "password=${ADMIN_PASSWORD}" \ 25 | --data-urlencode "grant_type=password" \ 26 | --data-urlencode "client_id=admin-cli" \ 27 | localhost:8090/realms/master/protocol/openid-connect/token | jq -e -r '.access_token') 28 | 29 | envsubst < config-payloads/client-payload.json > config-payloads/client-payload-to-be-applied.json 30 | 31 | curl -sS -H "Content-Type: application/json" \ 32 | -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \ 33 | -X POST --data @config-payloads/client-payload-to-be-applied.json \ 34 | localhost:8090/admin/realms/cnoe/clients 35 | 36 | CLIENT_ID=$(curl -sS -H "Content-Type: application/json" \ 37 | -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \ 38 | -X GET localhost:8090/admin/realms/cnoe/clients | jq -e -r '.[] | select(.clientId == "argo-workflows") | .id') 39 | 40 | export CLIENT_SECRET=$(curl -sS -H "Content-Type: application/json" \ 41 | -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \ 42 | -X GET localhost:8090/admin/realms/cnoe/clients/${CLIENT_ID} | jq -e -r '.secret') 43 | 44 | CLIENT_SCOPE_GROUPS_ID=$(curl -sS -H "Content-Type: application/json" -H "Authorization: bearer ${KEYCLOAK_TOKEN}" -X GET localhost:8090/admin/realms/cnoe/client-scopes | jq -e -r '.[] | select(.name == "groups") | .id') 45 | 46 | curl -sS -H "Content-Type: application/json" -H "Authorization: bearer ${KEYCLOAK_TOKEN}" -X PUT localhost:8090/admin/realms/cnoe/clients/${CLIENT_ID}/default-client-scopes/${CLIENT_SCOPE_GROUPS_ID} 47 | 48 | echo 'storing client secrets to argo namespace' 49 | 50 | envsubst < secret-sso.yaml | kubectl apply -f - 51 | 52 | # If TLS secret is available in /private, use it. Could be empty... 53 | if ls ${REPO_ROOT}/private/argo-workflows-tls-backup-* 1> /dev/null 2>&1; then 54 | TLS_FILE=$(ls -t ${REPO_ROOT}/private/argo-workflows-tls-backup-* | head -n1) 55 | kubectl apply -f ${TLS_FILE} 56 | fi 57 | -------------------------------------------------------------------------------- /terraform/scripts/argo-workflows/secret-sso.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: keycloak-oidc 5 | namespace: argo 6 | type: Opaque 7 | stringData: 8 | secret-key: ${CLIENT_SECRET} 9 | client-id: argo-workflows 10 | -------------------------------------------------------------------------------- /terraform/scripts/argo-workflows/uninstall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -o pipefail 3 | 4 | REPO_ROOT=$(git rev-parse --show-toplevel) 5 | NAMESPACE="argo" 6 | LABEL_SELECTOR="controller.cert-manager.io/fao=true" 7 | NAME=argo-workflows 8 | 9 | 10 | echo "backing up TLS secrets to ${REPO_ROOT}/private" 11 | 12 | mkdir -p ${REPO_ROOT}/private 13 | secrets=$(kubectl get secrets -n ${NAMESPACE} -l ${LABEL_SELECTOR} --ignore-not-found) 14 | 15 | if [[ ! 
-z "${secrets}" ]]; then 16 | kubectl get secrets -n ${NAMESPACE} -l ${LABEL_SELECTOR} -o yaml > ${REPO_ROOT}/private/${NAME}-tls-backup-$(date +%s).yaml 17 | fi 18 | 19 | kubectl delete -f secret-sso.yaml || true 20 | 21 | ADMIN_PASSWORD=$(kubectl get secret -n keycloak keycloak-config -o go-template='{{index .data "KEYCLOAK_ADMIN_PASSWORD" | base64decode}}') 22 | 23 | kubectl port-forward -n keycloak svc/keycloak 8090:8080 > /dev/null 2>&1 & 24 | pid=$! 25 | trap '{ 26 | kill $pid 27 | }' EXIT 28 | 29 | echo "waiting for port forward to be ready" 30 | while ! nc -vz localhost 8090 > /dev/null 2>&1 ; do 31 | sleep 2 32 | done 33 | 34 | echo 'deleting Keycloak client' 35 | KEYCLOAK_TOKEN=$(curl -sS --fail-with-body -X POST -H "Content-Type: application/x-www-form-urlencoded" \ 36 | --data-urlencode "username=cnoe-admin" \ 37 | --data-urlencode "password=${ADMIN_PASSWORD}" \ 38 | --data-urlencode "grant_type=password" \ 39 | --data-urlencode "client_id=admin-cli" \ 40 | localhost:8090/realms/master/protocol/openid-connect/token | jq -e -r '.access_token') 41 | 42 | CLIENT_ID=$(curl -sS -H "Content-Type: application/json" \ 43 | -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \ 44 | -X GET localhost:8090/admin/realms/cnoe/clients | jq -e -r '.[] | select(.clientId == "argo-workflows") | .id') 45 | 46 | curl -sS -H "Content-Type: application/json" \ 47 | -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \ 48 | -X DELETE localhost:8090/admin/realms/cnoe/clients/${CLIENT_ID} 49 | 50 | -------------------------------------------------------------------------------- /terraform/scripts/backstage/config-payloads/client-payload.json: -------------------------------------------------------------------------------- 1 | { 2 | "protocol": "openid-connect", 3 | "clientId": "backstage", 4 | "name": "Backstage Client", 5 | "description": "Used for Backstage SSO", 6 | "publicClient": false, 7 | "authorizationServicesEnabled": false, 8 | "serviceAccountsEnabled": false, 9 | "implicitFlowEnabled": false, 10 | "directAccessGrantsEnabled": true, 11 | "standardFlowEnabled": true, 12 | "frontchannelLogout": true, 13 | "attributes": { 14 | "saml_idp_initiated_sso_url_name": "", 15 | "oauth2.device.authorization.grant.enabled": false, 16 | "oidc.ciba.grant.enabled": false 17 | }, 18 | "alwaysDisplayInConsole": false, 19 | "rootUrl": "", 20 | "baseUrl": "", 21 | "redirectUris": [ 22 | "https://${BACKSTAGE_DOMAIN_NAME}/api/auth/keycloak-oidc/handler/frame" 23 | ], 24 | "webOrigins": [ 25 | "/*" 26 | ] 27 | } 28 | -------------------------------------------------------------------------------- /terraform/scripts/backstage/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -o pipefail 3 | 4 | REPO_ROOT=$(git rev-parse --show-toplevel) 5 | 6 | export POSTGRES_PASSWORD=${1} 7 | export BACKSTAGE_DOMAIN_NAME=${2} 8 | export KEYCLOAK_DOMAIN_NAME=${3} 9 | export ARGO_WORKFLOWS_DOMAIN_NAME=${4} 10 | export GITHUB_APP_YAML_INDENTED=$(cat ${REPO_ROOT}/private/github-integration.yaml | base64 | sed 's/^/ /') 11 | 12 | kubectl wait --for=jsonpath=.status.health.status=Healthy -n argocd application/keycloak 13 | kubectl wait --for=condition=ready pod -l app=keycloak -n keycloak --timeout=30s 14 | echo "Creating keycloak client for Backstage" 15 | 16 | ADMIN_PASSWORD=$(kubectl get secret -n keycloak keycloak-config -o go-template='{{index .data "KEYCLOAK_ADMIN_PASSWORD" | base64decode}}') 17 | 18 | kubectl port-forward -n keycloak svc/keycloak 8080:8080 > /dev/null 2>&1 & 
19 | pid=$! 20 | trap '{ 21 | rm config-payloads/*-to-be-applied.json || true 22 | kill $pid 23 | }' EXIT 24 | echo "waiting for port forward to be ready" 25 | while ! nc -vz localhost 8080 > /dev/null 2>&1 ; do 26 | sleep 2 27 | done 28 | 29 | KEYCLOAK_TOKEN=$(curl -sS --fail-with-body -X POST -H "Content-Type: application/x-www-form-urlencoded" \ 30 | --data-urlencode "username=cnoe-admin" \ 31 | --data-urlencode "password=${ADMIN_PASSWORD}" \ 32 | --data-urlencode "grant_type=password" \ 33 | --data-urlencode "client_id=admin-cli" \ 34 | localhost:8080/realms/master/protocol/openid-connect/token | jq -e -r '.access_token') 35 | 36 | envsubst < config-payloads/client-payload.json > config-payloads/client-payload-to-be-applied.json 37 | 38 | curl -sS -H "Content-Type: application/json" \ 39 | -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \ 40 | -X POST --data @config-payloads/client-payload-to-be-applied.json \ 41 | localhost:8080/admin/realms/cnoe/clients 42 | 43 | CLIENT_ID=$(curl -sS -H "Content-Type: application/json" \ 44 | -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \ 45 | -X GET localhost:8080/admin/realms/cnoe/clients | jq -e -r '.[] | select(.clientId == "backstage") | .id') 46 | 47 | export CLIENT_SECRET=$(curl -sS -H "Content-Type: application/json" \ 48 | -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \ 49 | -X GET localhost:8080/admin/realms/cnoe/clients/${CLIENT_ID} | jq -e -r '.secret') 50 | 51 | CLIENT_SCOPE_GROUPS_ID=$(curl -sS -H "Content-Type: application/json" -H "Authorization: bearer ${KEYCLOAK_TOKEN}" -X GET localhost:8080/admin/realms/cnoe/client-scopes | jq -e -r '.[] | select(.name == "groups") | .id') 52 | 53 | curl -sS -H "Content-Type: application/json" -H "Authorization: bearer ${KEYCLOAK_TOKEN}" -X PUT localhost:8080/admin/realms/cnoe/clients/${CLIENT_ID}/default-client-scopes/${CLIENT_SCOPE_GROUPS_ID} 54 | 55 | # Get ArgoCD token for Backstage 56 | kubectl port-forward svc/argocd-server -n argocd 8085:80 > /dev/null 2>&1 & 57 | pid=$! 58 | trap '{ 59 | rm config-payloads/*-to-be-applied.json || true 60 | kill $pid 61 | }' EXIT 62 | echo "waiting for port forward to be ready" 63 | while ! nc -vz localhost 8085 > /dev/null 2>&1 ; do 64 | sleep 2 65 | done 66 | 67 | pass=$(kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d) 68 | 69 | token=$(curl -sS localhost:8085/api/v1/session -d "{\"username\":\"admin\",\"password\":\"${pass}\"}" | yq .token) 70 | 71 | # THIS DOES NOT EXPIRE. Has read all permissions. 72 | argocdToken=$(curl -sS http://localhost:8085/api/v1/account/backstage/token -X POST -H "Authorization: Bearer ${token}" | yq .token) 73 | 74 | echo 'storing client secrets to backstage namespace' 75 | envsubst < secret-env-var.yaml | kubectl apply -f - 76 | envsubst < secret-integrations.yaml | kubectl apply -f - 77 | 78 | #If TLS secret is available in /private, use it. Could be empty... 
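# The matching uninstall.sh exports any cert-manager-issued TLS secrets to ${REPO_ROOT}/private as backstage-tls-backup-<timestamp>.yaml; re-applying the newest backup (ls -t | head -n1) lets a reinstall reuse the previously issued certificate instead of requesting a new one, presumably to avoid unnecessary Let's Encrypt issuance.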
79 | if ls ${REPO_ROOT}/private/backstage-tls-backup-* 1> /dev/null 2>&1; then 80 | TLS_FILE=$(ls -t ${REPO_ROOT}/private/backstage-tls-backup-* | head -n1) 81 | kubectl apply -f ${TLS_FILE} 82 | fi 83 | -------------------------------------------------------------------------------- /terraform/scripts/backstage/secret-env-var.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: backstage-env-vars 5 | namespace: backstage 6 | stringData: 7 | BACKSTAGE_FRONTEND_URL: https://${BACKSTAGE_DOMAIN_NAME} 8 | POSTGRES_HOST: postgresql.backstage.svc.cluster.local 9 | POSTGRES_PORT: '5432' 10 | POSTGRES_USER: backstage 11 | POSTGRES_PASSWORD: '${POSTGRES_PASSWORD}' 12 | ARGO_WORKFLOWS_URL: https://${ARGO_WORKFLOWS_DOMAIN_NAME} 13 | KEYCLOAK_NAME_METADATA: https://${KEYCLOAK_DOMAIN_NAME}/realms/cnoe/.well-known/openid-configuration 14 | KEYCLOAK_CLIENT_SECRET: '${CLIENT_SECRET}' 15 | ARGOCD_AUTH_TOKEN: 'argocd.token=${argocdToken}' 16 | ARGO_CD_URL: 'http://argocd-server.argocd.svc.cluster.local/api/v1/' 17 | -------------------------------------------------------------------------------- /terraform/scripts/backstage/secret-integrations.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: integrations 5 | namespace: backstage 6 | data: 7 | github-integration.yaml: | 8 | ${GITHUB_APP_YAML_INDENTED} 9 | -------------------------------------------------------------------------------- /terraform/scripts/backstage/uninstall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -o pipefail 3 | 4 | REPO_ROOT=$(git rev-parse --show-toplevel) 5 | NAMESPACE="backstage" 6 | LABEL_SELECTOR="controller.cert-manager.io/fao=true" 7 | NAME=backstage 8 | 9 | echo "backing up TLS secrets to ${REPO_ROOT}/private" 10 | 11 | mkdir -p ${REPO_ROOT}/private 12 | secrets=$(kubectl get secrets -n ${NAMESPACE} -l ${LABEL_SELECTOR} --ignore-not-found) 13 | 14 | if [[ ! -z "${secrets}" ]]; then 15 | kubectl get secrets -n ${NAMESPACE} -l ${LABEL_SELECTOR} -o yaml > ${REPO_ROOT}/private/${NAME}-tls-backup-$(date +%s).yaml 16 | fi 17 | 18 | ADMIN_PASSWORD=$(kubectl get secret -n keycloak keycloak-config -o go-template='{{index .data "KEYCLOAK_ADMIN_PASSWORD" | base64decode}}') 19 | kubectl port-forward -n keycloak svc/keycloak 8080:8080 > /dev/null 2>&1 & 20 | pid=$! 21 | trap '{ 22 | kill $pid 23 | }' EXIT 24 | 25 | echo "waiting for port forward to be ready" 26 | while ! 
nc -vz localhost 8080 > /dev/null 2>&1 ; do 27 | sleep 2 28 | done 29 | 30 | echo 'deleting Keycloak client' 31 | KEYCLOAK_TOKEN=$(curl -sS --fail-with-body -X POST -H "Content-Type: application/x-www-form-urlencoded" \ 32 | --data-urlencode "username=cnoe-admin" \ 33 | --data-urlencode "password=${ADMIN_PASSWORD}" \ 34 | --data-urlencode "grant_type=password" \ 35 | --data-urlencode "client_id=admin-cli" \ 36 | localhost:8080/realms/master/protocol/openid-connect/token | jq -e -r '.access_token') 37 | 38 | CLIENT_ID=$(curl -sS -H "Content-Type: application/json" \ 39 | -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \ 40 | -X GET localhost:8080/admin/realms/cnoe/clients | jq -e -r '.[] | select(.clientId == "backstage") | .id') 41 | 42 | curl -sS --fail-with-body -H "Content-Type: application/json" \ 43 | -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \ 44 | -X DELETE localhost:8080/admin/realms/cnoe/clients/${CLIENT_ID} 45 | -------------------------------------------------------------------------------- /terraform/scripts/crossplane/uninstall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -o pipefail 3 | 4 | while true; do 5 | provider_count=$(kubectl get --ignore-not-found=true Provider.pkg.crossplane.io | wc -l) 6 | if [ "$provider_count" -eq 0 ]; then 7 | exit 0 8 | fi 9 | echo "waiting for $provider_count providers to be deleted" 10 | sleep 10 11 | done 12 | -------------------------------------------------------------------------------- /terraform/scripts/keycloak/config-payloads/client-scope-groups-payload.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "groups", 3 | "description": "groups a user belongs to", 4 | "attributes": { 5 | "consent.screen.text": "Access to groups a user belongs to.", 6 | "display.on.consent.screen": "true", 7 | "include.in.token.scope": "true", 8 | "gui.order": "" 9 | }, 10 | "type": "default", 11 | "protocol": "openid-connect" 12 | } -------------------------------------------------------------------------------- /terraform/scripts/keycloak/config-payloads/group-admin-payload.json: -------------------------------------------------------------------------------- 1 | {"name":"admin"} -------------------------------------------------------------------------------- /terraform/scripts/keycloak/config-payloads/group-base-user-payload.json: -------------------------------------------------------------------------------- 1 | {"name":"base-user"} -------------------------------------------------------------------------------- /terraform/scripts/keycloak/config-payloads/group-mapper-payload.json: -------------------------------------------------------------------------------- 1 | { 2 | "protocol": "openid-connect", 3 | "protocolMapper": "oidc-group-membership-mapper", 4 | "name": "groups", 5 | "config": { 6 | "claim.name": "groups", 7 | "full.path": "false", 8 | "id.token.claim": "true", 9 | "access.token.claim": "true", 10 | "userinfo.token.claim": "true" 11 | } 12 | } -------------------------------------------------------------------------------- /terraform/scripts/keycloak/config-payloads/realm-payload.json: -------------------------------------------------------------------------------- 1 | {"realm":"cnoe","enabled":true} -------------------------------------------------------------------------------- /terraform/scripts/keycloak/config-payloads/user-password.json: -------------------------------------------------------------------------------- 
1 | { 2 | "temporary": false, 3 | "type": "password", 4 | "value": "${USER1_PASSWORD}" 5 | } 6 | -------------------------------------------------------------------------------- /terraform/scripts/keycloak/config-payloads/user-user1.json: -------------------------------------------------------------------------------- 1 | { 2 | "username": "user1", 3 | "email": "", 4 | "firstName": "user", 5 | "lastName": "one", 6 | "requiredActions": [], 7 | "emailVerified": false, 8 | "groups": [ 9 | "/admin" 10 | ], 11 | "enabled": true 12 | } 13 | -------------------------------------------------------------------------------- /terraform/scripts/keycloak/config-payloads/user-user2.json: -------------------------------------------------------------------------------- 1 | { 2 | "username": "user2", 3 | "email": "", 4 | "firstName": "user", 5 | "lastName": "two", 6 | "requiredActions": [], 7 | "emailVerified": false, 8 | "groups": [ 9 | "/base-user" 10 | ], 11 | "enabled": true 12 | } 13 | -------------------------------------------------------------------------------- /terraform/scripts/keycloak/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -o pipefail 3 | 4 | export USER1_PASSWORD=${1} 5 | ADMIN_PASSWORD=${2} 6 | REPO_ROOT=$(git rev-parse --show-toplevel) 7 | 8 | echo "waiting for keycloak to be ready. may take a few minutes" 9 | kubectl wait --for=jsonpath=.status.health.status=Healthy -n argocd application/keycloak --timeout=300s 10 | kubectl wait --for=condition=ready pod -l app=keycloak -n keycloak --timeout=30s 11 | 12 | # Configure keycloak. Might be better to just import 13 | kubectl port-forward -n keycloak svc/keycloak 8080:8080 > /dev/null 2>&1 & 14 | pid=$! 15 | 16 | envsubst < config-payloads/user-password.json > config-payloads/user-password-to-be-applied.json 17 | 18 | # ensure port-forward is killed 19 | trap '{ 20 | rm config-payloads/user-password-to-be-applied.json || true 21 | kill $pid 22 | }' EXIT 23 | 24 | echo "waiting for port forward to be ready" 25 | while ! nc -vz localhost 8080 > /dev/null 2>&1 ; do 26 | sleep 2 27 | done 28 | 29 | # Default token expires in one minute. May need to extend. 
very ugly 30 | KEYCLOAK_TOKEN=$(curl -sS --fail-with-body -X POST -H "Content-Type: application/x-www-form-urlencoded" \ 31 | --data-urlencode "username=cnoe-admin" \ 32 | --data-urlencode "password=${ADMIN_PASSWORD}" \ 33 | --data-urlencode "grant_type=password" \ 34 | --data-urlencode "client_id=admin-cli" \ 35 | localhost:8080/realms/master/protocol/openid-connect/token | jq -e -r '.access_token') 36 | echo "creating cnoe realm and groups" 37 | curl -sS -H "Content-Type: application/json" \ 38 | -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \ 39 | -X POST --data @config-payloads/realm-payload.json \ 40 | localhost:8080/admin/realms 41 | 42 | curl -sS -H "Content-Type: application/json" \ 43 | -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \ 44 | -X POST --data @config-payloads/client-scope-groups-payload.json \ 45 | localhost:8080/admin/realms/cnoe/client-scopes 46 | 47 | curl -sS -H "Content-Type: application/json" \ 48 | -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \ 49 | -X POST --data @config-payloads/group-admin-payload.json \ 50 | localhost:8080/admin/realms/cnoe/groups 51 | 52 | curl -sS -H "Content-Type: application/json" \ 53 | -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \ 54 | -X POST --data @config-payloads/group-base-user-payload.json \ 55 | localhost:8080/admin/realms/cnoe/groups 56 | 57 | # Create scope mapper 58 | echo 'adding group claim to tokens' 59 | CLIENT_SCOPE_GROUPS_ID=$(curl -sS -H "Content-Type: application/json" -H "Authorization: bearer ${KEYCLOAK_TOKEN}" -X GET localhost:8080/admin/realms/cnoe/client-scopes | jq -e -r '.[] | select(.name == "groups") | .id') 60 | 61 | curl -sS -H "Content-Type: application/json" \ 62 | -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \ 63 | -X POST --data @config-payloads/group-mapper-payload.json \ 64 | localhost:8080/admin/realms/cnoe/client-scopes/${CLIENT_SCOPE_GROUPS_ID}/protocol-mappers/models 65 | 66 | echo "creating test users" 67 | curl -sS -H "Content-Type: application/json" \ 68 | -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \ 69 | -X POST --data @config-payloads/user-user1.json \ 70 | localhost:8080/admin/realms/cnoe/users 71 | 72 | curl -sS -H "Content-Type: application/json" \ 73 | -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \ 74 | -X POST --data @config-payloads/user-user2.json \ 75 | localhost:8080/admin/realms/cnoe/users 76 | 77 | USER1ID=$(curl -sS -H "Content-Type: application/json" \ 78 | -H "Authorization: bearer ${KEYCLOAK_TOKEN}" 'localhost:8080/admin/realms/cnoe/users?lastName=one' | jq -r '.[0].id') 79 | USER2ID=$(curl -sS -H "Content-Type: application/json" \ 80 | -H "Authorization: bearer ${KEYCLOAK_TOKEN}" 'localhost:8080/admin/realms/cnoe/users?lastName=two' | jq -r '.[0].id') 81 | 82 | curl -sS -H "Content-Type: application/json" \ 83 | -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \ 84 | -X PUT --data @config-payloads/user-password-to-be-applied.json \ 85 | localhost:8080/admin/realms/cnoe/users/${USER1ID}/reset-password 86 | 87 | curl -sS -H "Content-Type: application/json" \ 88 | -H "Authorization: bearer ${KEYCLOAK_TOKEN}" \ 89 | -X PUT --data @config-payloads/user-password-to-be-applied.json \ 90 | localhost:8080/admin/realms/cnoe/users/${USER2ID}/reset-password 91 | 92 | # If TLS secret is available in /private, use it. Could be empty... 
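# keycloak/uninstall.sh writes these backups as keycloak-tls-backup-<timestamp>.yaml; restoring the most recent one keeps the Keycloak ingress certificate stable across uninstall/reinstall cycles.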
93 | 94 | if ls ${REPO_ROOT}/private/keycloak-tls-backup-* 1> /dev/null 2>&1; then 95 | TLS_FILE=$(ls -t ${REPO_ROOT}/private/keycloak-tls-backup-* | head -n1) 96 | kubectl apply -f ${TLS_FILE} 97 | fi 98 | -------------------------------------------------------------------------------- /terraform/scripts/keycloak/uninstall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | LABEL_SELECTOR="controller.cert-manager.io/fao=true" 4 | APP_NAME=keycloak 5 | NAMESPACE=keycloak 6 | REPO_ROOT=$(git rev-parse --show-toplevel) 7 | 8 | echo "backing up TLS secrets to ${REPO_ROOT}/private" 9 | 10 | mkdir -p ${REPO_ROOT}/private 11 | secrets=$(kubectl get secrets -n ${NAMESPACE} -l ${LABEL_SELECTOR} --ignore-not-found) 12 | 13 | if [[ ! -z "${secrets}" ]]; then 14 | kubectl get secrets -n ${NAMESPACE} -l ${LABEL_SELECTOR} -o yaml > ${REPO_ROOT}/private/${APP_NAME}-tls-backup-$(date +%s).yaml 15 | fi 16 | -------------------------------------------------------------------------------- /terraform/templates/argocd-apps/argo-workflows-sso-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: argo-workflows-sso-config 5 | namespace: argocd 6 | labels: 7 | env: dev 8 | finalizers: 9 | - resources-finalizer.argocd.argoproj.io 10 | spec: 11 | project: cnoe 12 | sources: 13 | - repoURL: ${GITHUB_URL} 14 | targetRevision: HEAD 15 | path: packages/argo-workflows-sso-config/dev 16 | destination: 17 | server: "https://kubernetes.default.svc" 18 | namespace: argo 19 | syncPolicy: 20 | automated: {} 21 | syncOptions: 22 | - CreateNamespace=true 23 | -------------------------------------------------------------------------------- /terraform/templates/argocd-apps/argo-workflows-templates.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: argo-workflows-templates 5 | namespace: argocd 6 | labels: 7 | env: dev 8 | finalizers: 9 | - resources-finalizer.argocd.argoproj.io 10 | spec: 11 | project: cnoe 12 | sources: 13 | - repoURL: ${GITHUB_URL} 14 | targetRevision: HEAD 15 | path: packages/argo-workflows-templates/dev/ 16 | destination: 17 | server: "https://kubernetes.default.svc" 18 | namespace: argo 19 | syncPolicy: 20 | automated: {} 21 | syncOptions: 22 | - CreateNamespace=true 23 | -------------------------------------------------------------------------------- /terraform/templates/argocd-apps/argo-workflows.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: argo-workflows 5 | namespace: argocd 6 | labels: 7 | env: dev 8 | finalizers: 9 | - resources-finalizer.argocd.argoproj.io 10 | spec: 11 | project: cnoe 12 | sources: 13 | - chart: argo-workflows 14 | repoURL: https://argoproj.github.io/argo-helm 15 | targetRevision: 0.31.0 16 | helm: 17 | releaseName: argo-workflows 18 | valueFiles: 19 | - $values/packages/argo-workflows/dev/values.yaml 20 | parameters: 21 | - name: server.sso.issuer 22 | value: ${KEYCLOAK_CNOE_URL} 23 | - name: server.sso.redirectUrl 24 | value: ${ARGO_REDIRECT_URL} 25 | - repoURL: ${GITHUB_URL} 26 | targetRevision: HEAD 27 | ref: values 28 | destination: 29 | server: "https://kubernetes.default.svc" 30 | namespace: argo 31 | syncPolicy: 32 | automated: {} 33 | syncOptions: 34 | - 
CreateNamespace=true 35 | -------------------------------------------------------------------------------- /terraform/templates/argocd-apps/aws-load-balancer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: aws-load-balancer-controller 5 | namespace: argocd 6 | labels: 7 | env: dev 8 | finalizers: 9 | - resources-finalizer.argocd.argoproj.io 10 | spec: 11 | project: cnoe 12 | sources: 13 | - chart: aws-load-balancer-controller 14 | repoURL: https://aws.github.io/eks-charts 15 | targetRevision: 1.5.4 16 | helm: 17 | releaseName: aws-load-balancer-controller 18 | parameters: 19 | - name: serviceAccount.name 20 | value: aws-load-balancer-controller 21 | - name: clusterName 22 | value: ${CLUSTER_NAME} 23 | - name: serviceAccount.annotations.eks\.amazonaws\.com/role-arn 24 | value: ${ROLE_ARN} 25 | destination: 26 | server: "https://kubernetes.default.svc" 27 | namespace: aws-load-balancer-controller 28 | ignoreDifferences: 29 | - group: "" 30 | kind: Secret 31 | name: aws-load-balancer-webhook 32 | namespace: aws-load-balancer-controller 33 | jsonPointers: 34 | - /data 35 | - group: "admissionregistration.k8s.io" 36 | kind: MutatingWebhookConfiguration 37 | name: aws-load-balancer-webhook 38 | namespace: aws-load-balancer-controller 39 | jsonPointers: 40 | - /webhooks[]/clientConfig/caBundle 41 | - group: "admissionregistration.k8s.io" 42 | kind: ValidatingWebhookConfiguration 43 | name: aws-load-balancer-webhook 44 | namespace: aws-load-balancer-controller 45 | jsonPointers: 46 | - /webhooks[]/clientConfig/caBundle 47 | syncPolicy: 48 | automated: {} 49 | syncOptions: 50 | - CreateNamespace=true 51 | - RespectIgnoreDifferences=true 52 | -------------------------------------------------------------------------------- /terraform/templates/argocd-apps/backstage.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: backstage 5 | namespace: argocd 6 | labels: 7 | env: dev 8 | finalizers: 9 | - resources-finalizer.argocd.argoproj.io 10 | spec: 11 | ignoreDifferences: 12 | - jsonPointers: 13 | - /data/k8s-config.yaml 14 | kind: Secret 15 | name: k8s-config 16 | namespace: backstage 17 | project: cnoe 18 | sources: 19 | - repoURL: ${GITHUB_URL} 20 | targetRevision: HEAD 21 | path: packages/backstage/dev/ 22 | destination: 23 | server: "https://kubernetes.default.svc" 24 | namespace: backstage 25 | syncPolicy: 26 | automated: {} 27 | syncOptions: 28 | - CreateNamespace=true 29 | - RespectIgnoreDifferences=true 30 | -------------------------------------------------------------------------------- /terraform/templates/argocd-apps/cert-manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: cert-manager 5 | namespace: argocd 6 | labels: 7 | env: dev 8 | finalizers: 9 | - resources-finalizer.argocd.argoproj.io 10 | spec: 11 | project: cnoe 12 | sources: 13 | - chart: cert-manager 14 | repoURL: https://charts.jetstack.io 15 | targetRevision: 1.12.2 16 | helm: 17 | releaseName: cert-manager 18 | valueFiles: 19 | - $values/packages/cert-manager/dev/values.yaml 20 | - repoURL: ${REPO_URL} 21 | targetRevision: HEAD 22 | ref: values 23 | destination: 24 | server: "https://kubernetes.default.svc" 25 | namespace: cert-manager 26 | syncPolicy: 27 | automated: {} 
28 | syncOptions: 29 | - CreateNamespace=true 30 | -------------------------------------------------------------------------------- /terraform/templates/argocd-apps/crossplane-compositions.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: crossplane-compositions 5 | namespace: argocd 6 | labels: 7 | env: dev 8 | finalizers: 9 | - resources-finalizer.argocd.argoproj.io 10 | spec: 11 | project: cnoe 12 | source: 13 | repoURL: ${GITHUB_URL} 14 | targetRevision: HEAD 15 | path: packages/crossplane-compositions/dev/ 16 | destination: 17 | server: "https://kubernetes.default.svc" 18 | namespace: crossplane-system 19 | syncPolicy: 20 | automated: {} 21 | syncOptions: [] 22 | -------------------------------------------------------------------------------- /terraform/templates/argocd-apps/crossplane-provider.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: crossplane-provider 5 | namespace: argocd 6 | labels: 7 | env: dev 8 | finalizers: 9 | - resources-finalizer.argocd.argoproj.io 10 | spec: 11 | project: cnoe 12 | source: 13 | repoURL: ${GITHUB_URL} 14 | targetRevision: HEAD 15 | path: packages/crossplane/dev/ 16 | destination: 17 | server: "https://kubernetes.default.svc" 18 | namespace: crossplane-system 19 | syncPolicy: 20 | automated: {} 21 | syncOptions: [] 22 | -------------------------------------------------------------------------------- /terraform/templates/argocd-apps/crossplane.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: crossplane 5 | namespace: argocd 6 | labels: 7 | env: dev 8 | finalizers: 9 | - resources-finalizer.argocd.argoproj.io 10 | spec: 11 | project: cnoe 12 | sources: 13 | - chart: crossplane 14 | repoURL: https://charts.crossplane.io/stable 15 | targetRevision: 1.13.2 16 | helm: 17 | releaseName: crossplane 18 | valueFiles: 19 | - $values/packages/crossplane/dev/values.yaml 20 | - repoURL: ${GITHUB_URL} 21 | targetRevision: HEAD 22 | ref: values 23 | destination: 24 | server: "https://kubernetes.default.svc" 25 | namespace: crossplane-system 26 | syncPolicy: 27 | automated: {} 28 | syncOptions: 29 | - CreateNamespace=true 30 | -------------------------------------------------------------------------------- /terraform/templates/argocd-apps/external-dns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: external-dns 5 | namespace: argocd 6 | labels: 7 | env: dev 8 | finalizers: 9 | - resources-finalizer.argocd.argoproj.io 10 | spec: 11 | project: cnoe 12 | sources: 13 | - chart: external-dns 14 | repoURL: https://kubernetes-sigs.github.io/external-dns/ 15 | targetRevision: 1.13.0 16 | helm: 17 | releaseName: external-dns 18 | valueFiles: 19 | - $values/packages/external-dns/dev/values.yaml 20 | parameters: 21 | - name: serviceAccount.annotations.eks\.amazonaws\.com/role-arn 22 | value: ${ROLE_ARN} 23 | - name: domainFilters[0] 24 | value: ${DOMAIN_NAME} 25 | - repoURL: ${GITHUB_URL} 26 | targetRevision: HEAD 27 | ref: values 28 | destination: 29 | server: "https://kubernetes.default.svc" 30 | namespace: external-dns 31 | syncPolicy: 32 | automated: {} 33 | syncOptions: 34 | - CreateNamespace=true 
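# Multi-source Application: the external-dns chart is pulled from the upstream Helm repository, while the $values ref points back at this repo so packages/external-dns/dev/values.yaml is used, with the IRSA role ARN and domain filter injected as Helm parameters by Terraform's templatefile().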
35 | -------------------------------------------------------------------------------- /terraform/templates/argocd-apps/external-secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: external-secrets 5 | namespace: argocd 6 | labels: 7 | env: dev 8 | finalizers: 9 | - resources-finalizer.argocd.argoproj.io 10 | spec: 11 | project: cnoe 12 | sources: 13 | - chart: external-secrets 14 | repoURL: https://charts.external-secrets.io 15 | targetRevision: "0.9.2" 16 | helm: 17 | releaseName: external-secrets 18 | valueFiles: 19 | - $values/packages/external-secrets/dev/values.yaml 20 | - repoURL: ${GITHUB_URL} 21 | targetRevision: HEAD 22 | ref: values 23 | destination: 24 | server: "https://kubernetes.default.svc" 25 | namespace: external-secrets 26 | syncPolicy: 27 | automated: {} 28 | syncOptions: 29 | - CreateNamespace=true 30 | -------------------------------------------------------------------------------- /terraform/templates/argocd-apps/ingress-nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: ingress-nginx 5 | namespace: argocd 6 | labels: 7 | env: dev 8 | finalizers: 9 | - resources-finalizer.argocd.argoproj.io 10 | spec: 11 | project: cnoe 12 | sources: 13 | - chart: ingress-nginx 14 | repoURL: https://kubernetes.github.io/ingress-nginx 15 | targetRevision: 4.7.0 16 | helm: 17 | releaseName: ingress-nginx 18 | valueFiles: 19 | - $values/packages/ingress-nginx/dev/values.yaml 20 | - repoURL: ${GITHUB_URL} 21 | targetRevision: HEAD 22 | ref: values 23 | destination: 24 | server: "https://kubernetes.default.svc" 25 | namespace: ingress-nginx 26 | syncPolicy: 27 | automated: {} 28 | syncOptions: 29 | - CreateNamespace=true 30 | -------------------------------------------------------------------------------- /terraform/templates/argocd-apps/keycloak.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: keycloak 5 | namespace: argocd 6 | labels: 7 | env: dev 8 | finalizers: 9 | - resources-finalizer.argocd.argoproj.io 10 | spec: 11 | project: cnoe 12 | sources: 13 | - repoURL: ${GITHUB_URL} 14 | targetRevision: HEAD 15 | path: ${PATH} 16 | destination: 17 | server: "https://kubernetes.default.svc" 18 | namespace: keycloak 19 | syncPolicy: 20 | automated: {} 21 | syncOptions: 22 | - CreateNamespace=true 23 | -------------------------------------------------------------------------------- /terraform/templates/manifests/cluster-issuer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt-prod 5 | spec: 6 | acme: 7 | server: https://acme-v02.api.letsencrypt.org/directory 8 | privateKeySecretRef: 9 | name: letsencrypt-prod 10 | solvers: 11 | - http01: 12 | ingress: 13 | ingressClassName: nginx 14 | -------------------------------------------------------------------------------- /terraform/templates/manifests/crossplane-aws-controller-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: pkg.crossplane.io/v1alpha1 2 | kind: ControllerConfig 3 | metadata: 4 | name: provider-aws-config 5 | annotations: 6 | eks.amazonaws.com/role-arn: ${ROLE_ARN} 7 | spec: 8 | 
podSecurityContext: 9 | fsGroup: 2000 10 | args: 11 | - --debug 12 | - --enable-management-policies 13 | -------------------------------------------------------------------------------- /terraform/templates/manifests/ingress-argo-workflows.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: argo-workflows 5 | namespace: argo 6 | annotations: 7 | cert-manager.io/cluster-issuer: 'letsencrypt-prod' 8 | spec: 9 | ingressClassName: nginx 10 | tls: 11 | - hosts: 12 | - ${ARGO_WORKFLOWS_DOMAIN_NAME} 13 | secretName: argo-workflows-prod-tls 14 | rules: 15 | - host: ${ARGO_WORKFLOWS_DOMAIN_NAME} 16 | http: 17 | paths: 18 | - path: / 19 | pathType: Prefix 20 | backend: 21 | service: 22 | name: argo-workflows-server 23 | port: 24 | number: 2746 25 | -------------------------------------------------------------------------------- /terraform/templates/manifests/ingress-argocd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: argocd-server-ingress 5 | namespace: argocd 6 | annotations: 7 | cert-manager.io/cluster-issuer: 'letsencrypt-prod' 8 | spec: 9 | ingressClassName: nginx 10 | tls: 11 | - hosts: 12 | - ${ARGOCD_DOMAIN_NAME} 13 | secretName: argocd-prod-tls 14 | rules: 15 | - host: ${ARGOCD_DOMAIN_NAME} 16 | http: 17 | paths: 18 | - path: / 19 | pathType: Prefix 20 | backend: 21 | service: 22 | name: argocd-server 23 | port: 24 | name: https 25 | -------------------------------------------------------------------------------- /terraform/templates/manifests/ingress-backstage.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: backstage 5 | namespace: backstage 6 | annotations: 7 | cert-manager.io/cluster-issuer: 'letsencrypt-prod' 8 | spec: 9 | ingressClassName: nginx 10 | tls: 11 | - hosts: 12 | - ${BACKSTAGE_DOMAIN_NAME} 13 | secretName: backstage-prod-tls 14 | rules: 15 | - host: ${BACKSTAGE_DOMAIN_NAME} 16 | http: 17 | paths: 18 | - path: / 19 | pathType: Prefix 20 | backend: 21 | service: 22 | name: backstage 23 | port: 24 | number: 7007 25 | -------------------------------------------------------------------------------- /terraform/templates/manifests/ingress-keycloak.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: keycloak 5 | namespace: keycloak 6 | annotations: 7 | cert-manager.io/cluster-issuer: 'letsencrypt-prod' 8 | spec: 9 | ingressClassName: nginx 10 | tls: 11 | - hosts: 12 | - ${KEYCLOAK_DOMAIN_NAME} 13 | secretName: keycloak-prod-tls 14 | rules: 15 | - host: ${KEYCLOAK_DOMAIN_NAME} 16 | http: 17 | paths: 18 | - path: /realms/master 19 | pathType: Prefix 20 | backend: 21 | service: 22 | name: keycloak 23 | port: 24 | number: 8081 25 | - path: / 26 | pathType: Exact 27 | backend: 28 | service: 29 | name: keycloak 30 | port: 31 | number: 8081 32 | - path: /realms 33 | pathType: Prefix 34 | backend: 35 | service: 36 | name: keycloak 37 | port: 38 | number: 8080 39 | - path: /resources 40 | pathType: Prefix 41 | backend: 42 | service: 43 | name: keycloak 44 | port: 45 | number: 8080 46 | -------------------------------------------------------------------------------- /terraform/templates/manifests/keycloak-secret-store.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1beta1 2 | kind: SecretStore 3 | metadata: 4 | name: keycloak 5 | namespace: keycloak 6 | spec: 7 | provider: 8 | aws: 9 | service: SecretsManager 10 | region: ${REGION} 11 | auth: 12 | jwt: 13 | serviceAccountRef: 14 | name: external-secret-keycloak 15 | -------------------------------------------------------------------------------- /terraform/variables.tf: -------------------------------------------------------------------------------- 1 | variable "repo_url" { 2 | description = "Repository URL where application definitions are stored" 3 | default = "https://github.com/manabuOrg/ref-impl" 4 | type = string 5 | } 6 | 7 | variable "tags" { 8 | description = "Tags to apply to AWS resources" 9 | default = { 10 | env = "dev" 11 | project = "cnoe" 12 | } 13 | type = map(string) 14 | } 15 | 16 | variable "region" { 17 | description = "Region" 18 | type = string 19 | default = "us-west-2" 20 | } 21 | 22 | variable "cluster_name" { 23 | description = "EKS Cluster name" 24 | default = "cnoe-ref-impl" 25 | type = string 26 | } 27 | 28 | variable "hosted_zone_id" { 29 | description = "If using external DNS, specify the Route53 hosted zone ID. Required if enable_dns_management is set to true." 30 | default = "Z0202147IFM0KVTW2P35" 31 | type = string 32 | } 33 | 34 | variable "domain_name" { 35 | description = "If external DNS is not used, this value must be provided." 36 | default = "svc.cluster.local" 37 | type = string 38 | } 39 | 40 | variable "organization_url" { 41 | description = "GitHub organization URL" 42 | default = "https://github.com/cnoe-io" 43 | type = string 44 | } 45 | 46 | variable "enable_dns_management" { 47 | description = "Do you want to use external-dns to manage DNS records in Route53?" 48 | default = true 49 | type = bool 50 | } 51 | 52 | variable "enable_external_secret" { 53 | description = "Do you want to use External Secrets with AWS Secrets Manager to manage Keycloak secrets?" 54 | default = true 55 | type = bool 56 | } 57 | -------------------------------------------------------------------------------- /terraform/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.5.5" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.17" 8 | } 9 | kubernetes = { 10 | source = "hashicorp/kubernetes" 11 | version = ">= 2.23" 12 | } 13 | random = { 14 | source = "hashicorp/random" 15 | version = ">= 3.5.1" 16 | } 17 | kubectl = { 18 | source = "alekc/kubectl" 19 | version = ">= 2.0.0" 20 | } 21 | } 22 | } 23 | --------------------------------------------------------------------------------