├── .gitignore ├── COPYRIGHT.md ├── LICENSE.md ├── README.md ├── ansible.cfg ├── collections └── requirements.yml ├── config_vars.sh ├── default.config.yml ├── docs ├── aks │ ├── README.md │ ├── aks.custom.config.yml │ ├── aks.inventory │ └── images │ │ ├── azure_login_step_1.jpg │ │ ├── azure_login_step_2.png │ │ ├── azure_login_step_3.png │ │ ├── azure_login_step_4.png │ │ ├── azure_login_step_5.png │ │ ├── azure_login_step_6.png │ │ └── azure_login_step_7.png ├── dkp │ ├── README.md │ └── dkp.default.config.yml ├── eks │ ├── README.md │ ├── eks.custom.config.yml │ ├── eks.inventory │ └── images │ │ └── aws_login.png ├── gke │ ├── README.md │ ├── gke.custom.config.yml │ ├── gke.inventory │ └── images │ │ ├── gcloud_cli_signin.jpg │ │ ├── hosted_zone.jpg │ │ └── records.jpg ├── k3s │ ├── README.md │ ├── k3s.default.config.yml │ ├── k3s.inventory │ └── k3s.offline.default.config.yml └── rke2 │ ├── README.md │ ├── deploy-rke2-cluster │ └── deploy-rke2-cluster.yaml │ ├── rke2.default.config.yml │ └── rke2.inventory ├── inventory ├── playbooks ├── apply_cloud_permissions.yml ├── assertions.yml ├── backup.yml ├── create_bundle.yml ├── group_vars │ └── all.yml ├── install_ascender.yml ├── install_ledger.yml ├── install_react.yml ├── kubernetes_setup.yml ├── restore.yml ├── roles │ ├── apply_permissions │ │ ├── tasks │ │ │ └── apply_permissions_eks.yml │ │ └── templates │ │ │ └── eks │ │ │ └── iam_policies │ │ │ ├── ascenderinstallpermissions.json │ │ │ ├── ascenderinstallpermissions_all.json │ │ │ ├── eksallaccess.json │ │ │ └── iamlimitedaccess.json │ ├── ascender_backup │ │ ├── tasks │ │ │ └── ascender_backup.yml │ │ └── templates │ │ │ └── ascender-backup.yml │ ├── ascender_install │ │ ├── defaults │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── ascender_install_aks.yml │ │ │ ├── ascender_install_dkp.yml │ │ │ ├── ascender_install_eks.yml │ │ │ ├── ascender_install_gke.yml │ │ │ ├── ascender_install_k3s.yml │ │ │ └── ascender_install_rke2.yml │ │ └── templates │ │ │ ├── 
ascender-deployment │ │ │ ├── additional-spec.yml │ │ │ ├── ascender-deployment-aks.yml │ │ │ ├── ascender-deployment-dkp.yml │ │ │ ├── ascender-deployment-eks.yml │ │ │ ├── ascender-deployment-gke.yml │ │ │ ├── ascender-deployment-k3s.yml │ │ │ └── ascender-deployment-rke2.yml │ │ │ ├── awx-operator │ │ │ └── kustomization.j2 │ │ │ └── storage-classes │ │ │ ├── aks-sc.yml │ │ │ ├── eks-sc.yml │ │ │ └── gke-sc.yml │ ├── ascender_react │ │ ├── tasks │ │ │ ├── react_install_aks.yml │ │ │ ├── react_install_eks.yml │ │ │ ├── react_install_gke.yml │ │ │ └── react_install_k3s.yml │ │ └── templates │ │ │ ├── eda-operator │ │ │ └── kustomization.j2 │ │ │ └── react-deployment │ │ │ ├── react-deployment-aks.yml │ │ │ ├── react-deployment-eks.yml │ │ │ ├── react-deployment-gke.yml │ │ │ └── react-deployment-k3s.yml │ ├── ascender_restore │ │ ├── tasks │ │ │ └── ascender_restore.yml │ │ └── templates │ │ │ └── ascender-restore.yml │ ├── common │ │ ├── tasks │ │ │ ├── aks_packages.yml │ │ │ ├── eks_packages.yml │ │ │ ├── gke_packages.yml │ │ │ └── main.yml │ │ └── vars │ │ │ └── main.yml │ ├── k8s_setup │ │ ├── files │ │ │ ├── aks_deploy │ │ │ │ ├── main.tf │ │ │ │ ├── providers.tf │ │ │ │ └── vars.tf │ │ │ ├── eks_deploy │ │ │ │ ├── main.tf │ │ │ │ ├── providers.tf │ │ │ │ └── vars.tf │ │ │ └── gke_deploy │ │ │ │ ├── main.tf │ │ │ │ ├── providers.tf │ │ │ │ └── vars.tf │ │ ├── tasks │ │ │ ├── k8s_setup_aks.yml │ │ │ ├── k8s_setup_dkp.yml │ │ │ ├── k8s_setup_eks.yml │ │ │ ├── k8s_setup_gke.yml │ │ │ ├── k8s_setup_k3s.yml │ │ │ └── k8s_setup_rke2.yml │ │ └── templates │ │ │ └── eks │ │ │ ├── cert-manager.yml │ │ │ ├── ebs-scsi-driver-policy.json │ │ │ ├── ebs-scsi-driver-role.json │ │ │ ├── eks-cluster-manifest.yml │ │ │ ├── iam-policy.json │ │ │ ├── ingress-class-params.yml │ │ │ └── ingress-controller.yml │ ├── ledger_install │ │ ├── defaults │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── ledger_install_aks.yml │ │ │ ├── ledger_install_dkp.yml │ │ │ ├── ledger_install_eks.yml │ │ 
│ ├── ledger_install_gke.yml │ │ │ ├── ledger_install_k3s.yml │ │ │ └── ledger_install_rke2.yml │ │ └── templates │ │ │ ├── ledger_deployment_aks.yaml │ │ │ ├── ledger_deployment_dkp.yaml │ │ │ ├── ledger_deployment_eks.yaml │ │ │ ├── ledger_deployment_gke.yaml │ │ │ ├── ledger_deployment_k3s.yaml │ │ │ ├── ledger_deployment_registry_secret.yaml │ │ │ └── ledger_deployment_rke2.yaml │ └── setup_playbooks │ │ ├── tasks │ │ ├── ascender_credentials_aks.yml │ │ ├── ascender_credentials_dkp.yml │ │ ├── ascender_credentials_eks.yml │ │ ├── ascender_credentials_gke.yml │ │ ├── ascender_credentials_k3s.yml │ │ ├── ascender_credentials_rke2.yml │ │ ├── inventories.yml │ │ ├── main.yml │ │ ├── projects.yml │ │ ├── surveys │ │ │ ├── patching.json │ │ │ └── selinux.json │ │ ├── templates.yml │ │ └── workflow_templates.yml │ │ └── vars │ │ └── main.yml ├── setup.yml └── setup_playbooks.yml └── setup.sh /.gitignore: -------------------------------------------------------------------------------- 1 | collections/ 2 | ascender-install-instructions/ 3 | inventory 4 | inventory.yml 5 | custom.config.yml 6 | offline/ 7 | .vscode/* 8 | ascender_install_artifacts/* 9 | -------------------------------------------------------------------------------- /COPYRIGHT.md: -------------------------------------------------------------------------------- 1 | Copyright (c) 2023, Ctrl IQ, Inc. All rights reserved. 
2 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | nocows = True 3 | roles_path = ./roles:/etc/ansible/roles 4 | collections_path = ./collections:/etc/ansible/collections 5 | inventory = inventory 6 | become = false 7 | host_key_checking = false -------------------------------------------------------------------------------- /collections/requirements.yml: -------------------------------------------------------------------------------- 1 | collections: 2 | - name: amazon.aws 3 | version: ">=6.5.0" 4 | - name: azure.azcollection 5 | version: ">=2.4.0" 6 | - name: google.cloud 7 | version: ">=1.3.0" 8 | - name: ansible.posix 9 | version: ">=1.5.4" 10 | - name: awx.awx 11 | version: ">=22.3.0" 12 | - name: community.docker 13 | version: ">=3.7.0" 14 | - name: community.general 15 | version: ">=8.3.0" 16 | - name: kubernetes.core 17 | version: ">=2.4.0" -------------------------------------------------------------------------------- /docs/aks/aks.custom.config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This variable specificies which Kubernetes platform Ascender and its components will be installed on. 3 | k8s_platform: aks 4 | # Boolean indicating whether or not the kubeconfig file needs to be downloaded to the Ansible controller 5 | download_kubeconfig: true 6 | # Determines whether to use HTTP or HTTPS for Ascender and Ledger. 7 | # If set to https, you MUST provide certificate/key options for the Installer to use. 8 | k8s_lb_protocol: https 9 | # Determines whether to use Azure DNS Domain Management (which is automated) 10 | # Or a third-party service (e.g., Cloudflare, GoDaddy, etc.) 
11 | # If this value is set to false, you will have to manually set an A record for 12 | # {{ASCENDER_HOSTNAME }} and {{ LEDGER_HOSTNAME }} to point to the Azure 13 | # Loadbalancers 14 | USE_AZURE_DNS: true 15 | # The name of the aks cluster to install Ascender on - if it does not already exist, the installer can set it up 16 | AKS_CLUSTER_NAME: ascender-aks-cluster 17 | # Specifies whether the AKS cluster needs to be provisioned (provision), exists but needs to be configured to support Ascender (configure), or exists and needs nothing done before installing Ascender (no_action) 18 | AKS_CLUSTER_STATUS: provision 19 | # The Azure region hosting the aks cluster 20 | AKS_CLUSTER_REGION: eastus 21 | # The kubernetes version for the aks cluster; available kubernetes versions can be found here: 22 | AKS_K8S_VERSION: "1.29" 23 | # The aks worker node instance types 24 | AKS_INSTANCE_TYPE: "Standard_D2_v2" 25 | # The desired number of aks worker nodes 26 | AKS_NUM_WORKER_NODES: 3 27 | # The volume size of aks worker nodes in GB 28 | AKS_WORKER_VOLUME_SIZE: 100 29 | # TLS Certificate file location on the local installing machine 30 | tls_crt_path: "/home/rocky/ascender.crt" 31 | # TLS Private Key file location on the local installing machine 32 | tls_key_path: "/home/rocky/ascender.key" 33 | # CA Bundle that contains both your CA Cert and other external CA bundles 34 | # Such as the ones located at /etc/ssl/certs/ca-bundle.crt 35 | # To create: "cat /etc/ssl/certs/ca-bundle.crt ~/myca.crt > ~/my-ca-bundle.crt" 36 | #custom_cacert_bundle: "~/my-ca-bundle.crt" 37 | # LDAP CA Cert 38 | #custom_ldap_cacert: "~/my-ldap-ca.crt" 39 | # A directory in which to place both temporary artifacts 40 | # and timestamped Kubernetes Manifests to make Ascender/Ledger easy 41 | # to uninstall 42 | tmp_dir: "{{ playbook_dir}}/../ascender_install_artifacts" 43 | # DNS resolvable hostname for Ascender service. This is required for install. 
44 | ASCENDER_HOSTNAME: ascender.example.com 45 | # DNS domain for Ascender service. This is required when hosting on cloud services. 46 | ASCENDER_DOMAIN: example.com 47 | # Namespace for Ascender Kubernetes objects 48 | ASCENDER_NAMESPACE: ascender 49 | # Administrator username for Ascender 50 | ASCENDER_ADMIN_USER: admin 51 | # Administrator password for Ascender 52 | ASCENDER_ADMIN_PASSWORD: "myadminpassword" 53 | # The image tag indicating the version of Ascender you wish to install 54 | ASCENDER_VERSION: 25.0.0 55 | # The version of the AWX Operator used to install Ascender and its components 56 | ANSIBLE_OPERATOR_VERSION: 2.19.1 57 | # Determines whether to keep the secrets required to encrypt within Ascender (important when backing up) 58 | ascender_garbage_collect_secrets: false 59 | # The number of Ascender pod replicas to deploy 60 | ascender_replicas: 1 61 | # Determines whether or not Ledger will be installed 62 | LEDGER_INSTALL: true 63 | # DNS resolvable hostname for Ledger service. 
This is required for install 64 | LEDGER_HOSTNAME: ledger.example.com 65 | # Number of replicas for the Ledger web container 66 | ledger_web_replicas: 1 67 | # Number of replicas for the Ledger Parser container 68 | ledger_parser_replicas: 1 69 | # The image tag indicating the version of Ledger you wish to install 70 | LEDGER_VERSION: latest 71 | # The Kubernetes namespace in which Ledger objects will live 72 | LEDGER_NAMESPACE: ledger 73 | # Admin password for Ledger (the username is admin by default) 74 | LEDGER_ADMIN_PASSWORD: myadminpassword 75 | # Password for Ledger database 76 | LEDGER_DB_PASSWORD: mydbpassword 77 | -------------------------------------------------------------------------------- /docs/aks/aks.inventory: -------------------------------------------------------------------------------- 1 | [localhost] 2 | localhost ansible_host=localhost ansible_connection=local ansible_user=rocky 3 | 4 | [ascender] 5 | ascender_host ansible_host=localhost ansible_connection=local ansible_user=rocky -------------------------------------------------------------------------------- /docs/aks/images/azure_login_step_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ctrliq/ascender-install/34827417c6055fd5494b6d981fc6fe130bfad653/docs/aks/images/azure_login_step_1.jpg -------------------------------------------------------------------------------- /docs/aks/images/azure_login_step_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ctrliq/ascender-install/34827417c6055fd5494b6d981fc6fe130bfad653/docs/aks/images/azure_login_step_2.png -------------------------------------------------------------------------------- /docs/aks/images/azure_login_step_3.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ctrliq/ascender-install/34827417c6055fd5494b6d981fc6fe130bfad653/docs/aks/images/azure_login_step_3.png -------------------------------------------------------------------------------- /docs/aks/images/azure_login_step_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ctrliq/ascender-install/34827417c6055fd5494b6d981fc6fe130bfad653/docs/aks/images/azure_login_step_4.png -------------------------------------------------------------------------------- /docs/aks/images/azure_login_step_5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ctrliq/ascender-install/34827417c6055fd5494b6d981fc6fe130bfad653/docs/aks/images/azure_login_step_5.png -------------------------------------------------------------------------------- /docs/aks/images/azure_login_step_6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ctrliq/ascender-install/34827417c6055fd5494b6d981fc6fe130bfad653/docs/aks/images/azure_login_step_6.png -------------------------------------------------------------------------------- /docs/aks/images/azure_login_step_7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ctrliq/ascender-install/34827417c6055fd5494b6d981fc6fe130bfad653/docs/aks/images/azure_login_step_7.png -------------------------------------------------------------------------------- /docs/dkp/README.md: -------------------------------------------------------------------------------- 1 | The Ascender installer is a script that makes for relatively easy 2 | install of Ascender Automation Platform on Kubernetes platforms of 3 | multiple flavors. 
The installer is being expanded to new Kubernetes 4 | platforms as users/contributors allow, and if you have specific needs 5 | for a platform not yet supported, please submit an issue to this 6 | Github repository. 7 | 8 | ## Table of Contents 9 | 10 | - [General Prerequisites](#general-prerequisites) 11 | - [DKP-specific Install Notes](#dkp-specific-install-notes) 12 | - [DKP-specific Prerequisites](#dkp-specific-prerequisites) 13 | - [Ascender Install Instructions](#ascender-install-instructions) 14 | 15 | ## General Prerequisites 16 | 17 | If you have not done so already, be sure to follow the general 18 | prerequisites found in the [Ascender-Install main 19 | README](../../README.md#general-prerequisites) 20 | 21 | ## DKP-specific Install Notes 22 | 23 | - D2iQ Kubernetes Platform (hereafter referred to as DKP) is used by a broad array of government agencies, and as such, there is an installer for it here. 24 | - As DKP can be installed on a number of infrastructure/cloud platforms, this installer will provide general instructions for setting up a DKP cluster. The actual install playbooks assume an **EXISTING** DKP kubernetes cluster. 25 | - The DKP installer has been tested with the following parameters: 26 | - [DKP version 2.5](https://docs.d2iq.com/dkp/2.5/day-0-basic-installs-by-infrastructure). 27 | - Whether you are working with an existing DKP cluster, or have to create a new one, you will have to [download the DKP binary](https://docs.d2iq.com/dkp/2.5/download-dkp) for either MacOS or Linux, whichever you are managing the DKP cluster from. 28 | - [DKP Essential](https://d2iq.com/products/essential) License, for a single cluster deployment 29 | - [Traefik Labs ingress controller](https://traefik.io/solutions/kubernetes-ingress/) - this is provided by the [Kommander](https://d2iq.com/products/kommander) Management Plane as part of DKP. 
30 | - vSphere 7.0.3 with appropriate [user permissions](https://docs.d2iq.com/dkp/2.5/vsphere-minimum-user-permissions) 31 | - SSL Certificate and Key 32 | - To enable HTTPS on your website, you need to provide the Ascender 33 | installer with an SSL Certificate file, and a Private Key 34 | file. While these can be self-signed certificates, it is best 35 | practice to use a trusted certificate, issued by a Certificate 36 | Authority. A good way to generate a trusted Certificate for the 37 | purpose of sandboxing, is to use the free Certificate Authority, 38 | [Let's Encrypt](https://letsencrypt.org/getting-started/). 39 | - Once you have a Certificate and Private Key file, make sure they 40 | are present on the Ascender installing server, and specify their 41 | locations in the default config file, with the variables 42 | `tls_crt_path` and `tls_key_path`, respectively. The installer will 43 | parse these files for their content, and use the content to create 44 | a Kubernetes TLS Secret for HTTPS enablement. 45 | 46 | ## DKP-specific Prerequisites 47 | 48 | ### If there is no existing DKP Cluster 49 | 50 | Keep in mind that these instructions, while somewhat general, will help primarily with the setup described in the previous section, [DKP-specific Install Notes](#dkp-specific-install-notes). 51 | 52 | - Installation instructions for DKP, for every platform it runs upon, can be found [here](https://docs.d2iq.com/dkp/2.5/day-0-basic-installs-by-infrastructure). 53 | - Make sure you have appropriate permissions for whatever platform. For example, for AWS, you'll need proper IAM Roles/Policies. 54 | - Create a DKP Cluster API compliant image for your DKP cluster nodes: 55 | - Depending on where you are deploying DKP, this may be done via the [Konvoy Image Builder](https://docs.d2iq.com/dkp/2.5/konvoy-image-builder), or an optimized image provided by D2IQ on the cloud of your choice. 
56 | - Create Bootstrap cluster (required for vSphere, GCP, Azure, and Pre-provisioned deployments) 57 | - To create Kubernetes clusters, Konvoy uses Cluster API (CAPI) controllers, which run on a Kubernetes cluster. To get started creating your vSphere cluster, you need a bootstrap cluster. 58 | - [vSphere Bootstrap cluster creation](https://docs.d2iq.com/dkp/2.5/vsphere-bootstrap) 59 | - NOTE: While the instructions result in a KUBECONFIG file being placed at `$HOME/.kube/config`, it would be useful to create another file called `$HOME/.kube/config_bootstrap` and place its contents there as well. The DKP cluster will generate its own KUBECONFIG file, and you can change the contents of `$HOME/.kube/config` to whichever cluster you wish to connect to. 60 | - Create DKP cluster 61 | - [vSphere DKP cluster creation](https://docs.d2iq.com/dkp/2.5/create-new-vsphere-cluster) 62 | - For on-premise deployments, configure the MetalLB Loadbalancer address pool 63 | - [vSphere MetalLB Configuration](https://docs.d2iq.com/dkp/2.5/configure-metallb-for-a-vsphere-managed-cluster) 64 | - Set up Traefik Ingress Controller 65 | - [vSphere Kommander Install Instructions](https://docs.d2iq.com/dkp/2.5/vsphere-install-kommander) 66 | - [Kommander Install Customizations](https://docs.d2iq.com/dkp/2.5/dkp-install-configuration) - specifies how to select Traefik to install 67 | - [DKP 2.5 Components and Applications](https://docs.d2iq.com/dkp/2.5/dkp-2-5-0-components-and-applications) 68 | 69 | 70 | ## Ascender Install Instructions 71 | 72 | ### Ensure KUBECONFIG file is present 73 | 74 | You MUST ensure that the KUBECONFIG file for the DKP cluster is present on the same machine as the Ascender install script, located at `$HOME/.kube/config`. 
75 | 76 | ### Set the configuration variables for a DKP Install 77 | 78 | You can use the dkp.default.config.yml in this directory as a DKP reference, but 79 | the file used by the script must be located at the top level directory, with the filename `custom.config.yml`. 80 | 81 | ### Run the setup script 82 | 83 | Run `./setup.sh` from top level directory in this repository. 84 | 85 | The setup must run as root, so you may need to utilize `sudo` to 86 | execute it. Example: `sudo ./setup.sh` 87 | -------------------------------------------------------------------------------- /docs/dkp/dkp.default.config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # ---Kubernetes-specific variables--- 3 | 4 | # This variable specificies which Kubernetes platform Ascender and its components will be installed on. 5 | k8s_platform: dkp # Options include k3s and dkp, with more to come. 6 | 7 | # The name of the dkp cluster you wish to deploy Ascender to and/or create 8 | # See a list of all dkp clusters with the command 9 | # # kubectl get clusters -A 10 | # NOTE: This must be done while pointing to the DKP Bootstrap Cluster 11 | DKP_CLUSTER_NAME: dkp-cluster 12 | 13 | # Determines whether to use HTTP or HTTPS for Ascender and Ledger. 14 | # If set to https, you MUST provide certificate/key options for the Installer to use. 
15 | k8s_lb_protocol: http #options include http and https 16 | 17 | # ---Local artifact variables--- 18 | 19 | # TLS Certificate file, required when deploying HTTPS in K3s 20 | tls_crt_path: "~/ascender.crt" 21 | 22 | # TLS Private Key file, required when deploying HTTPS in K3s 23 | tls_key_path: "~/ascender.key" 24 | 25 | # Set to false if using an external DNS server for resolution 26 | # Set to true if not 27 | use_etc_hosts: true 28 | 29 | # A directory in which to place both temporary artifacts 30 | # and timestamped Kubernetes Manifests to make Ascender/Ledger easy 31 | # to uninstall 32 | tmp_dir: "{{ playbook_dir }}/../ascender_install_artifacts" 33 | 34 | # ---Ascender install variables--- 35 | 36 | # DNS resolvable hostname for Ascender service. This is required for install. 37 | ASCENDER_HOSTNAME: ascender.example.com 38 | 39 | # k8s namespace for Ascender k8s objects 40 | ASCENDER_NAMESPACE: ascender 41 | 42 | # Administrator username for Ascender 43 | ASCENDER_ADMIN_USER: admin 44 | 45 | # Administrator password for Ascender 46 | # NOTE: Do NOT use the character `!` in the password, or the Ascender install will fail. 
47 | ASCENDER_ADMIN_PASSWORD: "myadminpassword" 48 | 49 | # The OCI container image for Ascender 50 | ASCENDER_IMAGE: ghcr.io/ctrliq/ascender 51 | 52 | # The image tag indicating the version of Ascender you wish to install 53 | ASCENDER_VERSION: 25.0.0 54 | 55 | # The version of the AWX Operator used to install Ascender and its components 56 | ANSIBLE_OPERATOR_VERSION: 2.5.2 57 | 58 | # Determines whether to keep the secrets required to encrypt within Ascender (important when backing up) 59 | ascender_garbage_collect_secrets: true 60 | 61 | # # External PostgreSQL ip or url resolvable by the cluster 62 | # ASCENDER_PGSQL_HOST: "ascenderpghost.example.com" 63 | 64 | # # External PostgreSQL port, this usually defaults to 5432 65 | # ASCENDER_PGSQL_PORT: 5432 66 | 67 | # # External PostgreSQL username 68 | # ASCENDER_PGSQL_USER: ascender 69 | 70 | # # External PostgreSQL password 71 | # ASCENDER_PGSQL_PWD: mypgadminpassword 72 | 73 | # # External PostgreSQL database name used for Ascender (this DB must exist) 74 | # ASCENDER_PGSQL_DB: ascenderdb 75 | 76 | 77 | ### All of these options are unnecessary to change, but will allow you to tweak your Ascender deployment if you choose to change them 78 | ascender_replicas: 1 79 | ascender_image_pull_policy: Always 80 | 81 | 82 | # ---Ledger install variables--- 83 | 84 | # Determines whether or not Ledger will be installed 85 | LEDGER_INSTALL: true 86 | 87 | # DNS resolvable hostname for Ledger service. This is required for install. 
88 | LEDGER_HOSTNAME: ledger.example.com 89 | 90 | # The OCI container image for Ledger 91 | LEDGER_WEB_IMAGE: ghcr.io/ctrliq/ascender-ledger/ledger-web 92 | 93 | # The number of ledger web pods - this is good to ensure high availability 94 | ledger_web_replicas: 1 95 | 96 | # The OCI container image for the Ledger Parser 97 | LEDGER_PARSER_IMAGE: ghcr.io/ctrliq/ascender-ledger/ledger-parser 98 | 99 | # The number of ledger parser pods - this is good to ensure high availability 100 | ledger_parser_replicas: 1 101 | 102 | # The OCI container image for the Ledger Database 103 | LEDGER_DB_IMAGE: ghcr.io/ctrliq/ascender-ledger/ledger-db 104 | 105 | # The image tag indicating the version of Ledger you wish to install 106 | LEDGER_VERSION: latest 107 | 108 | # The Kubernetes namespace in which Ledger objects will live 109 | LEDGER_NAMESPACE: ledger 110 | 111 | # Admin password for Ledger (the username is admin by default) 112 | LEDGER_ADMIN_PASSWORD: "myadminpassword" 113 | 114 | # Password for Ledger database 115 | LEDGER_DB_PASSWORD: "mydbpassword" 116 | 117 | 118 | 119 | 120 | -------------------------------------------------------------------------------- /docs/eks/eks.inventory: -------------------------------------------------------------------------------- 1 | [localhost] 2 | localhost ansible_host=localhost ansible_connection=local ansible_user=rocky 3 | 4 | [ascender] 5 | ascender_host ansible_host=localhost ansible_connection=local ansible_user=rocky -------------------------------------------------------------------------------- /docs/eks/images/aws_login.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ctrliq/ascender-install/34827417c6055fd5494b6d981fc6fe130bfad653/docs/eks/images/aws_login.png -------------------------------------------------------------------------------- /docs/gke/gke.custom.config.yml: -------------------------------------------------------------------------------- 1 
| --- 2 | # This variable specificies which Kubernetes platform Ascender and its components will be installed on. 3 | k8s_platform: gke 4 | # Boolean indicating whether or not the kubeconfig file needs to be downloaded to the Ansible controller 5 | download_kubeconfig: true 6 | # Determines whether to use HTTP or HTTPS for Ascender and Ledger. 7 | # If set to https, you MUST provide certificate/key options for the Installer to use. 8 | k8s_lb_protocol: http 9 | # Determines whether to use Google Cloud DNS Domain Management (which is automated) 10 | # Or a third-party service (e.g., Cloudflare, GoDaddy, etc.) 11 | # If this value is set to false, you will have to manually set an A record for 12 | # {{ASCENDER_HOSTNAME }} and {{ LEDGER_HOSTNAME }} to point to the Google Cloud 13 | # Loadbalancers 14 | USE_GOOGLE_DNS: true 15 | # The name of the gke cluster to install Ascender on - if it does not already exist, the installer can set it up 16 | GKE_CLUSTER_NAME: ascender-gke-cluster 17 | # Specifies whether the GKE cluster needs to be provisioned (provision), exists but needs to be configured to support Ascender (configure), or exists and needs nothing done before installing Ascender (no_action) 18 | GKE_CLUSTER_STATUS: provision 19 | # The Google Cloud zone hosting the gke cluster 20 | GKE_CLUSTER_ZONE: 21 | # The kubernetes version for the gke cluster 22 | GKE_K8S_VERSION: "1.29.4-gke.1043002" 23 | # The gke worker node instance types 24 | GKE_INSTANCE_TYPE: "e2-medium" 25 | # The desired number of gke worker nodes 26 | GKE_NUM_WORKER_NODES: 3 27 | # The volume size of gke worker nodes in GB 28 | GKE_WORKER_VOLUME_SIZE: 100 29 | # TLS Certificate file location on the local installing machine 30 | tls_crt_path: "/home/rocky/ascender.crt" 31 | # TLS Private Key file location on the local installing machine 32 | tls_key_path: "/home/rocky/ascender.key" 33 | # A directory in which to place both temporary artifacts 34 | # and timestamped Kubernetes Manifests to make 
Ascender/Ledger easy 35 | # to uninstall 36 | tmp_dir: "{{ playbook_dir}}/../ascender_install_artifacts" 37 | # DNS resolvable hostname for Ascender service. This is required for install. 38 | ASCENDER_HOSTNAME: ascender.example.com 39 | # DNS domain for Ascender service. This is required when hosting on cloud services. 40 | ASCENDER_DOMAIN: example.com 41 | # In Google Cloud DNS the name of an existing hosted DNS zone for your DNS record. 42 | GOOGLE_DNS_MANAGED_ZONE: "example-com" 43 | # Namespace for Ascender Kubernetes objects 44 | ASCENDER_NAMESPACE: ascender 45 | # Administrator username for Ascender 46 | ASCENDER_ADMIN_USER: admin 47 | # Administrator password for Ascender 48 | ASCENDER_ADMIN_PASSWORD: "myadminpassword" 49 | # The image tag indicating the version of Ascender you wish to install 50 | ASCENDER_VERSION: 25.0.0 51 | # The version of the AWX Operator used to install Ascender and its components 52 | ANSIBLE_OPERATOR_VERSION: 2.19.0 53 | # Determines whether to keep the secrets required to encrypt within Ascender (important when backing up) 54 | ascender_garbage_collect_secrets: false 55 | # External PostgreSQL database name used for Ascender (this DB must exist) 56 | ascender_replicas: 1 57 | # Boolean indicating whether to add standard playbooks into Ascender after installation 58 | ascender_setup_playbooks: false 59 | # Determines whether or not Ledger will be installed 60 | LEDGER_INSTALL: true 61 | # DNS resolvable hostname for Ledger service. 
This is required for install 62 | LEDGER_HOSTNAME: ledger.example.com 63 | # Number of replicas for the Ledger web container 64 | ledger_web_replicas: 1 65 | # Number of replicas for the Ledger Parser container 66 | ledger_parser_replicas: 1 67 | # The image tag indicating the version of Ledger you wish to install 68 | LEDGER_VERSION: latest 69 | # The Kubernetes namespace in which Ledger objects will live 70 | LEDGER_NAMESPACE: ledger 71 | # Admin password for Ledger (the username is admin by default) 72 | LEDGER_ADMIN_PASSWORD: myadminpassword 73 | # Password for Ledger database 74 | LEDGER_DB_PASSWORD: mydbpassword 75 | -------------------------------------------------------------------------------- /docs/gke/gke.inventory: -------------------------------------------------------------------------------- 1 | [localhost] 2 | localhost ansible_host=localhost ansible_connection=local ansible_user=rocky 3 | 4 | [ascender] 5 | ascender_host ansible_host=localhost ansible_connection=local ansible_user=rocky -------------------------------------------------------------------------------- /docs/gke/images/gcloud_cli_signin.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ctrliq/ascender-install/34827417c6055fd5494b6d981fc6fe130bfad653/docs/gke/images/gcloud_cli_signin.jpg -------------------------------------------------------------------------------- /docs/gke/images/hosted_zone.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ctrliq/ascender-install/34827417c6055fd5494b6d981fc6fe130bfad653/docs/gke/images/hosted_zone.jpg -------------------------------------------------------------------------------- /docs/gke/images/records.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ctrliq/ascender-install/34827417c6055fd5494b6d981fc6fe130bfad653/docs/gke/images/records.jpg -------------------------------------------------------------------------------- /docs/k3s/k3s.default.config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # ---Kubernetes-specific variables--- 3 | 4 | # This variable specificies which Kubernetes platform Ascender and its components will be installed on. 5 | k8s_platform: k3s # Options include k3s, eks and dkp, with more to come. 6 | 7 | # Determines whether to use HTTP or HTTPS for Ascender and Ledger. 8 | # If set to https, you MUST provide certificate/key options for the Installer to use. 9 | k8s_lb_protocol: http #options include http and https 10 | 11 | # Routable IP address for the K8s API Server 12 | # (This could be a Load Balancer if using 3 K8s control nodes) 13 | kubeapi_server_ip: "127.0.0.1" 14 | 15 | # This value being set to "true" means that some work needs to be done to set up a 16 | # cluster before proceeding. 
Here is the behavior for different values of k8s_platform: 17 | # k3s: A single-node k3s cluster will be set up on the inventory server 18 | # named "ascender_host" 19 | # eks: N/A, as this is handled by the EKS_CLUSTER_STATUS variable 20 | # rke2: N/A, as you must use Labyrinth Labs' Ansible role to set up a fresh kubernetes cluster 21 | kube_install: true 22 | 23 | # Offline Install - Whether to use local assets to complete the install 24 | k8s_offline: false 25 | 26 | # Specify an INTERNAL container registry and namespace where the k8s cluster can access Ascender images 27 | # k8s_container_registry: "" 28 | 29 | # Kubernetes secret containing the login credentials required for the INTERNAL registry holding the ASCENDER images 30 | #LEAVE AS NONE if no such secret is required 31 | # k8s_image_pull_secret: None 32 | 33 | # Kubernetes secret containing the login credentials required for the INTERNAL registry holding the EXECUTION ENVIRONMENT images 34 | #LEAVE AS NONE if no such secret is required 35 | # k8s_ee_pull_credentials_secret: None 36 | 37 | # Indicates whether or not the kubeconfig file needs to be downloaded to the Ansible controller 38 | download_kubeconfig: true 39 | 40 | # ---k3s variables--- 41 | 42 | # IP address for the K3s Master/Worker node 43 | # Required for local DNS and k3s install 44 | # This IP Address must be reachable by the server from which this installer is running 45 | k3s_master_node_ip: "127.0.0.1" 46 | # ---Local artifact variables--- 47 | 48 | # TLS Certificate file, required when deploying HTTPS in K3s 49 | tls_crt_path: "~/ascender.crt" 50 | 51 | # TLS Private Key file, required when deploying HTTPS in K3s 52 | tls_key_path: "~/ascender.key" 53 | 54 | # CA Bundle that contains both your CA Cert and other external CA bundles 55 | # Such as the ones located at /etc/ssl/certs/ca-bundle.crt 56 | # To create: "cat /etc/ssl/certs/ca-bundle.crt ~/myca.crt > ~/my-ca-bundle.crt" 57 | #custom_cacert_bundle: "~/my-ca-bundle.crt" 
58 | 59 | # LDAP CA Cert 60 | #custom_ldap_cacert: "~/my-ldap-ca.crt" 61 | 62 | # Set to false if using an external DNS server for resolution 63 | # Set to true if not 64 | use_etc_hosts: true 65 | 66 | # A directory in which to place both temporary artifacts 67 | # and timestamped Kubernetes Manifests to make Ascender/Ledger easy 68 | # to uninstall 69 | tmp_dir: "{{ playbook_dir }}/../ascender_install_artifacts" 70 | 71 | # ---Ascender install variables--- 72 | 73 | # DNS resolvable hostname for Ascender service. This is required for install. 74 | ASCENDER_HOSTNAME: ascender.example.com 75 | 76 | # The domain name for all components; required when k8s_platorm=="eks" 77 | ASCENDER_DOMAIN: example.com 78 | 79 | # k8s namespace for Ascender k8s objects 80 | ASCENDER_NAMESPACE: ascender 81 | 82 | # Administrator username for Ascender 83 | ASCENDER_ADMIN_USER: admin 84 | 85 | # Administrator password for Ascender 86 | ASCENDER_ADMIN_PASSWORD: "myadminpassword" 87 | 88 | # The OCI container image for Ascender 89 | ASCENDER_IMAGE: ghcr.io/ctrliq/ascender 90 | 91 | # The image tag indicating the version of Ascender you wish to install 92 | ASCENDER_VERSION: 25.0.0 93 | 94 | # The version of the AWX Operator used to install Ascender and its components 95 | ANSIBLE_OPERATOR_VERSION: 2.19.1 96 | 97 | # Determines whether to keep the secrets required to encrypt within Ascender (important when backing up) 98 | ascender_garbage_collect_secrets: true 99 | 100 | # Setup extra demo playbooks after installation 101 | ascender_setup_playbooks: true 102 | 103 | # # External PostgreSQL ip or url resolvable by the cluster 104 | # ASCENDER_PGSQL_HOST: "ascenderpghost.example.com" 105 | 106 | # # External PostgreSQL port, this usually defaults to 5432 107 | # ASCENDER_PGSQL_PORT: 5432 108 | 109 | # # External PostgreSQL username 110 | # ASCENDER_PGSQL_USER: ascender 111 | 112 | # # External PostgreSQL password 113 | # NOTE: Do NOT use the special characters in the postgres password 
(Django requirement) 114 | # ASCENDER_PGSQL_PWD: mypgadminpassword 115 | 116 | # # External PostgreSQL database name used for Ascender (this DB must exist) 117 | # ASCENDER_PGSQL_DB: ascenderdb 118 | 119 | 120 | ### All of these options are unnecessary to change, but will allow you to tweak your Ascender deployment if you choose to change them 121 | ascender_replicas: 1 122 | ascender_image_pull_policy: Always 123 | 124 | 125 | # ---Ledger install variables--- 126 | 127 | # Determines whether or not Ledger will be installed 128 | LEDGER_INSTALL: true 129 | 130 | # DNS resolvable hostname for Ledger service. This is required for install. 131 | LEDGER_HOSTNAME: ledger.example.com 132 | 133 | # The OCI container image for Ledger 134 | LEDGER_WEB_IMAGE: ghcr.io/ctrliq/ascender-ledger/ledger-web 135 | 136 | # The number of ledger web pods - this is good to ensure high availability 137 | ledger_web_replicas: 1 138 | 139 | # The OCI container image for the Ledger Parser 140 | LEDGER_PARSER_IMAGE: ghcr.io/ctrliq/ascender-ledger/ledger-parser 141 | 142 | # The number of ledger parser pods - this is good to ensure high availability 143 | ledger_parser_replicas: 1 144 | 145 | # The OCI container image for the Ledger Database 146 | LEDGER_DB_IMAGE: ghcr.io/ctrliq/ascender-ledger/ledger-db 147 | 148 | # The image tag indicating the version of Ledger you wish to install 149 | LEDGER_VERSION: latest 150 | 151 | # The Kubernetes namespace in which Ledger objects will live 152 | LEDGER_NAMESPACE: ledger 153 | 154 | # Admin password for Ledger (the username is admin by default) 155 | LEDGER_ADMIN_PASSWORD: "myadminpassword" 156 | 157 | # Password for Ledger database 158 | LEDGER_DB_PASSWORD: "mydbpassword" 159 | 160 | 161 | 162 | 163 | -------------------------------------------------------------------------------- /docs/k3s/k3s.inventory: -------------------------------------------------------------------------------- 1 | [localhost] 2 | localhost ansible_host=localhost 
ansible_connection=local 3 | 4 | [ascender] 5 | # ascender_host ansible_host=x.x.x.x ansible_user=user ansible_ssh_pass=password 6 | # ascender_host ansible_host=x.x.x.x ansible_user=user ansible_ssh_private_key_file=~/.ssh/key.pem 7 | ascender_host ansible_host=localhost ansible_connection=local 8 | -------------------------------------------------------------------------------- /docs/k3s/k3s.offline.default.config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # ---Kubernetes-specific variables--- 3 | 4 | # This variable specificies which Kubernetes platform Ascender and its components will be installed on. 5 | k8s_platform: k3s # Options include k3s, eks and dkp, with more to come. 6 | 7 | # Determines whether to use HTTP or HTTPS for Ascender and Ledger. 8 | # If set to https, you MUST provide certificate/key options for the Installer to use. 9 | k8s_lb_protocol: http #options include http and https 10 | 11 | # Routable IP address for the K8s API Server 12 | # (This could be a Load Balancer if using 3 K8s control nodes) 13 | kubeapi_server_ip: "127.0.0.1" 14 | 15 | # This value being set to "true" means that some work needs to be done to set up a 16 | # cluster before proceeding. 
Here is the behavior for different values of k8s_platform:
58 | 59 | # LDAP CA Cert 60 | #custom_ldap_cacert: "~/my-ldap-ca.crt" 61 | 62 | # Set to false if using an external DNS server for resolution 63 | # Set to true if not 64 | use_etc_hosts: true 65 | 66 | # A directory in which to place both temporary artifacts 67 | # and timestamped Kubernetes Manifests to make Ascender/Ledger easy 68 | # to uninstall 69 | tmp_dir: "{{ playbook_dir }}/../ascender_install_artifacts" 70 | 71 | # ---Ascender install variables--- 72 | 73 | # DNS resolvable hostname for Ascender service. This is required for install. 74 | ASCENDER_HOSTNAME: ascender.example.com 75 | 76 | # The domain name for all components; required when k8s_platorm=="eks" 77 | ASCENDER_DOMAIN: example.com 78 | 79 | # k8s namespace for Ascender k8s objects 80 | ASCENDER_NAMESPACE: ascender 81 | 82 | # Administrator username for Ascender 83 | ASCENDER_ADMIN_USER: admin 84 | 85 | # Administrator password for Ascender 86 | ASCENDER_ADMIN_PASSWORD: "myadminpassword" 87 | 88 | # The OCI container image for Ascender 89 | ASCENDER_IMAGE: ghcr.io/ctrliq/ascender 90 | 91 | # The image tag indicating the version of Ascender you wish to install 92 | ASCENDER_VERSION: 25.0.0 93 | 94 | # The version of the AWX Operator used to install Ascender and its components 95 | ANSIBLE_OPERATOR_VERSION: 2.19.1 96 | 97 | # Determines whether to keep the secrets required to encrypt within Ascender (important when backing up) 98 | ascender_garbage_collect_secrets: true 99 | 100 | # Setup extra demo playbooks after installation 101 | ascender_setup_playbooks: true 102 | 103 | # # External PostgreSQL ip or url resolvable by the cluster 104 | # ASCENDER_PGSQL_HOST: "ascenderpghost.example.com" 105 | 106 | # # External PostgreSQL port, this usually defaults to 5432 107 | # ASCENDER_PGSQL_PORT: 5432 108 | 109 | # # External PostgreSQL username 110 | # ASCENDER_PGSQL_USER: ascender 111 | 112 | # # External PostgreSQL password 113 | # NOTE: Do NOT use the special characters in the postgres password 
(Django requirement) 114 | # ASCENDER_PGSQL_PWD: mypgadminpassword 115 | 116 | # # External PostgreSQL database name used for Ascender (this DB must exist) 117 | # ASCENDER_PGSQL_DB: ascenderdb 118 | 119 | 120 | ### All of these options are unnecessary to change, but will allow you to tweak your Ascender deployment if you choose to change them 121 | ascender_replicas: 1 122 | ascender_image_pull_policy: Always 123 | 124 | 125 | # ---Ledger install variables--- 126 | 127 | # Determines whether or not Ledger will be installed 128 | LEDGER_INSTALL: true 129 | 130 | # DNS resolvable hostname for Ledger service. This is required for install. 131 | LEDGER_HOSTNAME: ledger.example.com 132 | 133 | # The OCI container image for Ledger 134 | LEDGER_WEB_IMAGE: ghcr.io/ctrliq/ascender-ledger/ledger-web 135 | 136 | # The number of ledger web pods - this is good to ensure high availability 137 | ledger_web_replicas: 1 138 | 139 | # The OCI container image for the Ledger Parser 140 | LEDGER_PARSER_IMAGE: ghcr.io/ctrliq/ascender-ledger/ledger-parser 141 | 142 | # The number of ledger parser pods - this is good to ensure high availability 143 | ledger_parser_replicas: 1 144 | 145 | # The OCI container image for the Ledger Database 146 | LEDGER_DB_IMAGE: ghcr.io/ctrliq/ascender-ledger/ledger-db 147 | 148 | # The image tag indicating the version of Ledger you wish to install 149 | LEDGER_VERSION: latest 150 | 151 | # The Kubernetes namespace in which Ledger objects will live 152 | LEDGER_NAMESPACE: ledger 153 | 154 | # Admin password for Ledger (the username is admin by default) 155 | LEDGER_ADMIN_PASSWORD: "myadminpassword" 156 | 157 | # Password for Ledger database 158 | LEDGER_DB_PASSWORD: "mydbpassword" 159 | 160 | 161 | 162 | 163 | -------------------------------------------------------------------------------- /docs/rke2/deploy-rke2-cluster/deploy-rke2-cluster.yaml: -------------------------------------------------------------------------------- 1 | - name: Deploy RKE2 2 | 
hosts: all 3 | become: yes 4 | vars: 5 | # RKE2 version 6 | # All releases at: 7 | # https://github.com/rancher/rke2/releases 8 | rke2_version: v1.28.4+rke2r1 9 | # RKE2 channel 10 | rke2_channel: stable 11 | # Architecture to be downloaded, currently there are releases for amd64 and s390x 12 | rke2_architecture: amd64 13 | # Changes the deploy strategy to install based on local artifacts 14 | rke2_airgap_mode: true 15 | # Airgap implementation type - download, copy or exists 16 | # - 'download' will fetch the artifacts on each node, 17 | # - 'copy' will transfer local files in 'rke2_artifact' to the nodes, 18 | # - 'exists' assumes 'rke2_artifact' files are already stored in 'rke2_artifact_path' 19 | rke2_airgap_implementation: download 20 | # Additional RKE2 server configuration options 21 | rke2_server_options: 22 | - "disable-cloud-controller: true" 23 | - "kubelet-arg:" 24 | - " - \"cloud-provider=external\"" 25 | - " - \"provider-id=vsphere://$master_node_id\"" 26 | # Additional RKE2 agent configuration options 27 | rke2_agent_options: 28 | - "disable-cloud-controller: true" 29 | - "kubelet-arg:" 30 | - " - \"cloud-provider=external\"" 31 | - " - \"provider-id=vsphere://$worker_id\"" 32 | # Pre-shared secret token that other server or agent nodes will register with when connecting to the cluster 33 | rke2_token: defaultSecret12345 34 | # Deploy RKE2 with default CNI canal 35 | rke2_cni: canal 36 | # Local source path where artifacts are stored 37 | rke2_airgap_copy_sourcepath: /tmp/rke2_artifacts 38 | # Local path to store artifacts 39 | rke2_artifact_path: /var/tmp/rke2_artifacts 40 | # Airgap required artifacts 41 | rke2_artifact: 42 | - sha256sum-{{ rke2_architecture }}.txt 43 | - rke2.linux-{{ rke2_architecture }}.tar.gz 44 | - rke2-images.linux-{{ rke2_architecture }}.tar.zst 45 | # Download Kubernetes config file to the Ansible controller 46 | rke2_download_kubeconf: true 47 | # Name of the Kubernetes config file will be downloaded to the Ansible 
controller 48 | rke2_download_kubeconf_file_name: config 49 | # Destination directory where the Kubernetes config file will be downloaded to the Ansible controller 50 | rke2_download_kubeconf_path: ~/.kube 51 | # rke2_airgap_copy_additional_tarballs: 52 | # - rke2-images-multus.linux-{{ rke2_architecture }}.tar.gz 53 | # - rke2-images-multus.linux-{{ rke2_architecture }}.tar.zst 54 | # - rke2-images-multus.linux-{{ rke2_architecture }}.txt 55 | # - rke2-images-calico.linux-{{ rke2_architecture }}.tar.gz 56 | # - rke2-images-calico.linux-{{ rke2_architecture }}.tar.zst 57 | # - rke2-images-calico.linux-{{ rke2_architecture }}.txt 58 | 59 | roles: 60 | - role: lablabs.rke2 -------------------------------------------------------------------------------- /docs/rke2/rke2.default.config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This variable specificies which Kubernetes platform Ascender and its components will be installed on. 3 | k8s_platform: rke2 4 | # Determines whether to use HTTP or HTTPS for Ascender and Ledger. 5 | # If set to https, you MUST provide certificate/key options for the Installer to use. 6 | k8s_lb_protocol: https 7 | # Routable IP address for the K8s API Server 8 | # (This could be a Load Balancer if using 3 K8s control nodes) 9 | kubeapi_server_ip: "127.0.0.1" 10 | # This value being set to "true" means that some work needs to be done to set up a 11 | # cluster before proceeding. 
Here is the behavior for different values of k8s_platform:
30 | ASCENDER_HOSTNAME: ascender.example.com 31 | # Namespace for Ascender Kubernetes objects 32 | ASCENDER_NAMESPACE: ascender 33 | # Administrator username for Ascender 34 | ASCENDER_ADMIN_USER: admin 35 | # Administrator password for Ascender 36 | ASCENDER_ADMIN_PASSWORD: "myadminpassword" 37 | # The OCI container image for Ascender 38 | ASCENDER_IMAGE: ghcr.io/ctrliq/ascender 39 | # The image tag indicating the version of Ascender you wish to install 40 | ASCENDER_VERSION: 25.0.0 41 | # The version of the AWX Operator used to install Ascender and its components 42 | ANSIBLE_OPERATOR_VERSION: 2.9.0 43 | # Determines whether to keep the secrets required to encrypt within Ascender (important when backing up) 44 | ascender_garbage_collect_secrets: false 45 | # External PostgreSQL database name used for Ascender (this DB must exist) 46 | ascender_replicas: 1 47 | # The Ascender web container image pull policy (If unsure, choose IfNotPresent) 48 | image_pull_policy: IfNotPresent 49 | # Determines whether or not Ledger will be installed 50 | LEDGER_INSTALL: true 51 | # DNS resolvable hostname for Ledger service. 
This is required for install 52 | LEDGER_HOSTNAME: ledger.example.com 53 | # The OCI container image for Ledger 54 | LEDGER_WEB_IMAGE: ghcr.io/ctrliq/ascender-ledger/ledger-web 55 | # Number of replicas for the Ledger web container 56 | ledger_web_replicas: 1 57 | # The OCI container image for Ledger Parser 58 | LEDGER_PARSER_IMAGE: ghcr.io/ctrliq/ascender-ledger/ledger-parser 59 | # Number of replicas for the Ledger Parser container 60 | ledger_parser_replicas: 1 61 | # The OCI container image for Ledger DB 62 | LEDGER_DB_IMAGE: ghcr.io/ctrliq/ascender-ledger/ledger-db 63 | # The image tag indicating the version of Ledger you wish to install 64 | LEDGER_VERSION: latest 65 | # The Kubernetes namespace in which Ledger objects will live 66 | LEDGER_NAMESPACE: ledger 67 | # Admin password for Ledger (the username is admin by default) 68 | LEDGER_ADMIN_PASSWORD: myadminpassword 69 | # Password for Ledger database 70 | LEDGER_DB_PASSWORD: mydbpassword 71 | -------------------------------------------------------------------------------- /docs/rke2/rke2.inventory: -------------------------------------------------------------------------------- 1 | [localhost] 2 | localhost ansible_host=localhost ansible_connection=local 3 | 4 | [ascender] 5 | ascender_host ansible_host=localhost ansible_connection=local -------------------------------------------------------------------------------- /inventory: -------------------------------------------------------------------------------- 1 | [localhost] 2 | localhost ansible_host=localhost ansible_connection=local 3 | 4 | [ascender] 5 | # ascender_host ansible_host=x.x.x.x ansible_user=user ansible_ssh_pass=password 6 | # ascender_host ansible_host=x.x.x.x ansible_user=user ansible_ssh_private_key_file=~/.ssh/key.pem 7 | ascender_host ansible_host=localhost ansible_connection=local -------------------------------------------------------------------------------- /playbooks/apply_cloud_permissions.yml: 
-------------------------------------------------------------------------------- 1 | # Copyright (c) 2023, Ctrl IQ, Inc. All rights reserved. 2 | 3 | - hosts: localhost 4 | gather_facts: no 5 | become: false 6 | 7 | vars_files: 8 | - ["../custom.config.yml", "../default.config.yml"] 9 | 10 | environment: 11 | K8S_AUTH_KUBECONFIG: "{{ lookup('env', 'HOME') }}/.kube/config" 12 | NAMESPACE: "{{ ASCENDER_NAMESPACE }}" 13 | PATH: "/usr/local/bin:{{ lookup('env', 'PATH') }}" #required as the aws cli lives at /usr/local/bin/aws 14 | 15 | tasks: 16 | 17 | - name: "Apply minimum permissions to install Ascender in a Kubernetes cluster of type {{ k8s_platform }}" 18 | ansible.builtin.include_role: 19 | name: apply_permissions 20 | tasks_from: "apply_permissions_{{ k8s_platform }}" -------------------------------------------------------------------------------- /playbooks/assertions.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023, Ctrl IQ, Inc. All rights reserved. 
2 | 3 | - hosts: localhost 4 | gather_facts: no 5 | connection: local 6 | become: false 7 | 8 | vars_files: 9 | - ["../custom.config.yml", "../default.config.yml"] 10 | 11 | tasks: 12 | 13 | - name: Verify that LEDGER_HOSTNAME and ASCENDER_HOSTNAME are different 14 | ansible.builtin.assert: 15 | that: 16 | - LEDGER_HOSTNAME != ASCENDER_HOSTNAME 17 | fail_msg: "LEDGER_HOSTNAME ({{ LEDGER_HOSTNAME }}) and ASCENDER_HOSTNAME ({{ ASCENDER_HOSTNAME }}) must be different" 18 | when: LEDGER_HOSTNAME is defined 19 | 20 | - hosts: ascender 21 | gather_facts: yes 22 | become: false 23 | 24 | vars_files: 25 | - ["../custom.config.yml", "../default.config.yml"] 26 | 27 | tasks: 28 | 29 | - name: Retrieve the Linux details 30 | block: 31 | 32 | - name: Verify x86_64 architecture 33 | ansible.builtin.assert: 34 | that: 35 | - ansible_architecture == "x86_64" 36 | fail_msg: "K3s server must be of type x86_64" 37 | 38 | - name: Verify minimum processor count when Ledger is not being installed 39 | ansible.builtin.assert: 40 | that: 41 | - ansible_processor_vcpus >= 2 42 | - ansible_memtotal_mb >= 3500 43 | fail_msg: "K3s server requires at least 2 vCPUs and 4000 MB of memory; target server has {{ ansible_processor_vcpus }} vCPUs and {{ ansible_memtotal_mb }} MBs of memory" 44 | when: not LEDGER_INSTALL 45 | 46 | - name: Verify minimum processor count when Ledger is being installed 47 | ansible.builtin.assert: 48 | that: 49 | - ansible_processor_vcpus >= 2 50 | - ansible_memtotal_mb >= 7500 51 | fail_msg: "K3s server requires at least 2 vCPUs and 8000 MB of memory; target server has {{ ansible_processor_vcpus }} vCPUs and {{ ansible_memtotal_mb }} MBs of memory" 52 | when: LEDGER_INSTALL 53 | 54 | - name: Verify Enterprise Linux OS Family architecture 55 | ansible.builtin.assert: 56 | that: 57 | - ansible_os_family == "RedHat" 58 | fail_msg: "K3s Server OS Family must be of type Rocky/CentOS" 59 | 60 | - name: Verify RedHat OS Family architecture 61 | ansible.builtin.assert: 62 | 
that: 63 | - ansible_distribution_major_version == '8' or ansible_distribution_major_version == '9' 64 | fail_msg: "K3s Server OS major version must be 8 or 9" 65 | 66 | - name: Get mount location of /var directory 67 | ansible.builtin.command: df -h /var 68 | register: mount_location 69 | 70 | # - name: Print mount location 71 | # ansible.builtin.debug: 72 | # var: mount_location.stdout_lines[1].split()[0] 73 | 74 | - name: "Get available disk of {{ mount_location.stdout_lines[1].split()[0] }}" 75 | ansible.builtin.debug: 76 | var: ansible_mounts 77 | 78 | 79 | - name: "Ensure that free space on {{ mount_location.stdout_lines[1].split()[0] }} is greater than 30GB for k3s offline install" 80 | ansible.builtin.assert: 81 | that: item.size_available >= 32212254720 82 | fail_msg: "Free disk space for /var must be at least 30GB" 83 | when: 84 | - item.device is match(mount_location.stdout_lines[1].split()[0]) 85 | - item.mount == "/" 86 | - k8s_offline 87 | with_items: "{{ ansible_mounts }}" 88 | 89 | 90 | - name: "Ensure that free space on {{ mount_location.stdout_lines[1].split()[0] }} is greater than 20GB" 91 | ansible.builtin.assert: 92 | that: item.size_available >= 21474836480 93 | fail_msg: "Free disk space for /var must be at least 20GB" 94 | when: 95 | - item.device is match(mount_location.stdout_lines[1].split()[0]) 96 | - item.mount == "/" 97 | with_items: "{{ ansible_mounts }}" 98 | 99 | when: k8s_platform == "k3s" 100 | 101 | 102 | - name: Verify Collections 103 | ansible.builtin.assert: 104 | that: 105 | - lookup('community.general.collection_version', item.name) is version(item.version, item.compare, version_type=item.version_type) 106 | fail_msg: "{{ item.name }} Collection is not installed or the incorrect version. 
Please run 'ansible-galaxy install -r collections/requirements.yml'" 107 | loop: "{{ required_collections }}" 108 | loop_control: 109 | label: "{{ item.name }} {{ item.compare }} {{ item.version }}" 110 | -------------------------------------------------------------------------------- /playbooks/backup.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023, Ctrl IQ, Inc. All rights reserved. 2 | 3 | - hosts: localhost 4 | gather_facts: no 5 | connection: local 6 | 7 | environment: 8 | K8S_AUTH_KUBECONFIG: "{{ lookup('env', 'HOME') }}/.kube/config" 9 | 10 | vars_files: 11 | - ["../custom.config.yml", "../default.config.yml"] 12 | 13 | tasks: 14 | 15 | - name: "Run ascender_backup role" 16 | ansible.builtin.include_role: 17 | name: ascender_backup 18 | tasks_from: "ascender_backup" 19 | 20 | - ansible.builtin.debug: 21 | msg: "Ascender backup complete." -------------------------------------------------------------------------------- /playbooks/install_ascender.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023, Ctrl IQ, Inc. All rights reserved. 2 | 3 | - hosts: localhost 4 | gather_facts: no 5 | become: false 6 | 7 | vars_files: 8 | - ["../custom.config.yml", "../default.config.yml"] 9 | 10 | environment: 11 | K8S_AUTH_KUBECONFIG: "{{ lookup('env', 'HOME') }}/.kube/config" 12 | NAMESPACE: "{{ ASCENDER_NAMESPACE }}" 13 | PATH: "/usr/local/bin:{{ lookup('env', 'PATH') }}" #required as the aws cli lives at /usr/local/bin/aws 14 | 15 | tasks: 16 | 17 | - name: "Run ascender_install role for {{ k8s_platform }}" 18 | ansible.builtin.include_role: 19 | name: ascender_install 20 | tasks_from: "ascender_install_{{ k8s_platform }}" 21 | 22 | - ansible.builtin.debug: 23 | msg: "Ascender install complete." 
-------------------------------------------------------------------------------- /playbooks/install_ledger.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023, Ctrl IQ, Inc. All rights reserved. 2 | 3 | - hosts: localhost 4 | become: false 5 | gather_facts: no 6 | 7 | vars_files: 8 | - ["../custom.config.yml", "../default.config.yml"] 9 | 10 | environment: 11 | K8S_AUTH_KUBECONFIG: "{{ lookup('env', 'HOME') }}/.kube/config" 12 | NAMESPACE: "{{ LEDGER_NAMESPACE }}" 13 | PATH: "/usr/local/bin:{{ lookup('env', 'PATH') }}" #required as the aws cli lives at /usr/local/bin/aws 14 | 15 | tasks: 16 | 17 | - name: Create Namespace 18 | kubernetes.core.k8s: 19 | name: "{{ LEDGER_NAMESPACE }}" 20 | api_version: v1 21 | kind: Namespace 22 | state: present 23 | validate_certs: false 24 | 25 | - name: "Run ascender_install role for {{ k8s_platform }}" 26 | ansible.builtin.include_role: 27 | name: ledger_install 28 | tasks_from: "ledger_install_{{ k8s_platform }}" 29 | 30 | - ansible.builtin.debug: 31 | msg: "Ledger install complete." -------------------------------------------------------------------------------- /playbooks/install_react.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023, Ctrl IQ, Inc. All rights reserved. 
2 | 3 | - hosts: localhost 4 | gather_facts: no 5 | become: false 6 | 7 | vars_files: 8 | - ["../custom.config.yml", "../default.config.yml"] 9 | 10 | environment: 11 | K8S_AUTH_KUBECONFIG: "{{ lookup('env', 'HOME') }}/.kube/config" 12 | NAMESPACE: "{{ ASCENDER_NAMESPACE }}" 13 | PATH: "/usr/local/bin:{{ lookup('env', 'PATH') }}" #required as the aws cli lives at /usr/local/bin/aws 14 | 15 | tasks: 16 | 17 | - name: "Run ascender_react role for {{ k8s_platform }}" 18 | ansible.builtin.include_role: 19 | name: ascender_react 20 | tasks_from: "react_install_{{ k8s_platform }}" 21 | 22 | - ansible.builtin.debug: 23 | msg: "React install complete." -------------------------------------------------------------------------------- /playbooks/kubernetes_setup.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023, Ctrl IQ, Inc. All rights reserved. 2 | 3 | - hosts: localhost 4 | gather_facts: no 5 | become: false 6 | 7 | vars_files: 8 | - ["../custom.config.yml", "../default.config.yml"] 9 | 10 | environment: 11 | K8S_AUTH_KUBECONFIG: "{{ lookup('env', 'HOME') }}/.kube/config" 12 | NAMESPACE: "{{ ASCENDER_NAMESPACE }}" 13 | PATH: "/usr/bin:/usr/local/bin:{{ lookup('env', 'PATH') }}" #required as the aws cli lives at /usr/local/bin/aws 14 | 15 | tasks: 16 | 17 | - name: "Install prerequisite packages on localhost" 18 | ansible.builtin.include_role: 19 | name: common 20 | 21 | 22 | - hosts: ascender 23 | gather_facts: no 24 | become: false 25 | 26 | vars_files: 27 | - ["../custom.config.yml", "../default.config.yml"] 28 | 29 | environment: 30 | K8S_AUTH_KUBECONFIG: "{{ lookup('env', 'HOME') }}/.kube/config" 31 | NAMESPACE: "{{ ASCENDER_NAMESPACE }}" 32 | PATH: "/usr/bin:/usr/local/bin:{{ lookup('env', 'PATH') }}" #required as the aws cli lives at /usr/local/bin/aws 33 | 34 | tasks: 35 | 36 | - name: Run k8s_setup role for {{ k8s_platform }} 37 | ansible.builtin.include_role: 38 | name: k8s_setup 39 | tasks_from: 
"k8s_setup_{{ k8s_platform }}" 40 | # when: kube_install -------------------------------------------------------------------------------- /playbooks/restore.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023, Ctrl IQ, Inc. All rights reserved. 2 | 3 | - hosts: localhost 4 | gather_facts: no 5 | connection: local 6 | 7 | tasks: 8 | 9 | - ansible.builtin.debug: 10 | msg: "This is the RESTORE playbook." -------------------------------------------------------------------------------- /playbooks/roles/apply_permissions/tasks/apply_permissions_eks.yml: -------------------------------------------------------------------------------- 1 | - name: Retrieve the current time in order to timestamp files 2 | ansible.builtin.setup: 3 | gather_subset: 4 | - date_time 5 | 6 | - name: Query for aws account number 7 | amazon.aws.aws_caller_info: 8 | register: caller_info 9 | no_log: true 10 | 11 | - name: Create Ascender Create Policy JSON file 12 | ansible.builtin.template: 13 | src: templates/eks/iam_policies/ascenderinstallpermissions_all.json 14 | dest: "{{ playbook_dir }}/../ascender_install_artifacts/ascender_install_permissions.json.{{ ansible_date_time.iso8601_basic_short }}" -------------------------------------------------------------------------------- /playbooks/roles/apply_permissions/templates/eks/iam_policies/ascenderinstallpermissions.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Sid": "VisualEditor0", 6 | "Effect": "Allow", 7 | "Action": [ 8 | "iam:ListPolicies", 9 | "iam:CreatePolicy", 10 | "iam:DeletePolicy", 11 | "iam:PassRole", 12 | "iam:ListOpenIDConnectProviders", 13 | "route53:ListHostedZones", 14 | "route53:ChangeResourceRecordSets", 15 | "route53:ListResourceRecordSets", 16 | "iam:CreateRole", 17 | "iam:AttachRolePolicy" 18 | ], 19 | "Resource": "*" 20 | } 21 | ] 22 | } 
-------------------------------------------------------------------------------- /playbooks/roles/apply_permissions/templates/eks/iam_policies/ascenderinstallpermissions_all.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": "eks:*", 7 | "Resource": "*" 8 | }, 9 | { 10 | "Action": [ 11 | "ssm:GetParameter", 12 | "ssm:GetParameters" 13 | ], 14 | "Resource": [ 15 | "arn:aws:ssm:*:{{ caller_info.account }}:parameter/aws/*", 16 | "arn:aws:ssm:*::parameter/aws/*" 17 | ], 18 | "Effect": "Allow" 19 | }, 20 | { 21 | "Action": [ 22 | "kms:CreateGrant", 23 | "kms:RevokeGrant", 24 | "kms:DescribeKey" 25 | ], 26 | "Resource": "*", 27 | "Effect": "Allow" 28 | }, 29 | { 30 | "Action": [ 31 | "logs:PutRetentionPolicy" 32 | ], 33 | "Resource": "*", 34 | "Effect": "Allow" 35 | }, 36 | { 37 | "Sid": "VisualEditor0", 38 | "Effect": "Allow", 39 | "Action": [ 40 | "iam:ListPolicies", 41 | "iam:CreatePolicy", 42 | "iam:DeletePolicy", 43 | "iam:ListOpenIDConnectProviders", 44 | "route53:ListHostedZones", 45 | "route53:ChangeResourceRecordSets", 46 | "route53:ListResourceRecordSets", 47 | "iam:CreateRole", 48 | "iam:DeleteRole" 49 | ], 50 | "Resource": "*" 51 | }, 52 | { 53 | "Effect": "Allow", 54 | "Action": [ 55 | "iam:CreateInstanceProfile", 56 | "iam:DeleteInstanceProfile", 57 | "iam:GetInstanceProfile", 58 | "iam:RemoveRoleFromInstanceProfile", 59 | "iam:GetRole", 60 | "iam:PutRolePolicy", 61 | "iam:AddRoleToInstanceProfile", 62 | "iam:ListInstanceProfilesForRole", 63 | "iam:PassRole", 64 | "iam:AttachRolePolicy", 65 | "iam:DetachRolePolicy", 66 | "iam:DeleteRolePolicy", 67 | "iam:GetRolePolicy", 68 | "iam:GetOpenIDConnectProvider", 69 | "iam:CreateOpenIDConnectProvider", 70 | "iam:DeleteOpenIDConnectProvider", 71 | "iam:TagOpenIDConnectProvider", 72 | "iam:ListAttachedRolePolicies", 73 | "iam:TagRole", 74 | "iam:GetPolicy", 75 | 
"iam:ListPolicyVersions" 76 | ], 77 | "Resource": [ 78 | "arn:aws:iam::{{ caller_info.account }}:instance-profile/eksctl-*", 79 | "arn:aws:iam::{{ caller_info.account }}:role/eksctl-*", 80 | "arn:aws:iam::{{ caller_info.account }}:policy/eksctl-*", 81 | "arn:aws:iam::{{ caller_info.account }}:oidc-provider/*", 82 | "arn:aws:iam::{{ caller_info.account }}:role/aws-service-role/eks-nodegroup.amazonaws.com/AWSServiceRoleForAmazonEKSNodegroup" 83 | ] 84 | }, 85 | { 86 | "Effect": "Allow", 87 | "Action": [ 88 | "iam:GetRole" 89 | ], 90 | "Resource": [ 91 | "arn:aws:iam::{{ caller_info.account }}:role/*" 92 | ] 93 | }, 94 | { 95 | "Effect": "Allow", 96 | "Action": [ 97 | "iam:CreateServiceLinkedRole" 98 | ], 99 | "Resource": "*", 100 | "Condition": { 101 | "StringEquals": { 102 | "iam:AWSServiceName": [ 103 | "eks.amazonaws.com", 104 | "eks-nodegroup.amazonaws.com", 105 | "eks-fargate.amazonaws.com" 106 | ] 107 | } 108 | } 109 | } 110 | ] 111 | } -------------------------------------------------------------------------------- /playbooks/roles/apply_permissions/templates/eks/iam_policies/eksallaccess.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": "eks:*", 7 | "Resource": "*" 8 | }, 9 | { 10 | "Action": [ 11 | "ssm:GetParameter", 12 | "ssm:GetParameters" 13 | ], 14 | "Resource": [ 15 | "arn:aws:ssm:*:{{ caller_info.account }}:parameter/aws/*", 16 | "arn:aws:ssm:*::parameter/aws/*" 17 | ], 18 | "Effect": "Allow" 19 | }, 20 | { 21 | "Action": [ 22 | "kms:CreateGrant", 23 | "kms:RevokeGrant", 24 | "kms:DescribeKey" 25 | ], 26 | "Resource": "*", 27 | "Effect": "Allow" 28 | }, 29 | { 30 | "Action": [ 31 | "logs:PutRetentionPolicy" 32 | ], 33 | "Resource": "*", 34 | "Effect": "Allow" 35 | } 36 | ] 37 | } 38 | -------------------------------------------------------------------------------- 
/playbooks/roles/apply_permissions/templates/eks/iam_policies/iamlimitedaccess.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": [ 7 | "iam:CreateInstanceProfile", 8 | "iam:DeleteInstanceProfile", 9 | "iam:GetInstanceProfile", 10 | "iam:RemoveRoleFromInstanceProfile", 11 | "iam:GetRole", 12 | "iam:CreateRole", 13 | "iam:DeleteRole", 14 | "iam:AttachRolePolicy", 15 | "iam:PutRolePolicy", 16 | "iam:AddRoleToInstanceProfile", 17 | "iam:ListInstanceProfilesForRole", 18 | "iam:PassRole", 19 | "iam:DetachRolePolicy", 20 | "iam:DeleteRolePolicy", 21 | "iam:GetRolePolicy", 22 | "iam:GetOpenIDConnectProvider", 23 | "iam:CreateOpenIDConnectProvider", 24 | "iam:DeleteOpenIDConnectProvider", 25 | "iam:TagOpenIDConnectProvider", 26 | "iam:ListAttachedRolePolicies", 27 | "iam:TagRole", 28 | "iam:GetPolicy", 29 | "iam:CreatePolicy", 30 | "iam:DeletePolicy", 31 | "iam:ListPolicyVersions" 32 | ], 33 | "Resource": [ 34 | "arn:aws:iam::{{ caller_info.account }}:instance-profile/eksctl-*", 35 | "arn:aws:iam::{{ caller_info.account }}:role/eksctl-*", 36 | "arn:aws:iam::{{ caller_info.account }}:policy/eksctl-*", 37 | "arn:aws:iam::{{ caller_info.account }}:oidc-provider/*", 38 | "arn:aws:iam::{{ caller_info.account }}:role/aws-service-role/eks-nodegroup.amazonaws.com/AWSServiceRoleForAmazonEKSNodegroup", 39 | "arn:aws:iam::{{ caller_info.account }}:role/eksctl-managed-*" 40 | ] 41 | }, 42 | { 43 | "Effect": "Allow", 44 | "Action": [ 45 | "iam:GetRole" 46 | ], 47 | "Resource": [ 48 | "arn:aws:iam::{{ caller_info.account }}:role/*" 49 | ] 50 | }, 51 | { 52 | "Effect": "Allow", 53 | "Action": [ 54 | "iam:CreateServiceLinkedRole" 55 | ], 56 | "Resource": "*", 57 | "Condition": { 58 | "StringEquals": { 59 | "iam:AWSServiceName": [ 60 | "eks.amazonaws.com", 61 | "eks-nodegroup.amazonaws.com", 62 | "eks-fargate.amazonaws.com" 63 | ] 64 | } 65 | } 66 | } 67 | ] 
68 | } 69 | -------------------------------------------------------------------------------- /playbooks/roles/ascender_backup/tasks/ascender_backup.yml: -------------------------------------------------------------------------------- 1 | - name: Retrieve the current time in order to timestamp files 2 | ansible.builtin.setup: 3 | gather_subset: 4 | - date_time 5 | 6 | - ansible.builtin.debug: 7 | var: ansible_date_time 8 | 9 | - name: Generate manifest to install AWXBackup k8s object with timestamp attached, for purposes of deletion later 10 | ansible.builtin.template: 11 | src: "ascender-backup.yml" 12 | dest: "{{ tmp_dir }}/ascender-backup.yml.{{ ansible_date_time.iso8601_basic_short }}" 13 | 14 | - name: "Apply AWXBackup k8s manifest" 15 | kubernetes.core.k8s: 16 | state: present 17 | definition: "{{ lookup('ansible.builtin.template', 'ascender-backup.yml') }}" 18 | 19 | # - name: Wait for ascender-app-web Deployment to complete setting up 20 | # kubernetes.core.k8s_info: 21 | # kind: Deployment 22 | # wait: yes 23 | # name: ascender-app-web 24 | # namespace: "{{ ASCENDER_NAMESPACE }}" 25 | # wait_sleep: 10 26 | # wait_timeout: 360 27 | # register: ascender_web_deployment 28 | 29 | # - ansible.builtin.debug: 30 | # var: ascender_web_deployment -------------------------------------------------------------------------------- /playbooks/roles/ascender_backup/templates/ascender-backup.yml: -------------------------------------------------------------------------------- 1 | apiVersion: awx.ansible.com/v1beta1 2 | kind: AWXBackup 3 | metadata: 4 | name: ascender-backup-{{ ansible_date_time.date }}-{{ ansible_date_time.epoch }} 5 | namespace: {{ ASCENDER_NAMESPACE }} 6 | spec: 7 | deployment_name: ascender-app 8 | clean_backup_on_delete: true -------------------------------------------------------------------------------- /playbooks/roles/ascender_install/defaults/main.yml: -------------------------------------------------------------------------------- 1 | ee_images: 2 
| - name: Ascender-EE (Latest) 3 |     image: ghcr.io/ctrliq/ascender-ee:latest 4 | 5 | # DNS resolvable hostname for Ascender service. This is required for install. 6 | ASCENDER_HOSTNAME: ascender.example.com 7 | 8 | # The domain name for all components; required when k8s_platform=="eks" 9 | ASCENDER_DOMAIN: example.com 10 | 11 | # k8s namespace for Ascender k8s objects 12 | ASCENDER_NAMESPACE: ascender 13 | 14 | # Administrator username for Ascender 15 | ASCENDER_ADMIN_USER: admin 16 | 17 | # Administrator password for Ascender 18 | ASCENDER_ADMIN_PASSWORD: "myadminpassword" 19 | 20 | # The OCI container image for Ascender 21 | ASCENDER_IMAGE: ghcr.io/ctrliq/ascender 22 | 23 | # The image tag indicating the version of Ascender you wish to install 24 | ASCENDER_VERSION: latest 25 | 26 | # The version of the AWX Operator used to install Ascender and its components 27 | ANSIBLE_OPERATOR_VERSION: latest 28 | 29 | k8s_container_registry: ghcr.io/ctrliq 30 | -------------------------------------------------------------------------------- /playbooks/roles/ascender_install/tasks/ascender_install_aks.yml: -------------------------------------------------------------------------------- 1 | - name: Retrieve the current time in order to timestamp files 2 |   ansible.builtin.setup: 3 |     gather_subset: 4 |       - date_time 5 | 6 | - name: Apply NGINX Ingress Controller manifest 7 |   kubernetes.core.k8s: 8 |     state: present 9 |     src: https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/cloud/deploy.yaml 10 |     verify_ssl: false 11 | 12 | - name: Create Namespace 13 |   kubernetes.core.k8s: 14 |     name: "{{ ASCENDER_NAMESPACE }}" 15 |     api_version: v1 16 |     kind: Namespace 17 |     verify_ssl: false 18 |     state: present 19 | 20 | - name: Generate manifest to install AWX Operator 21 |   ansible.builtin.template: 22 |     src: templates/awx-operator/kustomization.j2 23 |     dest: "{{ tmp_dir }}/kustomization.yml" 24 | 25 | - name: Generate manifest to install AWX Operator with
timestamp attached, for purposes of operator deletion later 26 | ansible.builtin.template: 27 | src: templates/awx-operator/kustomization.j2 28 | dest: "{{ tmp_dir }}/kustomization.yml.{{ ansible_date_time.iso8601_basic_short }}" 29 | 30 | - name: Copy Operator Source 31 | ansible.builtin.copy: 32 | src: "{{ playbook_dir }}/../offline/awx-operator-{{ ANSIBLE_OPERATOR_VERSION }}/config" 33 | dest: "{{ tmp_dir }}/" 34 | remote_src: true 35 | when: 36 | - k8s_offline 37 | 38 | - name: Install AWX Operator with Kustomize 39 | kubernetes.core.k8s: 40 | definition: "{{ lookup('kubernetes.core.kustomize', dir=tmp_dir) }}" 41 | verify_ssl: false 42 | 43 | - name: delete Kustomization file 44 | ansible.builtin.file: 45 | path: "{{ tmp_dir }}/kustomization.yml" 46 | state: absent 47 | 48 | - name: Wait for Operator deployment to be ready 49 | kubernetes.core.k8s_info: 50 | kind: Deployment 51 | wait: yes 52 | name: awx-operator-controller-manager 53 | namespace: "{{ ASCENDER_NAMESPACE }}" 54 | wait_sleep: 10 55 | wait_timeout: 360 56 | verify_ssl: false 57 | 58 | - name: Generate manifest to install AWX ascender-app k8s object with timestamp attached, for purposes of deletion later 59 | ansible.builtin.template: 60 | src: "ascender-deployment/ascender-deployment-aks.yml" 61 | dest: "{{ tmp_dir }}/ascender-deployment-aks.yml.{{ ansible_date_time.iso8601_basic_short }}" 62 | 63 | - name: "Apply AWX ascender-app manifest for {{ k8s_platform }}" 64 | kubernetes.core.k8s: 65 | state: present 66 | definition: "{{ lookup('ansible.builtin.template', 'ascender-deployment/ascender-deployment-aks.yml') }}" 67 | verify_ssl: false 68 | 69 | - name: Wait for ascender-app-web Deployment to complete setting up (this may take up to 10 minutes) 70 | kubernetes.core.k8s_info: 71 | kind: Deployment 72 | wait: yes 73 | name: ascender-app-web 74 | namespace: "{{ ASCENDER_NAMESPACE }}" 75 | wait_sleep: 10 76 | wait_timeout: 360 77 | verify_ssl: false 78 | register: ascender_web_deployment 79 | 80 
# - ansible.builtin.debug:
#     var: ascender_web_deployment

# Poll for the Ingress created by the AWX operator until both the load
# balancer IP and the hostname rule are populated; retries: 20 at the uri
# module's default delay.
- name: Retrieve the ascender-app-ingress Ingress object
  kubernetes.core.k8s_info:
    api_version: v1
    kind: Ingress
    name: ascender-app-ingress
    # Use the configured namespace; every sibling task uses ASCENDER_NAMESPACE,
    # and a hardcoded "ascender" breaks installs into a custom namespace.
    namespace: "{{ ASCENDER_NAMESPACE }}"
  register: ascender_ingress
  until:
    - ascender_ingress.resources[0].status.loadBalancer.ingress[0].ip is defined
    - ascender_ingress.resources[0].spec.rules[0].host is defined
  retries: 20

# - ansible.builtin.debug:
#     var: ascender_ingress.resources[0].status.loadBalancer.ingress[0].ip

# - ansible.builtin.debug:
#     var: ascender_ingress.resources[0].spec.rules[0].host

- name: List all DNS zones
  azure.azcollection.azure_rm_dnszone_info:
  register: all_zones
  when: USE_AZURE_DNS

# - ansible.builtin.debug:
#     var: all_zones
#   when: USE_AZURE_DNS

# Derive the resource group from the zone's Azure resource ID
# (segment 4 of /subscriptions/<sub>/resourceGroups/<rg>/...).
- name: Set Resource Group Fact
  ansible.builtin.set_fact:
    resource_group: "{{ item.id.split('/')[4] }}"
  loop: "{{ all_zones.ansible_info.azure_dnszones }}"
  when:
    - item.name == ASCENDER_DOMAIN
    - USE_AZURE_DNS

# - ansible.builtin.debug:
#     var: resource_group
#   when: USE_AZURE_DNS

- name: create A record set with metadata information
  azure.azcollection.azure_rm_dnsrecordset:
    resource_group: "{{ resource_group }}"
    # NOTE(review): relative_name is hardcoded; this assumes ASCENDER_HOSTNAME
    # is "ascender.<ASCENDER_DOMAIN>" — confirm before supporting other hostnames.
    relative_name: ascender
    zone_name: "{{ ASCENDER_DOMAIN }}"
    record_type: A
    records:
      - entry: "{{ ascender_ingress.resources[0].status.loadBalancer.ingress[0].ip }}"
  when: USE_AZURE_DNS

- name: Indicate Instructions for non-AzureDNS Domain Management
  ansible.builtin.debug:
    msg: "Please manually create a DNS A record for {{ ASCENDER_HOSTNAME }} in your DNS Record Manager of choice, mapping to {{ ascender_ingress.resources[0].status.loadBalancer.ingress[0].ip }}"
  when: not USE_AZURE_DNS

-
name: Set the Ascender URL 139 | ansible.builtin.set_fact: 140 | ascender_ip: "{{ ASCENDER_HOSTNAME }}" 141 | ascender_port: "{{ '443' if k8s_lb_protocol == 'https' else '80' }}" 142 | 143 | - ansible.builtin.debug: 144 | msg: "The Ascender API endpoint is {{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}/api/v2/ping/" 145 | 146 | - name: Wait until Ascender API is Up (This may take between 10-20 mins) 147 | ansible.builtin.uri: 148 | url: "{{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}/api/v2/ping/" 149 | return_content: yes 150 | validate_certs: no 151 | status_code: 152 | - 200 153 | until: 154 | - uri_output.status|int == 200 155 | - uri_output.url == k8s_lb_protocol + "://" + ascender_ip + ":" + ascender_port + "/api/v2/ping/" 156 | retries: 200 157 | delay: 10 158 | register: uri_output 159 | 160 | - ansible.builtin.debug: 161 | msg: "Ascender API is up" 162 | -------------------------------------------------------------------------------- /playbooks/roles/ascender_install/tasks/ascender_install_dkp.yml: -------------------------------------------------------------------------------- 1 | - name: Retrieve the current time in order to timestamp files 2 | ansible.builtin.setup: 3 | gather_subset: 4 | - date_time 5 | 6 | - name: Create Namespace 7 | kubernetes.core.k8s: 8 | name: "{{ ASCENDER_NAMESPACE }}" 9 | api_version: v1 10 | kind: Namespace 11 | verify_ssl: false 12 | state: present 13 | 14 | - name: Generate manifest to install AWX Operator 15 | ansible.builtin.template: 16 | src: templates/awx-operator/kustomization.j2 17 | dest: "{{ tmp_dir }}/kustomization.yml" 18 | 19 | - name: Generate manifest to install AWX Operator with timestamp attached, for purposes of operator deletion later 20 | ansible.builtin.template: 21 | src: templates/awx-operator/kustomization.j2 22 | dest: "{{ tmp_dir }}/kustomization.yml.{{ ansible_date_time.iso8601_basic_short }}" 23 | 24 | - name: Copy Operator Source 25 | ansible.builtin.copy: 26 | src: 
"{{ playbook_dir }}/../offline/awx-operator-{{ ANSIBLE_OPERATOR_VERSION }}/config" 27 | dest: "{{ tmp_dir }}/" 28 | remote_src: true 29 | when: 30 | - k8s_offline 31 | 32 | - name: Install AWX Operator with Kustomize 33 | kubernetes.core.k8s: 34 | definition: "{{ lookup('kubernetes.core.kustomize', dir=tmp_dir) }}" 35 | 36 | - name: delete Kustomization file 37 | ansible.builtin.file: 38 | path: "{{ tmp_dir }}/kustomization.yml" 39 | state: absent 40 | 41 | - name: Wait for Operator deployment to be ready 42 | kubernetes.core.k8s_info: 43 | kind: Deployment 44 | wait: yes 45 | name: awx-operator-controller-manager 46 | namespace: "{{ ASCENDER_NAMESPACE }}" 47 | wait_sleep: 10 48 | wait_timeout: 360 49 | 50 | - name: Generate manifest to install AWX ascender-app k8s object with timestamp attached, for purposes of deletion later 51 | ansible.builtin.template: 52 | src: "ascender-deployment/ascender-deployment-{{ k8s_platform }}.yml" 53 | dest: "{{ tmp_dir }}/ascender-deployment-{{ k8s_platform }}.yml.{{ ansible_date_time.iso8601_basic_short }}" 54 | 55 | - name: "Apply AWX ascender-app manifest for {{ k8s_platform }}" 56 | kubernetes.core.k8s: 57 | state: present 58 | definition: "{{ lookup('ansible.builtin.template', 'ascender-deployment/ascender-deployment-dkp.yml') }}" 59 | 60 | - name: Wait for ascender-app-web Deployment to complete setting up (this may take up to 10 minutes) 61 | kubernetes.core.k8s_info: 62 | kind: Deployment 63 | wait: yes 64 | name: ascender-app-web 65 | namespace: "{{ ASCENDER_NAMESPACE }}" 66 | wait_sleep: 10 67 | wait_timeout: 360 68 | register: ascender_web_deployment 69 | 70 | # - ansible.builtin.debug: 71 | # var: ascender_web_deployment 72 | 73 | - name: Set the Ascender URL 74 | ansible.builtin.set_fact: 75 | ascender_ip: "{{ ASCENDER_HOSTNAME }}" 76 | ascender_port: "{{ '443' if k8s_lb_protocol == 'https' else '80' }}" 77 | 78 | - ansible.builtin.debug: 79 | msg: "The Ascender API endpoint is {{ k8s_lb_protocol }}://{{ ascender_ip 
}}:{{ ascender_port }}/api/v2/ping/" 80 | 81 | - name: Wait until Ascender API is Up (This may take between 10-20 mins) 82 | ansible.builtin.uri: 83 | url: "{{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}/api/v2/ping/" 84 | return_content: yes 85 | validate_certs: no 86 | status_code: 87 | - 200 88 | until: uri_output.status == 200 89 | retries: 200 90 | delay: 10 91 | register: uri_output 92 | 93 | - ansible.builtin.debug: 94 | msg: "Ascender API is up" 95 | -------------------------------------------------------------------------------- /playbooks/roles/ascender_install/tasks/ascender_install_k3s.yml: -------------------------------------------------------------------------------- 1 | - name: Retrieve the current time in order to timestamp files 2 | ansible.builtin.setup: 3 | gather_subset: 4 | - date_time 5 | 6 | - name: Create Namespace 7 | kubernetes.core.k8s: 8 | name: "{{ ASCENDER_NAMESPACE }}" 9 | api_version: v1 10 | kind: Namespace 11 | verify_ssl: false 12 | state: present 13 | 14 | - name: Generate manifest to install AWX Operator 15 | ansible.builtin.template: 16 | src: templates/awx-operator/kustomization.j2 17 | dest: "{{ tmp_dir }}/kustomization.yml" 18 | 19 | - name: Generate manifest to install AWX Operator with timestamp attached, for purposes of operator deletion later 20 | ansible.builtin.template: 21 | src: templates/awx-operator/kustomization.j2 22 | dest: "{{ tmp_dir }}/kustomization.yml.{{ ansible_date_time.iso8601_basic_short }}" 23 | 24 | - name: Copy Operator Source 25 | ansible.builtin.copy: 26 | src: "{{ playbook_dir }}/../offline/awx-operator-{{ ANSIBLE_OPERATOR_VERSION }}/config" 27 | dest: "{{ tmp_dir }}/" 28 | remote_src: true 29 | when: k8s_offline | default(false) | bool 30 | 31 | - name: Import Operator Images 32 | ansible.builtin.shell: 33 | cmd: k3s ctr images import {{ item }} 34 | with_fileglob: 35 | - "{{ playbook_dir }}/../offline/images/kube-rbac-proxy*.tar" 36 | - "{{ playbook_dir 
}}/../offline/images/awx-operator*.tar" 37 | when: k8s_offline | default(false) | bool 38 | 39 | - name: Install AWX Operator with Kustomize 40 | kubernetes.core.k8s: 41 | definition: "{{ lookup('kubernetes.core.kustomize', dir=tmp_dir) }}" 42 | verify_ssl: false 43 | 44 | - name: delete Kustomization file 45 | ansible.builtin.file: 46 | path: "{{ tmp_dir }}/kustomization.yml" 47 | state: absent 48 | 49 | - name: Import Ascender Images 50 | ansible.builtin.shell: 51 | cmd: k3s ctr images import {{ item }} 52 | with_fileglob: 53 | - "{{ playbook_dir }}/../offline/images/ascender*.tar" 54 | - "{{ playbook_dir }}/../offline/images/postgres*.tar" 55 | - "{{ playbook_dir }}/../offline/images/redis*.tar" 56 | when: k8s_offline | default(false) | bool 57 | 58 | - name: Wait for Operator deployment to be ready 59 | kubernetes.core.k8s_info: 60 | kind: Deployment 61 | wait: yes 62 | name: awx-operator-controller-manager 63 | namespace: "{{ ASCENDER_NAMESPACE }}" 64 | wait_sleep: 10 65 | wait_timeout: 360 66 | verify_ssl: false 67 | 68 | - name: Generate manifest to install AWX ascender-app k8s object with timestamp attached, for purposes of deletion later 69 | ansible.builtin.template: 70 | src: "ascender-deployment/ascender-deployment-k3s.yml" 71 | dest: "{{ tmp_dir }}/ascender-deployment-k3s.yml.{{ ansible_date_time.iso8601_basic_short }}" 72 | 73 | - name: "Apply AWX ascender-app manifest for {{ k8s_platform }}" 74 | kubernetes.core.k8s: 75 | state: present 76 | definition: "{{ lookup('ansible.builtin.template', 'ascender-deployment/ascender-deployment-k3s.yml') }}" 77 | verify_ssl: false 78 | 79 | - name: Wait for ascender-app-web Deployment to complete setting up (this may take up to 10 minutes) 80 | kubernetes.core.k8s_info: 81 | kind: Deployment 82 | wait: yes 83 | name: ascender-app-web 84 | namespace: "{{ ASCENDER_NAMESPACE }}" 85 | wait_sleep: 10 86 | wait_timeout: 360 87 | verify_ssl: false 88 | register: ascender_web_deployment 89 | 90 | - name: Set the Ascender 
URL 91 | ansible.builtin.set_fact: 92 | ascender_ip: "{{ ASCENDER_HOSTNAME }}" 93 | ascender_port: "{{ '30080' if k3s_service_type == 'NodePort' else '443' if k8s_lb_protocol == 'https' else '80' }}" 94 | 95 | - ansible.builtin.debug: 96 | msg: "The Ascender API endpoint is {{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}/api/v2/ping/" 97 | 98 | - name: Wait until Ascender API is Up (This may take between 10-20 mins) 99 | ansible.builtin.uri: 100 | url: "{{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}/api/v2/ping/" 101 | return_content: yes 102 | validate_certs: no 103 | status_code: 104 | - 200 105 | until: 106 | - uri_output.status|int == 200 107 | - uri_output.url == k8s_lb_protocol + "://" + ascender_ip + ":" + ascender_port + "/api/v2/ping/" 108 | retries: 200 109 | delay: 10 110 | register: uri_output 111 | 112 | - ansible.builtin.debug: 113 | msg: "Ascender API is up" 114 | -------------------------------------------------------------------------------- /playbooks/roles/ascender_install/tasks/ascender_install_rke2.yml: -------------------------------------------------------------------------------- 1 | - name: Retrieve the current time in order to timestamp files 2 | ansible.builtin.setup: 3 | gather_subset: 4 | - date_time 5 | 6 | - name: Create Namespace 7 | kubernetes.core.k8s: 8 | name: "{{ ASCENDER_NAMESPACE }}" 9 | api_version: v1 10 | kind: Namespace 11 | verify_ssl: false 12 | state: present 13 | 14 | - name: Generate manifest to install AWX Operator 15 | ansible.builtin.template: 16 | src: templates/awx-operator/kustomization.j2 17 | dest: "{{ tmp_dir }}/kustomization.yml" 18 | 19 | - name: Generate manifest to install AWX Operator with timestamp attached, for purposes of operator deletion later 20 | ansible.builtin.template: 21 | src: templates/awx-operator/kustomization.j2 22 | dest: "{{ tmp_dir }}/kustomization.yml.{{ ansible_date_time.iso8601_basic_short }}" 23 | 24 | - name: Copy Operator Source 25 | 
ansible.builtin.copy: 26 | src: "{{ playbook_dir }}/../offline/awx-operator-{{ ANSIBLE_OPERATOR_VERSION }}/config" 27 | dest: "{{ tmp_dir }}/" 28 | remote_src: true 29 | when: 30 | - k8s_offline 31 | 32 | - name: Install AWX Operator with Kustomize 33 | kubernetes.core.k8s: 34 | definition: "{{ lookup('kubernetes.core.kustomize', dir=tmp_dir) }}" 35 | verify_ssl: false 36 | 37 | - name: delete Kustomization file 38 | ansible.builtin.file: 39 | path: "{{ tmp_dir }}/kustomization.yml" 40 | state: absent 41 | 42 | - name: Wait for Operator deployment to be ready 43 | kubernetes.core.k8s_info: 44 | kind: Deployment 45 | wait: yes 46 | name: awx-operator-controller-manager 47 | namespace: "{{ ASCENDER_NAMESPACE }}" 48 | wait_sleep: 10 49 | wait_timeout: 360 50 | verify_ssl: false 51 | 52 | - name: Generate manifest to install AWX ascender-app k8s object with timestamp attached, for purposes of deletion later 53 | ansible.builtin.template: 54 | src: "ascender-deployment/ascender-deployment-rke2.yml" 55 | dest: "{{ tmp_dir }}/ascender-deployment-rke2.yml.{{ ansible_date_time.iso8601_basic_short }}" 56 | 57 | - name: "Apply AWX ascender-app manifest for {{ k8s_platform }}" 58 | kubernetes.core.k8s: 59 | state: present 60 | definition: "{{ lookup('ansible.builtin.template', 'ascender-deployment/ascender-deployment-rke2.yml') }}" 61 | verify_ssl: false 62 | 63 | - name: Wait for ascender-app-web Deployment to complete setting up (this may take up to 10 minutes) 64 | kubernetes.core.k8s_info: 65 | kind: Deployment 66 | wait: yes 67 | name: ascender-app-web 68 | namespace: "{{ ASCENDER_NAMESPACE }}" 69 | wait_sleep: 10 70 | wait_timeout: 360 71 | verify_ssl: false 72 | register: ascender_web_deployment 73 | 74 | # - ansible.builtin.debug: 75 | # var: ascender_web_deployment 76 | 77 | - name: Set the Ascender URL 78 | ansible.builtin.set_fact: 79 | ascender_ip: "{{ ASCENDER_HOSTNAME }}" 80 | ascender_port: "{{ '443' if k8s_lb_protocol == 'https' else '80' }}" 81 | 82 | - 
ansible.builtin.debug: 83 | msg: "The Ascender API endpoint is {{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}/api/v2/ping/" 84 | 85 | - name: Wait until Ascender API is Up (This may take between 10-20 mins) 86 | ansible.builtin.uri: 87 | url: "{{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}/api/v2/ping/" 88 | return_content: yes 89 | validate_certs: no 90 | status_code: 200 91 | until: 92 | - uri_output.status|int == 200 93 | - uri_output.url == k8s_lb_protocol + "://" + ascender_ip + ":" + ascender_port + "/api/v2/ping/" 94 | retries: 200 95 | delay: 10 96 | register: uri_output 97 | 98 | # - ansible.builtin.debug: 99 | # var: uri_output 100 | 101 | # - ansible.builtin.debug: 102 | # msg: "The Ascender API CURL returns HTTP Code {{ uri_output.status }}" 103 | 104 | 105 | - ansible.builtin.debug: 106 | msg: "Ascender API is up" 107 | -------------------------------------------------------------------------------- /playbooks/roles/ascender_install/templates/ascender-deployment/additional-spec.yml: -------------------------------------------------------------------------------- 1 | {% if k8s_container_registry %} 2 | image: {{ k8s_container_registry | default("ghcr.io/ctrliq", true) ~ "/ascender" }} 3 | {% else %} 4 | image: {{ ASCENDER_IMAGE }} 5 | {% endif %} 6 | image_version: {{ ASCENDER_VERSION | default("25.0.0") }} 7 | {% if k8s_platform == "k3s" and k8s_offline | default (false) | bool %} 8 | image_pull_policy: Never 9 | {% else %} 10 | image_pull_policy: {{ ascender_image_pull_policy | default("Always") }} 11 | {% endif %} 12 | replicas: {{ ascender_replicas | default(1) }} 13 | init_container_image: {{ k8s_container_registry | default("ghcr.io/ctrliq", true) ~ "/ascender-ee" }} 14 | init_container_image_version: latest 15 | control_plane_ee_image: {{ k8s_container_registry | default("ghcr.io/ctrliq", true) ~ "/ascender-ee" }}:latest 16 | postgres_image: {{ k8s_container_registry | default("docker.io", true) ~ "/postgres" }} 17 | 
postgres_image_version: "13" 18 | redis_image: {{ k8s_container_registry | default("docker.io", true) ~ "/redis" }} 19 | redis_image_version: "7" 20 | {% if k8s_image_pull_secret is defined and k8s_image_pull_secret != 'None' %} 21 | image_pull_secret: {{ k8s_image_pull_secret }} 22 | {% endif %} 23 | {% if k8s_ee_pull_credentials_secret is defined and k8s_ee_pull_credentials_secret != 'None'%} 24 | ee_pull_credentials_secret: {{ k8s_ee_pull_credentials_secret }} 25 | {% endif %} 26 | redis_capabilities: 27 | - CHOWN 28 | - SETUID 29 | - SETGID 30 | {% if ee_images is defined %} 31 | ee_images: 32 | {% for ee in ee_images %} 33 | - name: {{ ee.name }} 34 | image: {{ ee.image}} 35 | {% endfor %} 36 | {% endif %} 37 | -------------------------------------------------------------------------------- /playbooks/roles/ascender_install/templates/ascender-deployment/ascender-deployment-aks.yml: -------------------------------------------------------------------------------- 1 | # jinja2: keep_trailing_newline:True 2 | --- 3 | # Ascender admin password 4 | apiVersion: v1 5 | kind: Secret 6 | metadata: 7 | name: ascender-app-admin-password 8 | namespace: {{ ASCENDER_NAMESPACE }} 9 | stringData: 10 | password: {{ ASCENDER_ADMIN_PASSWORD }} 11 | 12 | {% if ASCENDER_PGSQL_HOST is defined %} 13 | --- 14 | # Ascender postgres host/port and credentials 15 | apiVersion: v1 16 | kind: Secret 17 | metadata: 18 | name: ascender-app-postgres-configuration 19 | namespace: {{ ASCENDER_NAMESPACE }} 20 | stringData: 21 | host: {{ ASCENDER_PGSQL_HOST }} 22 | port: '{{ ASCENDER_PGSQL_PORT }}' 23 | database: {{ ASCENDER_PGSQL_DB }} 24 | username: {{ ASCENDER_PGSQL_USER }} 25 | password: {{ ASCENDER_PGSQL_PWD }} 26 | sslmode: prefer 27 | type: unmanaged 28 | type: Opaque 29 | {% endif %} 30 | {% if custom_cacert_bundle is defined %} 31 | --- 32 | # Ascender Custom CA Bundle 33 | apiVersion: v1 34 | kind: Secret 35 | metadata: 36 | name: ascender-app-custom-cert-bundle 37 | namespace: {{ 
ASCENDER_NAMESPACE }} 38 | data: 39 | bundle-ca.crt: {{ lookup('ansible.builtin.file', custom_cacert_bundle) | b64encode }} 40 | {% endif %} 41 | {% if custom_ldap_cacert is defined %} 42 | --- 43 | # Ascender Custom LDAP CA Cert 44 | apiVersion: v1 45 | kind: Secret 46 | metadata: 47 | name: ascender-app-custom-ldap-cacert 48 | namespace: {{ ASCENDER_NAMESPACE }} 49 | data: 50 | ldap-ca.crt: {{ lookup('ansible.builtin.file', custom_ldap_cacert) | b64encode }} 51 | {% endif %} 52 | {% if k8s_lb_protocol == 'https' %} 53 | --- 54 | # Ascender TLS Certificate and Key 55 | apiVersion: v1 56 | data: 57 | tls.crt: {{ lookup('ansible.builtin.file', tls_crt_path) | b64encode }} 58 | tls.key: {{ lookup('ansible.builtin.file', tls_key_path) | b64encode }} 59 | kind: Secret 60 | metadata: 61 | name: ascender-tls-secret 62 | namespace: {{ ASCENDER_NAMESPACE }} 63 | type: kubernetes.io/tls 64 | {% endif %} 65 | --- 66 | apiVersion: awx.ansible.com/v1beta1 67 | kind: AWX 68 | metadata: 69 | name: ascender-app 70 | namespace: {{ ASCENDER_NAMESPACE }} 71 | spec: 72 | {% include 'additional-spec.yml' %} 73 | admin_user: {{ ASCENDER_ADMIN_USER | default('admin')}} 74 | admin_password_secret: ascender-app-admin-password 75 | garbage_collect_secrets: {{ ascender_garbage_collect_secrets | default('false') }} 76 | {% if ASCENDER_PGSQL_HOST is defined %} 77 | postgres_configuration_secret: ascender-app-postgres-configuration 78 | {% endif %} 79 | {% if custom_cacert_bundle is defined %} 80 | bundle_cacert_secret: ascender-app-custom-cert-bundle 81 | {% endif %} 82 | {% if custom_ldap_cacert is defined %} 83 | ldap_cacert_secret: ascender-app-custom-ldap-cacert 84 | {% endif %} 85 | service_type: ClusterIP 86 | {% if k8s_lb_protocol == 'https' %} 87 | ingress_annotations: | 88 | nginx.ingress.kubernetes.io/rewrite-target: / 89 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 90 | nginx.ingress.kubernetes.io/force-ssl-redirect: "true" 91 | {% endif %} 92 | ingress_type: ingress 93 | 
ingress_controller: nginx 94 | ingress_class_name: nginx 95 | ingress_path: "/" 96 | ingress_path_type: Prefix 97 | ingress_hosts: 98 | - hostname: {{ ASCENDER_HOSTNAME }} 99 | {% if k8s_lb_protocol == 'https' %} 100 | tls_secret: ascender-tls-secret 101 | {% endif %} 102 | extra_settings: 103 | - setting: CSRF_TRUSTED_ORIGINS 104 | value: 105 | - http{% if k8s_lb_protocol == 'https' %}s{% endif %}://{{ ASCENDER_HOSTNAME }} -------------------------------------------------------------------------------- /playbooks/roles/ascender_install/templates/ascender-deployment/ascender-deployment-dkp.yml: -------------------------------------------------------------------------------- 1 | #jinja2: keep_trailing_newline:True 2 | --- 3 | # Ascender admin password 4 | apiVersion: v1 5 | kind: Secret 6 | metadata: 7 | name: ascender-app-admin-password 8 | namespace: {{ ASCENDER_NAMESPACE }} 9 | stringData: 10 | password: {{ ASCENDER_ADMIN_PASSWORD }} 11 | {% if ASCENDER_PGSQL_HOST is defined %} 12 | --- 13 | # Ascender postgres host/port and credentials 14 | apiVersion: v1 15 | kind: Secret 16 | metadata: 17 | name: ascender-app-postgres-configuration 18 | namespace: {{ ASCENDER_NAMESPACE }} 19 | stringData: 20 | host: {{ ASCENDER_PGSQL_HOST }} 21 | port: '{{ ASCENDER_PGSQL_PORT }}' 22 | database: {{ ASCENDER_PGSQL_DB }} 23 | username: {{ ASCENDER_PGSQL_USER }} 24 | password: {{ ASCENDER_PGSQL_PWD }} 25 | sslmode: prefer 26 | type: unmanaged 27 | type: Opaque 28 | {% endif %} 29 | {% if custom_cacert_bundle is defined %} 30 | --- 31 | # Ascender Custom CA Bundle 32 | apiVersion: v1 33 | kind: Secret 34 | metadata: 35 | name: ascender-app-custom-cert-bundle 36 | namespace: {{ ASCENDER_NAMESPACE }} 37 | data: 38 | bundle-ca.crt: {{ lookup('ansible.builtin.file', custom_cacert_bundle) | b64encode }} 39 | {% endif %} 40 | {% if custom_ldap_cacert is defined %} 41 | --- 42 | # Ascender Custom LDAP CA Cert 43 | apiVersion: v1 44 | kind: Secret 45 | metadata: 46 | name: 
ascender-app-custom-ldap-cacert 47 | namespace: {{ ASCENDER_NAMESPACE }} 48 | data: 49 | ldap-ca.crt: {{ lookup('ansible.builtin.file', custom_ldap_cacert) | b64encode }} 50 | {% endif %} 51 | {% if k8s_lb_protocol == 'https' %} 52 | --- 53 | # Ascender TLS Certificate and Key 54 | apiVersion: v1 55 | data: 56 | tls.crt: {{ lookup('ansible.builtin.file', tls_crt_path) | b64encode }} 57 | tls.key: {{ lookup('ansible.builtin.file', tls_key_path) | b64encode }} 58 | kind: Secret 59 | metadata: 60 | name: ascender-tls-secret 61 | namespace: {{ ASCENDER_NAMESPACE }} 62 | type: kubernetes.io/tls 63 | {% endif %} 64 | --- 65 | # Ascender AWX Object 66 | apiVersion: awx.ansible.com/v1beta1 67 | kind: AWX 68 | metadata: 69 | name: ascender-app 70 | namespace: {{ ASCENDER_NAMESPACE }} 71 | spec: 72 | {% include 'additional-spec.yml' %} 73 | service_type: ClusterIP 74 | garbage_collect_secrets: {{ ascender_garbage_collect_secrets | default('false') }} 75 | admin_user: {{ ASCENDER_ADMIN_USER | default('admin')}} 76 | admin_password_secret: ascender-app-admin-password 77 | {% if ASCENDER_PGSQL_HOST is defined %} 78 | postgres_configuration_secret: ascender-app-postgres-configuration 79 | {% endif %} 80 | {% if custom_cacert_bundle is defined %} 81 | bundle_cacert_secret: ascender-app-custom-cert-bundle 82 | {% endif %} 83 | {% if custom_ldap_cacert is defined %} 84 | ldap_cacert_secret: ascender-app-custom-ldap-cacert 85 | {% endif %} 86 | ingress_type: ingress 87 | ingress_path: "/" 88 | ingress_path_type: Prefix 89 | {% if k8s_lb_protocol == 'https' %} 90 | ingress_tls_secret: ascender-tls-secret 91 | {% endif %} 92 | hostname: {{ ASCENDER_HOSTNAME }} 93 | extra_settings: 94 | - setting: CSRF_TRUSTED_ORIGINS 95 | value: 96 | - http{% if k8s_lb_protocol == 'https' %}s{% endif %}://{{ ASCENDER_HOSTNAME }} -------------------------------------------------------------------------------- /playbooks/roles/ascender_install/templates/ascender-deployment/ascender-deployment-eks.yml: 
-------------------------------------------------------------------------------- 1 | #jinja2: keep_trailing_newline:True 2 | --- 3 | # Ascender admin password 4 | apiVersion: v1 5 | kind: Secret 6 | metadata: 7 | name: ascender-app-admin-password 8 | namespace: {{ ASCENDER_NAMESPACE }} 9 | stringData: 10 | password: {{ ASCENDER_ADMIN_PASSWORD }} 11 | 12 | {% if ASCENDER_PGSQL_HOST is defined %} 13 | --- 14 | # Ascender postgres host/port and credentials 15 | apiVersion: v1 16 | kind: Secret 17 | metadata: 18 | name: ascender-app-postgres-configuration 19 | namespace: {{ ASCENDER_NAMESPACE }} 20 | stringData: 21 | host: {{ ASCENDER_PGSQL_HOST }} 22 | port: '{{ ASCENDER_PGSQL_PORT }}' 23 | database: {{ ASCENDER_PGSQL_DB }} 24 | username: {{ ASCENDER_PGSQL_USER }} 25 | password: {{ ASCENDER_PGSQL_PWD }} 26 | sslmode: prefer 27 | type: unmanaged 28 | type: Opaque 29 | {% endif %} 30 | {% if custom_cacert_bundle is defined %} 31 | --- 32 | # Ascender Custom CA Bundle 33 | apiVersion: v1 34 | kind: Secret 35 | metadata: 36 | name: ascender-app-custom-cert-bundle 37 | namespace: {{ ASCENDER_NAMESPACE }} 38 | data: 39 | bundle-ca.crt: {{ lookup('ansible.builtin.file', custom_cacert_bundle) | b64encode }} 40 | {% endif %} 41 | {% if custom_ldap_cacert is defined %} 42 | --- 43 | # Ascender Custom LDAP CA Cert 44 | apiVersion: v1 45 | kind: Secret 46 | metadata: 47 | name: ascender-app-custom-ldap-cacert 48 | namespace: {{ ASCENDER_NAMESPACE }} 49 | data: 50 | ldap-ca.crt: {{ lookup('ansible.builtin.file', custom_ldap_cacert) | b64encode }} 51 | {% endif %} 52 | {% if k8s_lb_protocol == 'https' %} 53 | --- 54 | # Ascender TLS Certificate and Key 55 | apiVersion: v1 56 | data: 57 | tls.crt: {{ lookup('ansible.builtin.file', tls_crt_path) | b64encode }} 58 | tls.key: {{ lookup('ansible.builtin.file', tls_key_path) | b64encode }} 59 | kind: Secret 60 | metadata: 61 | name: ascender-tls-secret 62 | namespace: {{ ASCENDER_NAMESPACE }} 63 | type: kubernetes.io/tls 64 | {% endif 
%} 65 | --- 66 | apiVersion: awx.ansible.com/v1beta1 67 | kind: AWX 68 | metadata: 69 | name: ascender-app 70 | namespace: {{ ASCENDER_NAMESPACE }} 71 | spec: 72 | {% include 'additional-spec.yml' %} 73 | admin_user: {{ ASCENDER_ADMIN_USER | default('admin')}} 74 | admin_password_secret: ascender-app-admin-password 75 | garbage_collect_secrets: {{ ascender_garbage_collect_secrets | default('false') }} 76 | {% if ASCENDER_PGSQL_HOST is defined %} 77 | postgres_configuration_secret: ascender-app-postgres-configuration 78 | {% endif %} 79 | {% if custom_cacert_bundle is defined %} 80 | bundle_cacert_secret: ascender-app-custom-cert-bundle 81 | {% endif %} 82 | {% if custom_ldap_cacert is defined %} 83 | ldap_cacert_secret: ascender-app-custom-ldap-cacert 84 | {% endif %} 85 | service_type: ClusterIP 86 | {% if k8s_lb_protocol == 'https' %} 87 | ingress_annotations: | 88 | nginx.ingress.kubernetes.io/rewrite-target: / 89 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 90 | nginx.ingress.kubernetes.io/force-ssl-redirect: "true" 91 | {% endif %} 92 | ingress_type: ingress 93 | ingress_controller: nginx 94 | ingress_class_name: nginx 95 | ingress_path: "/" 96 | ingress_path_type: Prefix 97 | ingress_hosts: 98 | - hostname: {{ ASCENDER_HOSTNAME }} 99 | {% if k8s_lb_protocol == 'https' %} 100 | tls_secret: ascender-tls-secret 101 | {% endif %} 102 | extra_settings: 103 | - setting: CSRF_TRUSTED_ORIGINS 104 | value: 105 | - http{% if k8s_lb_protocol == 'https' %}s{% endif %}://{{ ASCENDER_HOSTNAME }} -------------------------------------------------------------------------------- /playbooks/roles/ascender_install/templates/ascender-deployment/ascender-deployment-gke.yml: -------------------------------------------------------------------------------- 1 | #jinja2: keep_trailing_newline:True 2 | --- 3 | # Ascender admin password 4 | apiVersion: v1 5 | kind: Secret 6 | metadata: 7 | name: ascender-app-admin-password 8 | namespace: {{ ASCENDER_NAMESPACE }} 9 | stringData: 10 
| password: {{ ASCENDER_ADMIN_PASSWORD }} 11 | 12 | {% if ASCENDER_PGSQL_HOST is defined %} 13 | --- 14 | # Ascender postgres host/port and credentials 15 | apiVersion: v1 16 | kind: Secret 17 | metadata: 18 | name: ascender-app-postgres-configuration 19 | namespace: {{ ASCENDER_NAMESPACE }} 20 | stringData: 21 | host: {{ ASCENDER_PGSQL_HOST }} 22 | port: '{{ ASCENDER_PGSQL_PORT }}' 23 | database: {{ ASCENDER_PGSQL_DB }} 24 | username: {{ ASCENDER_PGSQL_USER }} 25 | password: {{ ASCENDER_PGSQL_PWD }} 26 | sslmode: prefer 27 | type: unmanaged 28 | type: Opaque 29 | {% endif %} 30 | {% if custom_cacert_bundle is defined %} 31 | --- 32 | # Ascender Custom CA Bundle 33 | apiVersion: v1 34 | kind: Secret 35 | metadata: 36 | name: ascender-app-custom-cert-bundle 37 | namespace: {{ ASCENDER_NAMESPACE }} 38 | data: 39 | bundle-ca.crt: {{ lookup('ansible.builtin.file', custom_cacert_bundle) | b64encode }} 40 | {% endif %} 41 | {% if custom_ldap_cacert is defined %} 42 | --- 43 | # Ascender Custom LDAP CA Cert 44 | apiVersion: v1 45 | kind: Secret 46 | metadata: 47 | name: ascender-app-custom-ldap-cacert 48 | namespace: {{ ASCENDER_NAMESPACE }} 49 | data: 50 | ldap-ca.crt: {{ lookup('ansible.builtin.file', custom_ldap_cacert) | b64encode }} 51 | {% endif %} 52 | {% if k8s_lb_protocol == 'https' %} 53 | --- 54 | # Ascender TLS Certificate and Key 55 | apiVersion: v1 56 | data: 57 | tls.crt: {{ lookup('ansible.builtin.file', tls_crt_path) | b64encode }} 58 | tls.key: {{ lookup('ansible.builtin.file', tls_key_path) | b64encode }} 59 | kind: Secret 60 | metadata: 61 | name: ascender-tls-secret 62 | namespace: {{ ASCENDER_NAMESPACE }} 63 | type: kubernetes.io/tls 64 | {% endif %} 65 | --- 66 | apiVersion: awx.ansible.com/v1beta1 67 | kind: AWX 68 | metadata: 69 | name: ascender-app 70 | namespace: {{ ASCENDER_NAMESPACE }} 71 | spec: 72 | {% include 'additional-spec.yml' %} 73 | admin_user: {{ ASCENDER_ADMIN_USER | default('admin')}} 74 | admin_password_secret: 
ascender-app-admin-password 75 | garbage_collect_secrets: {{ ascender_garbage_collect_secrets | default('false') }} 76 | {% if ASCENDER_PGSQL_HOST is defined %} 77 | postgres_configuration_secret: ascender-app-postgres-configuration 78 | {% endif %} 79 | {% if custom_cacert_bundle is defined %} 80 | bundle_cacert_secret: ascender-app-custom-cert-bundle 81 | {% endif %} 82 | {% if custom_ldap_cacert is defined %} 83 | ldap_cacert_secret: ascender-app-custom-ldap-cacert 84 | {% endif %} 85 | service_type: ClusterIP 86 | {% if k8s_lb_protocol == 'https' %} 87 | ingress_annotations: | 88 | nginx.ingress.kubernetes.io/rewrite-target: / 89 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 90 | nginx.ingress.kubernetes.io/force-ssl-redirect: "true" 91 | {% endif %} 92 | ingress_type: ingress 93 | ingress_controller: nginx 94 | ingress_class_name: nginx 95 | ingress_path: "/" 96 | ingress_path_type: Prefix 97 | ingress_hosts: 98 | - hostname: {{ ASCENDER_HOSTNAME }} 99 | {% if k8s_lb_protocol == 'https' %} 100 | tls_secret: ascender-tls-secret 101 | {% endif %} 102 | extra_settings: 103 | - setting: CSRF_TRUSTED_ORIGINS 104 | value: 105 | - http{% if k8s_lb_protocol == 'https' %}s{% endif %}://{{ ASCENDER_HOSTNAME }} -------------------------------------------------------------------------------- /playbooks/roles/ascender_install/templates/ascender-deployment/ascender-deployment-k3s.yml: -------------------------------------------------------------------------------- 1 | #jinja2: keep_trailing_newline:True 2 | --- 3 | # Ascender admin password 4 | apiVersion: v1 5 | kind: Secret 6 | metadata: 7 | name: ascender-app-admin-password 8 | namespace: {{ ASCENDER_NAMESPACE }} 9 | stringData: 10 | password: {{ ASCENDER_ADMIN_PASSWORD }} 11 | {% if ASCENDER_PGSQL_HOST is defined %} 12 | --- 13 | # Ascender postgres host/port and credentials 14 | apiVersion: v1 15 | kind: Secret 16 | metadata: 17 | name: ascender-app-postgres-configuration 18 | namespace: {{ ASCENDER_NAMESPACE }} 
19 | stringData: 20 | host: {{ ASCENDER_PGSQL_HOST }} 21 | port: '{{ ASCENDER_PGSQL_PORT }}' 22 | database: {{ ASCENDER_PGSQL_DB }} 23 | username: {{ ASCENDER_PGSQL_USER }} 24 | password: {{ ASCENDER_PGSQL_PWD }} 25 | sslmode: prefer 26 | type: unmanaged 27 | type: Opaque 28 | {% endif %} 29 | {% if custom_cacert_bundle is defined %} 30 | --- 31 | # Ascender Custom CA Bundle 32 | apiVersion: v1 33 | kind: Secret 34 | metadata: 35 | name: ascender-app-custom-cert-bundle 36 | namespace: {{ ASCENDER_NAMESPACE }} 37 | data: 38 | bundle-ca.crt: {{ lookup('ansible.builtin.file', custom_cacert_bundle) | b64encode }} 39 | {% endif %} 40 | {% if custom_ldap_cacert is defined %} 41 | --- 42 | # Ascender Custom LDAP CA Cert 43 | apiVersion: v1 44 | kind: Secret 45 | metadata: 46 | name: ascender-app-custom-ldap-cacert 47 | namespace: {{ ASCENDER_NAMESPACE }} 48 | data: 49 | ldap-ca.crt: {{ lookup('ansible.builtin.file', custom_ldap_cacert) | b64encode }} 50 | {% endif %} 51 | {% if k8s_lb_protocol == 'https' %} 52 | --- 53 | # Ascender TLS Certificate and Key 54 | apiVersion: v1 55 | data: 56 | tls.crt: {{ lookup('ansible.builtin.file', tls_crt_path) | b64encode }} 57 | tls.key: {{ lookup('ansible.builtin.file', tls_key_path) | b64encode }} 58 | kind: Secret 59 | metadata: 60 | name: ascender-tls-secret 61 | namespace: {{ ASCENDER_NAMESPACE }} 62 | type: kubernetes.io/tls 63 | {% endif %} 64 | --- 65 | # Ascender AWX Object 66 | apiVersion: awx.ansible.com/v1beta1 67 | kind: AWX 68 | metadata: 69 | name: ascender-app 70 | namespace: {{ ASCENDER_NAMESPACE }} 71 | spec: 72 | {% include 'additional-spec.yml' %} 73 | {% if k3s_service_type == "NodePort" %} 74 | service_type: nodeport 75 | nodeport_port: 30080 76 | {% else %} 77 | service_type: ClusterIP 78 | ingress_type: ingress 79 | ingress_path: "/" 80 | ingress_path_type: Prefix 81 | {% if k8s_lb_protocol == 'https' %} 82 | ingress_tls_secret: ascender-tls-secret 83 | {% endif %} 84 | hostname: {{ ASCENDER_HOSTNAME }} 85 | {% 
endif %} 86 | garbage_collect_secrets: {{ ascender_garbage_collect_secrets | default('false') }} 87 | admin_user: {{ ASCENDER_ADMIN_USER | default('admin')}} 88 | admin_password_secret: ascender-app-admin-password 89 | {% if ASCENDER_PGSQL_HOST is defined %} 90 | postgres_configuration_secret: ascender-app-postgres-configuration 91 | {% endif %} 92 | {% if custom_cacert_bundle is defined %} 93 | bundle_cacert_secret: ascender-app-custom-cert-bundle 94 | {% endif %} 95 | {% if custom_ldap_cacert is defined %} 96 | ldap_cacert_secret: ascender-app-custom-ldap-cacert 97 | {% endif %} 98 | extra_settings: 99 | - setting: CSRF_TRUSTED_ORIGINS 100 | value: 101 | - http://{{ ASCENDER_HOSTNAME }} 102 | - https://{{ ASCENDER_HOSTNAME }} 103 | -------------------------------------------------------------------------------- /playbooks/roles/ascender_install/templates/ascender-deployment/ascender-deployment-rke2.yml: -------------------------------------------------------------------------------- 1 | #jinja2: keep_trailing_newline:True 2 | --- 3 | # Ascender admin password 4 | apiVersion: v1 5 | kind: Secret 6 | metadata: 7 | name: ascender-app-admin-password 8 | namespace: {{ ASCENDER_NAMESPACE }} 9 | stringData: 10 | password: {{ ASCENDER_ADMIN_PASSWORD }} 11 | {% if ASCENDER_PGSQL_HOST is defined %} 12 | --- 13 | # Ascender postgres host/port and credentials 14 | apiVersion: v1 15 | kind: Secret 16 | metadata: 17 | name: ascender-app-postgres-configuration 18 | namespace: {{ ASCENDER_NAMESPACE }} 19 | stringData: 20 | host: {{ ASCENDER_PGSQL_HOST }} 21 | port: '{{ ASCENDER_PGSQL_PORT }}' 22 | database: {{ ASCENDER_PGSQL_DB }} 23 | username: {{ ASCENDER_PGSQL_USER }} 24 | password: {{ ASCENDER_PGSQL_PWD }} 25 | sslmode: prefer 26 | type: unmanaged 27 | type: Opaque 28 | {% endif %} 29 | {% if custom_cacert_bundle is defined %} 30 | --- 31 | # Ascender Custom CA Bundle 32 | apiVersion: v1 33 | kind: Secret 34 | metadata: 35 | name: ascender-app-custom-cert-bundle 36 | 
namespace: {{ ASCENDER_NAMESPACE }} 37 | data: 38 | bundle-ca.crt: {{ lookup('ansible.builtin.file', custom_cacert_bundle) | b64encode }} 39 | {% endif %} 40 | {% if custom_ldap_cacert is defined %} 41 | --- 42 | # Ascender Custom LDAP CA Cert 43 | apiVersion: v1 44 | kind: Secret 45 | metadata: 46 | name: ascender-app-custom-ldap-cacert 47 | namespace: {{ ASCENDER_NAMESPACE }} 48 | data: 49 | ldap-ca.crt: {{ lookup('ansible.builtin.file', custom_ldap_cacert) | b64encode }} 50 | {% endif %} 51 | {% if k8s_lb_protocol == 'https' %} 52 | --- 53 | # Ascender TLS Certificate and Key 54 | apiVersion: v1 55 | data: 56 | tls.crt: {{ lookup('ansible.builtin.file', tls_crt_path) | b64encode }} 57 | tls.key: {{ lookup('ansible.builtin.file', tls_key_path) | b64encode }} 58 | kind: Secret 59 | metadata: 60 | name: ascender-tls-secret 61 | namespace: {{ ASCENDER_NAMESPACE }} 62 | type: kubernetes.io/tls 63 | {% endif %} 64 | --- 65 | # Ascender AWX Object 66 | apiVersion: awx.ansible.com/v1beta1 67 | kind: AWX 68 | metadata: 69 | name: ascender-app 70 | namespace: {{ ASCENDER_NAMESPACE }} 71 | spec: 72 | {% include 'additional-spec.yml' %} 73 | garbage_collect_secrets: {{ ascender_garbage_collect_secrets | default('false') }} 74 | admin_user: {{ ASCENDER_ADMIN_USER | default('admin')}} 75 | admin_password_secret: ascender-app-admin-password 76 | {% if ASCENDER_PGSQL_HOST is defined %} 77 | postgres_configuration_secret: ascender-app-postgres-configuration 78 | {% endif %} 79 | {% if custom_cacert_bundle is defined %} 80 | bundle_cacert_secret: ascender-app-custom-cert-bundle 81 | {% endif %} 82 | {% if custom_ldap_cacert is defined %} 83 | ldap_cacert_secret: ascender-app-custom-ldap-cacert 84 | {% endif %} 85 | ingress_type: ingress 86 | ingress_path: "/" 87 | ingress_path_type: Prefix 88 | {% if k8s_lb_protocol == 'https' %} 89 | ingress_tls_secret: ascender-tls-secret 90 | {% endif %} 91 | hostname: {{ ASCENDER_HOSTNAME }} 92 | extra_settings: 93 | - setting: 
CSRF_TRUSTED_ORIGINS 94 | value: 95 | - http{% if k8s_lb_protocol == 'https' %}s{% endif %}://{{ ASCENDER_HOSTNAME }} -------------------------------------------------------------------------------- /playbooks/roles/ascender_install/templates/awx-operator/kustomization.j2: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | # Find the latest tag here: https://github.com/ansible/awx-operator/releases 5 | # - github.com/ansible/awx-operator/config/default?ref= 6 | {% if k8s_offline | default (false) | bool %} 7 | - ./config/default 8 | {% else %} 9 | - github.com/ansible/awx-operator/config/default?ref={{ ANSIBLE_OPERATOR_VERSION }} 10 | {% endif %} 11 | 12 | # Set the image tags to match the git version from above 13 | images: 14 | - name: {{ k8s_container_registry | default("quay.io/ansible", true) ~ "/awx-operator" }} 15 | newTag: {{ ANSIBLE_OPERATOR_VERSION }} 16 | 17 | # Specify a custom namespace in which to install AWX 18 | namespace: {{ ASCENDER_NAMESPACE }} 19 | -------------------------------------------------------------------------------- /playbooks/roles/ascender_install/templates/storage-classes/aks-sc.yml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: aks-standard 5 | provisioner: kubernetes.io/azure-disk 6 | parameters: 7 | storageaccounttype: Standard_LRS 8 | kind: managed -------------------------------------------------------------------------------- /playbooks/roles/ascender_install/templates/storage-classes/eks-sc.yml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: eks-standard 5 | provisioner: kubernetes.io/aws-ebs 6 | parameters: 7 | type: gp2 8 | fsType: ext4 
-------------------------------------------------------------------------------- /playbooks/roles/ascender_install/templates/storage-classes/gke-sc.yml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: gke-standard 5 | provisioner: kubernetes.io/gce-pd 6 | parameters: 7 | type: pd-standard 8 | fstype: ext4 9 | replication-type: none -------------------------------------------------------------------------------- /playbooks/roles/ascender_react/tasks/react_install_aks.yml: -------------------------------------------------------------------------------- 1 | - name: Retrieve the current time in order to timestamp files 2 | ansible.builtin.setup: 3 | gather_subset: 4 | - date_time 5 | 6 | - name: Create Namespace 7 | kubernetes.core.k8s: 8 | name: "{{ REACT_NAMESPACE }}" 9 | api_version: v1 10 | kind: Namespace 11 | verify_ssl: false 12 | state: present 13 | 14 | - name: Ensure React directory exists 15 | ansible.builtin.file: 16 | path: "{{ tmp_dir }}/react/" 17 | state: directory 18 | 19 | - name: Generate manifest to install EDA Operator 20 | ansible.builtin.template: 21 | src: templates/eda-operator/kustomization.j2 22 | dest: "{{ tmp_dir }}/react/kustomization.yml" 23 | 24 | - name: Generate manifest to install EDA Operator with timestamp attached, for purposes of operator deletion later 25 | ansible.builtin.template: 26 | src: templates/eda-operator/kustomization.j2 27 | dest: "{{ tmp_dir }}/react/kustomization.yml.{{ ansible_date_time.iso8601_basic_short }}" 28 | 29 | 30 | 31 | # Import EDA Operator images here 32 | 33 | 34 | - name: Install EDA Operator with Kustomize 35 | kubernetes.core.k8s: 36 | definition: "{{ lookup('kubernetes.core.kustomize', dir=tmp_dir ~'/react/') }}" 37 | verify_ssl: false 38 | 39 | - name: delete Kustomization file 40 | ansible.builtin.file: 41 | path: "{{ tmp_dir }}/react/kustomization.yml" 42 | state: absent 43 | 44 | # 
Import React images here 45 | 46 | - name: Wait for Operator deployment to be ready 47 | kubernetes.core.k8s_info: 48 | kind: Deployment 49 | wait: yes 50 | name: eda-server-operator-controller-manager 51 | namespace: "{{ REACT_NAMESPACE }}" 52 | wait_sleep: 10 53 | wait_timeout: 360 54 | verify_ssl: false 55 | 56 | - name: Generate manifest to install AWX ascender-app k8s object with timestamp attached, for purposes of deletion later 57 | ansible.builtin.template: 58 | src: "react-deployment/react-deployment-aks.yml" 59 | dest: "{{ tmp_dir }}/react-deployment-aks.yml.{{ ansible_date_time.iso8601_basic_short }}" 60 | 61 | - name: "Apply React manifest for {{ k8s_platform }}" 62 | kubernetes.core.k8s: 63 | state: present 64 | definition: "{{ lookup('ansible.builtin.template', 'react-deployment/react-deployment-aks.yml') }}" 65 | verify_ssl: false 66 | 67 | - name: Wait for ascender-react-scheduler Deployment to complete setting up (this may take up to 10 minutes) 68 | kubernetes.core.k8s_info: 69 | kind: Deployment 70 | wait: yes 71 | name: ascender-react-scheduler 72 | namespace: "{{ REACT_NAMESPACE }}" 73 | wait_sleep: 10 74 | wait_timeout: 360 75 | verify_ssl: false 76 | register: react_deployment 77 | 78 | - name: Set the Ascender React URL 79 | ansible.builtin.set_fact: 80 | react_ip: "{{ REACT_HOSTNAME }}" 81 | react_port: "{{ '443' if k8s_lb_protocol == 'https' else '80' }}" 82 | 83 | - ansible.builtin.debug: 84 | msg: "The Ascender React API endpoint is {{ k8s_lb_protocol }}://{{ react_ip }}:{{ react_port }}/api/v2/ping/" 85 | 86 | -------------------------------------------------------------------------------- /playbooks/roles/ascender_react/tasks/react_install_eks.yml: -------------------------------------------------------------------------------- 1 | - name: Retrieve the current time in order to timestamp files 2 | ansible.builtin.setup: 3 | gather_subset: 4 | - date_time 5 | 6 | - name: Create Namespace 7 | kubernetes.core.k8s: 8 | name: "{{ 
REACT_NAMESPACE }}" 9 | api_version: v1 10 | kind: Namespace 11 | verify_ssl: false 12 | state: present 13 | 14 | - name: Ensure React directory exists 15 | ansible.builtin.file: 16 | path: "{{ tmp_dir }}/react/" 17 | state: directory 18 | 19 | - name: Generate manifest to install EDA Operator 20 | ansible.builtin.template: 21 | src: templates/eda-operator/kustomization.j2 22 | dest: "{{ tmp_dir }}/react/kustomization.yml" 23 | 24 | - name: Generate manifest to install EDA Operator with timestamp attached, for purposes of operator deletion later 25 | ansible.builtin.template: 26 | src: templates/eda-operator/kustomization.j2 27 | dest: "{{ tmp_dir }}/react/kustomization.yml.{{ ansible_date_time.iso8601_basic_short }}" 28 | 29 | 30 | 31 | # Import EDA Operator images here 32 | 33 | 34 | - name: Install EDA Operator with Kustomize 35 | kubernetes.core.k8s: 36 | definition: "{{ lookup('kubernetes.core.kustomize', dir=tmp_dir ~'/react/') }}" 37 | verify_ssl: false 38 | 39 | - name: delete Kustomization file 40 | ansible.builtin.file: 41 | path: "{{ tmp_dir }}/react/kustomization.yml" 42 | state: absent 43 | 44 | # Import React images here 45 | 46 | - name: Wait for Operator deployment to be ready 47 | kubernetes.core.k8s_info: 48 | kind: Deployment 49 | wait: yes 50 | name: eda-server-operator-controller-manager 51 | namespace: "{{ REACT_NAMESPACE }}" 52 | wait_sleep: 10 53 | wait_timeout: 360 54 | verify_ssl: false 55 | 56 | - name: Generate manifest to install AWX ascender-app k8s object with timestamp attached, for purposes of deletion later 57 | ansible.builtin.template: 58 | src: "react-deployment/react-deployment-eks.yml" 59 | dest: "{{ tmp_dir }}/react-deployment-eks.yml.{{ ansible_date_time.iso8601_basic_short }}" 60 | 61 | - name: "Apply React manifest for {{ k8s_platform }}" 62 | kubernetes.core.k8s: 63 | state: present 64 | definition: "{{ lookup('ansible.builtin.template', 'react-deployment/react-deployment-eks.yml') }}" 65 | verify_ssl: false 66 | 67 | - 
name: Wait for ascender-react-scheduler Deployment to complete setting up (this may take up to 10 minutes) 68 | kubernetes.core.k8s_info: 69 | kind: Deployment 70 | wait: yes 71 | name: ascender-react-scheduler 72 | namespace: "{{ REACT_NAMESPACE }}" 73 | wait_sleep: 10 74 | wait_timeout: 360 75 | verify_ssl: false 76 | register: react_deployment 77 | 78 | - name: Set the Ascender React URL 79 | ansible.builtin.set_fact: 80 | react_ip: "{{ REACT_HOSTNAME }}" 81 | react_port: "{{ '443' if k8s_lb_protocol == 'https' else '80' }}" 82 | 83 | - ansible.builtin.debug: 84 | msg: "The Ascender React API endpoint is {{ k8s_lb_protocol }}://{{ react_ip }}:{{ react_port }}/api/v2/ping/" 85 | 86 | -------------------------------------------------------------------------------- /playbooks/roles/ascender_react/tasks/react_install_gke.yml: -------------------------------------------------------------------------------- 1 | - name: Retrieve the current time in order to timestamp files 2 | ansible.builtin.setup: 3 | gather_subset: 4 | - date_time 5 | 6 | - name: Create Namespace 7 | kubernetes.core.k8s: 8 | name: "{{ REACT_NAMESPACE }}" 9 | api_version: v1 10 | kind: Namespace 11 | verify_ssl: false 12 | state: present 13 | 14 | - name: Ensure React directory exists 15 | ansible.builtin.file: 16 | path: "{{ tmp_dir }}/react/" 17 | state: directory 18 | 19 | - name: Generate manifest to install EDA Operator 20 | ansible.builtin.template: 21 | src: templates/eda-operator/kustomization.j2 22 | dest: "{{ tmp_dir }}/react/kustomization.yml" 23 | 24 | - name: Generate manifest to install EDA Operator with timestamp attached, for purposes of operator deletion later 25 | ansible.builtin.template: 26 | src: templates/eda-operator/kustomization.j2 27 | dest: "{{ tmp_dir }}/react/kustomization.yml.{{ ansible_date_time.iso8601_basic_short }}" 28 | 29 | 30 | 31 | # Import EDA Operator images here 32 | 33 | 34 | - name: Install EDA Operator with Kustomize 35 | kubernetes.core.k8s: 36 | 
definition: "{{ lookup('kubernetes.core.kustomize', dir=tmp_dir ~'/react/') }}" 37 | verify_ssl: false 38 | 39 | - name: delete Kustomization file 40 | ansible.builtin.file: 41 | path: "{{ tmp_dir }}/react/kustomization.yml" 42 | state: absent 43 | 44 | # Import React images here 45 | 46 | - name: Wait for Operator deployment to be ready 47 | kubernetes.core.k8s_info: 48 | kind: Deployment 49 | wait: yes 50 | name: eda-server-operator-controller-manager 51 | namespace: "{{ REACT_NAMESPACE }}" 52 | wait_sleep: 10 53 | wait_timeout: 360 54 | verify_ssl: false 55 | 56 | - name: Generate manifest to install AWX ascender-app k8s object with timestamp attached, for purposes of deletion later 57 | ansible.builtin.template: 58 | src: "react-deployment/react-deployment-gke.yml" 59 | dest: "{{ tmp_dir }}/react-deployment-gke.yml.{{ ansible_date_time.iso8601_basic_short }}" 60 | 61 | - name: "Apply React manifest for {{ k8s_platform }}" 62 | kubernetes.core.k8s: 63 | state: present 64 | definition: "{{ lookup('ansible.builtin.template', 'react-deployment/react-deployment-gke.yml') }}" 65 | verify_ssl: false 66 | 67 | - name: Wait for ascender-react-scheduler Deployment to complete setting up (this may take up to 10 minutes) 68 | kubernetes.core.k8s_info: 69 | kind: Deployment 70 | wait: yes 71 | name: ascender-react-scheduler 72 | namespace: "{{ REACT_NAMESPACE }}" 73 | wait_sleep: 10 74 | wait_timeout: 360 75 | verify_ssl: false 76 | register: react_deployment 77 | 78 | - name: Set the Ascender React URL 79 | ansible.builtin.set_fact: 80 | react_ip: "{{ REACT_HOSTNAME }}" 81 | react_port: "{{ '443' if k8s_lb_protocol == 'https' else '80' }}" 82 | 83 | - ansible.builtin.debug: 84 | msg: "The Ascender React API endpoint is {{ k8s_lb_protocol }}://{{ react_ip }}:{{ react_port }}/api/v2/ping/" 85 | 86 | -------------------------------------------------------------------------------- /playbooks/roles/ascender_react/tasks/react_install_k3s.yml: 
-------------------------------------------------------------------------------- 1 | - name: Retrieve the current time in order to timestamp files 2 | ansible.builtin.setup: 3 | gather_subset: 4 | - date_time 5 | 6 | - name: Create Namespace 7 | kubernetes.core.k8s: 8 | name: "{{ REACT_NAMESPACE }}" 9 | api_version: v1 10 | kind: Namespace 11 | verify_ssl: false 12 | state: present 13 | 14 | - name: Ensure React directory exists 15 | ansible.builtin.file: 16 | path: "{{ tmp_dir }}/react/" 17 | state: directory 18 | 19 | - name: Generate manifest to install EDA Operator 20 | ansible.builtin.template: 21 | src: templates/eda-operator/kustomization.j2 22 | dest: "{{ tmp_dir }}/react/kustomization.yml" 23 | 24 | - name: Generate manifest to install EDA Operator with timestamp attached, for purposes of operator deletion later 25 | ansible.builtin.template: 26 | src: templates/eda-operator/kustomization.j2 27 | dest: "{{ tmp_dir }}/react/kustomization.yml.{{ ansible_date_time.iso8601_basic_short }}" 28 | 29 | 30 | 31 | # Import EDA Operator images here 32 | 33 | 34 | - name: Install EDA Operator with Kustomize 35 | kubernetes.core.k8s: 36 | definition: "{{ lookup('kubernetes.core.kustomize', dir=tmp_dir ~'/react/') }}" 37 | verify_ssl: false 38 | 39 | - name: delete Kustomization file 40 | ansible.builtin.file: 41 | path: "{{ tmp_dir }}/react/kustomization.yml" 42 | state: absent 43 | 44 | # Import React images here 45 | 46 | - name: Wait for Operator deployment to be ready 47 | kubernetes.core.k8s_info: 48 | kind: Deployment 49 | wait: yes 50 | name: eda-server-operator-controller-manager 51 | namespace: "{{ REACT_NAMESPACE }}" 52 | wait_sleep: 10 53 | wait_timeout: 360 54 | verify_ssl: false 55 | 56 | - name: Generate manifest to install AWX ascender-app k8s object with timestamp attached, for purposes of deletion later 57 | ansible.builtin.template: 58 | src: "react-deployment/react-deployment-k3s.yml" 59 | dest: "{{ tmp_dir }}/react-deployment-k3s.yml.{{ 
ansible_date_time.iso8601_basic_short }}" 60 | 61 | - name: "Apply React manifest for {{ k8s_platform }}" 62 | kubernetes.core.k8s: 63 | state: present 64 | definition: "{{ lookup('ansible.builtin.template', 'react-deployment/react-deployment-k3s.yml') }}" 65 | verify_ssl: false 66 | 67 | - name: Wait for ascender-react-scheduler Deployment to complete setting up (this may take up to 10 minutes) 68 | kubernetes.core.k8s_info: 69 | kind: Deployment 70 | wait: yes 71 | name: ascender-react-scheduler 72 | namespace: "{{ REACT_NAMESPACE }}" 73 | wait_sleep: 10 74 | wait_timeout: 360 75 | verify_ssl: false 76 | register: react_deployment 77 | 78 | - name: Set the Ascender React URL 79 | ansible.builtin.set_fact: 80 | react_ip: "{{ REACT_HOSTNAME }}" 81 | react_port: "{{ '443' if k8s_lb_protocol == 'https' else '80' }}" 82 | 83 | - ansible.builtin.debug: 84 | msg: "The Ascender React API endpoint is {{ k8s_lb_protocol }}://{{ react_ip }}:{{ react_port }}/api/v2/ping/" 85 | 86 | -------------------------------------------------------------------------------- /playbooks/roles/ascender_react/templates/eda-operator/kustomization.j2: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | # Find the latest tag here: https://github.com/ansible/eda-server-operator/releases 5 | # - https://github.com/ansible/eda-server-operator/config/default?ref= 6 | {% if k8s_offline | default (false) | bool %} 7 | - ./config/default 8 | {% else %} 9 | - https://github.com/ansible/eda-server-operator/config/default?ref={{ REACT_OPERATOR_VERSION }} 10 | {% endif %} 11 | 12 | # Set the image tags to match the git version from above 13 | images: 14 | - name: {{ k8s_container_registry | default("quay.io/ansible") ~ "/eda-server-operator" }} 15 | newTag: {{ REACT_OPERATOR_VERSION }} 16 | 17 | # Specify a custom namespace in which to install EDA 18 | namespace: {{ REACT_NAMESPACE }} 
-------------------------------------------------------------------------------- /playbooks/roles/ascender_react/templates/react-deployment/react-deployment-aks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: ascender-react-admin-password 6 | namespace: {{ REACT_NAMESPACE }} 7 | stringData: 8 | password: {{ REACT_ADMIN_PASSWORD }} 9 | {% if ASCENDER_PGSQL_HOST is defined %} 10 | --- 11 | # Ascender postgres host/port and credentials 12 | apiVersion: v1 13 | kind: Secret 14 | metadata: 15 | name: ascender-app-postgres-configuration 16 | namespace: {{ REACT_NAMESPACE }} 17 | stringData: 18 | host: {{ REACT_PGSQL_HOST }} 19 | port: '{{ REACT_PGSQL_PORT }}' 20 | database: {{ REACT_PGSQL_DB }} 21 | username: {{ REACT_PGSQL_USER }} 22 | password: {{ REACT_PGSQL_PWD }} 23 | sslmode: prefer 24 | type: unmanaged 25 | type: Opaque 26 | {% endif %} 27 | {% if k8s_lb_protocol == 'https' %} 28 | --- 29 | apiVersion: v1 30 | data: 31 | tls.crt: {{ lookup('ansible.builtin.file', react_tls_crt_path) | b64encode }} 32 | tls.key: {{ lookup('ansible.builtin.file', react_tls_key_path) | b64encode }} 33 | kind: Secret 34 | metadata: 35 | name: ascender-react-tls-secret 36 | namespace: {{ REACT_NAMESPACE }} 37 | type: kubernetes.io/tls 38 | {% endif %} 39 | --- 40 | # EDA Server Object 41 | apiVersion: eda.ansible.com/v1alpha1 42 | kind: EDA 43 | metadata: 44 | name: ascender-react 45 | namespace: {{ REACT_NAMESPACE }} 46 | spec: 47 | automation_server_url: https://{{ ASCENDER_HOSTNAME }} 48 | image: {{ REACT_IMAGE }} 49 | image_version: {{ REACT_IMAGE_VERSION }} 50 | image_web: {{ REACT_IMAGE_WEB }} 51 | image_web_version: {{ REACT_IMAGE_WEB_VERSION }} 52 | admin_user: {{ REACT_ADMIN_USER }} 53 | admin_password_secret: ascender-react-admin-password 54 | service_type: ClusterIP 55 | ingress_type: ingress 56 | {% if k8s_lb_protocol == 'https' %} 57 | ingress_tls_secret: 
ascender-react-tls-secret 58 | {% endif %} 59 | ingress_path: "/" 60 | ingress_path_type: Prefix 61 | hostname: {{ REACT_HOSTNAME }} 62 | no_log: false 63 | websocket_ssl_verify: false 64 | image_pull_policy: Always 65 | extra_settings: 66 | - setting: CSRF_TRUSTED_ORIGINS 67 | value: 68 | - http{% if k8s_lb_protocol == 'https' %}s{% endif %}://{{ REACT_HOSTNAME }} -------------------------------------------------------------------------------- /playbooks/roles/ascender_react/templates/react-deployment/react-deployment-eks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: ascender-react-admin-password 6 | namespace: {{ REACT_NAMESPACE }} 7 | stringData: 8 | password: {{ REACT_ADMIN_PASSWORD }} 9 | {% if ASCENDER_PGSQL_HOST is defined %} 10 | --- 11 | # Ascender postgres host/port and credentials 12 | apiVersion: v1 13 | kind: Secret 14 | metadata: 15 | name: ascender-app-postgres-configuration 16 | namespace: {{ REACT_NAMESPACE }} 17 | stringData: 18 | host: {{ REACT_PGSQL_HOST }} 19 | port: '{{ REACT_PGSQL_PORT }}' 20 | database: {{ REACT_PGSQL_DB }} 21 | username: {{ REACT_PGSQL_USER }} 22 | password: {{ REACT_PGSQL_PWD }} 23 | sslmode: prefer 24 | type: unmanaged 25 | type: Opaque 26 | {% endif %} 27 | {% if k8s_lb_protocol == 'https' %} 28 | --- 29 | apiVersion: v1 30 | data: 31 | tls.crt: {{ lookup('ansible.builtin.file', react_tls_crt_path) | b64encode }} 32 | tls.key: {{ lookup('ansible.builtin.file', react_tls_key_path) | b64encode }} 33 | kind: Secret 34 | metadata: 35 | name: ascender-react-tls-secret 36 | namespace: {{ REACT_NAMESPACE }} 37 | type: kubernetes.io/tls 38 | {% endif %} 39 | --- 40 | # EDA Server Object 41 | apiVersion: eda.ansible.com/v1alpha1 42 | kind: EDA 43 | metadata: 44 | name: ascender-react 45 | namespace: {{ REACT_NAMESPACE }} 46 | spec: 47 | automation_server_url: https://{{ ASCENDER_HOSTNAME }} 48 | image: {{ REACT_IMAGE }} 49 
| image_version: {{ REACT_IMAGE_VERSION }} 50 | image_web: {{ REACT_IMAGE_WEB }} 51 | image_web_version: {{ REACT_IMAGE_WEB_VERSION }} 52 | admin_user: {{ REACT_ADMIN_USER }} 53 | admin_password_secret: ascender-react-admin-password 54 | service_type: ClusterIP 55 | ingress_type: ingress 56 | {% if k8s_lb_protocol == 'https' %} 57 | ingress_tls_secret: ascender-react-tls-secret 58 | {% endif %} 59 | ingress_path: "/" 60 | ingress_path_type: Prefix 61 | hostname: {{ REACT_HOSTNAME }} 62 | no_log: false 63 | websocket_ssl_verify: false 64 | image_pull_policy: Always 65 | extra_settings: 66 | - setting: CSRF_TRUSTED_ORIGINS 67 | value: 68 | - http{% if k8s_lb_protocol == 'https' %}s{% endif %}://{{ REACT_HOSTNAME }} -------------------------------------------------------------------------------- /playbooks/roles/ascender_react/templates/react-deployment/react-deployment-gke.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: ascender-react-admin-password 6 | namespace: {{ REACT_NAMESPACE }} 7 | stringData: 8 | password: {{ REACT_ADMIN_PASSWORD }} 9 | {% if ASCENDER_PGSQL_HOST is defined %} 10 | --- 11 | # Ascender postgres host/port and credentials 12 | apiVersion: v1 13 | kind: Secret 14 | metadata: 15 | name: ascender-app-postgres-configuration 16 | namespace: {{ REACT_NAMESPACE }} 17 | stringData: 18 | host: {{ REACT_PGSQL_HOST }} 19 | port: '{{ REACT_PGSQL_PORT }}' 20 | database: {{ REACT_PGSQL_DB }} 21 | username: {{ REACT_PGSQL_USER }} 22 | password: {{ REACT_PGSQL_PWD }} 23 | sslmode: prefer 24 | type: unmanaged 25 | type: Opaque 26 | {% endif %} 27 | {% if k8s_lb_protocol == 'https' %} 28 | --- 29 | apiVersion: v1 30 | data: 31 | tls.crt: {{ lookup('ansible.builtin.file', react_tls_crt_path) | b64encode }} 32 | tls.key: {{ lookup('ansible.builtin.file', react_tls_key_path) | b64encode }} 33 | kind: Secret 34 | metadata: 35 | name: ascender-react-tls-secret 36 
| namespace: {{ REACT_NAMESPACE }} 37 | type: kubernetes.io/tls 38 | {% endif %} 39 | --- 40 | # EDA Server Object 41 | apiVersion: eda.ansible.com/v1alpha1 42 | kind: EDA 43 | metadata: 44 | name: ascender-react 45 | namespace: {{ REACT_NAMESPACE }} 46 | spec: 47 | automation_server_url: https://{{ ASCENDER_HOSTNAME }} 48 | image: {{ REACT_IMAGE }} 49 | image_version: {{ REACT_IMAGE_VERSION }} 50 | image_web: {{ REACT_IMAGE_WEB }} 51 | image_web_version: {{ REACT_IMAGE_WEB_VERSION }} 52 | admin_user: {{ REACT_ADMIN_USER }} 53 | admin_password_secret: ascender-react-admin-password 54 | service_type: ClusterIP 55 | ingress_type: ingress 56 | {% if k8s_lb_protocol == 'https' %} 57 | ingress_tls_secret: ascender-react-tls-secret 58 | {% endif %} 59 | ingress_path: "/" 60 | ingress_path_type: Prefix 61 | hostname: {{ REACT_HOSTNAME }} 62 | no_log: false 63 | websocket_ssl_verify: false 64 | image_pull_policy: Always 65 | extra_settings: 66 | - setting: CSRF_TRUSTED_ORIGINS 67 | value: 68 | - http{% if k8s_lb_protocol == 'https' %}s{% endif %}://{{ REACT_HOSTNAME }} -------------------------------------------------------------------------------- /playbooks/roles/ascender_react/templates/react-deployment/react-deployment-k3s.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: ascender-react-admin-password 6 | namespace: {{ REACT_NAMESPACE }} 7 | stringData: 8 | password: {{ REACT_ADMIN_PASSWORD }} 9 | {% if ASCENDER_PGSQL_HOST is defined %} 10 | --- 11 | # Ascender postgres host/port and credentials 12 | apiVersion: v1 13 | kind: Secret 14 | metadata: 15 | name: ascender-app-postgres-configuration 16 | namespace: {{ REACT_NAMESPACE }} 17 | stringData: 18 | host: {{ REACT_PGSQL_HOST }} 19 | port: '{{ REACT_PGSQL_PORT }}' 20 | database: {{ REACT_PGSQL_DB }} 21 | username: {{ REACT_PGSQL_USER }} 22 | password: {{ REACT_PGSQL_PWD }} 23 | sslmode: prefer 24 | type: unmanaged 
25 | type: Opaque 26 | {% endif %} 27 | {% if k8s_lb_protocol == 'https' %} 28 | --- 29 | apiVersion: v1 30 | data: 31 | tls.crt: {{ lookup('ansible.builtin.file', react_tls_crt_path) | b64encode }} 32 | tls.key: {{ lookup('ansible.builtin.file', react_tls_key_path) | b64encode }} 33 | kind: Secret 34 | metadata: 35 | name: ascender-react-tls-secret 36 | namespace: {{ REACT_NAMESPACE }} 37 | type: kubernetes.io/tls 38 | {% endif %} 39 | --- 40 | # EDA Server Object 41 | apiVersion: eda.ansible.com/v1alpha1 42 | kind: EDA 43 | metadata: 44 | name: ascender-react 45 | namespace: {{ REACT_NAMESPACE }} 46 | spec: 47 | automation_server_url: https://{{ ASCENDER_HOSTNAME }} 48 | image: {{ REACT_IMAGE }} 49 | image_version: {{ REACT_IMAGE_VERSION }} 50 | image_web: {{ REACT_IMAGE_WEB }} 51 | image_web_version: {{ REACT_IMAGE_WEB_VERSION }} 52 | admin_user: {{ REACT_ADMIN_USER }} 53 | admin_password_secret: ascender-react-admin-password 54 | service_type: ClusterIP 55 | ingress_type: ingress 56 | {% if k8s_lb_protocol == 'https' %} 57 | ingress_tls_secret: ascender-react-tls-secret 58 | {% endif %} 59 | ingress_path: "/" 60 | ingress_path_type: Prefix 61 | hostname: {{ REACT_HOSTNAME }} 62 | no_log: false 63 | websocket_ssl_verify: false 64 | image_pull_policy: Always 65 | extra_settings: 66 | - setting: CSRF_TRUSTED_ORIGINS 67 | value: 68 | - http{% if k8s_lb_protocol == 'https' %}s{% endif %}://{{ REACT_HOSTNAME }} -------------------------------------------------------------------------------- /playbooks/roles/ascender_restore/tasks/ascender_restore.yml: -------------------------------------------------------------------------------- 1 | - name: Retrieve the current time in order to timestamp files 2 | ansible.builtin.setup: 3 | gather_subset: 4 | - date_time 5 | 6 | # - ansible.builtin.debug: 7 | # var: ansible_date_time 8 | 9 | - name: Generate manifest to install AWXRestore k8s object with timestamp attached, for purposes of deletion later 10 | 
ansible.builtin.template: 11 | src: "ascender-restore.yml" 12 | dest: "{{ tmp_dir }}/ascender-restore.yml.{{ ansible_date_time.iso8601_basic_short }}" 13 | 14 | - name: "Apply AWXRestore k8s manifest" 15 | kubernetes.core.k8s: 16 | state: present 17 | definition: "{{ lookup('ansible.builtin.template', 'ascender-restore.yml') }}" 18 | 19 | # - name: Wait for ascender-app-web Deployment to complete setting up 20 | # kubernetes.core.k8s_info: 21 | # kind: Deployment 22 | # wait: yes 23 | # name: ascender-app-web 24 | # namespace: "{{ ASCENDER_NAMESPACE }}" 25 | # wait_sleep: 10 26 | # wait_timeout: 360 27 | # register: ascender_web_deployment 28 | 29 | # - ansible.builtin.debug: 30 | # var: ascender_web_deployment -------------------------------------------------------------------------------- /playbooks/roles/ascender_restore/templates/ascender-restore.yml: -------------------------------------------------------------------------------- 1 | apiVersion: awx.ansible.com/v1beta1 2 | kind: AWXBackup 3 | metadata: 4 | name: ascender-restore-{{ ansible_date_time.date }}-{{ ansible_date_time.epoch }} 5 | namespace: {{ ASCENDER_NAMESPACE }} 6 | spec: 7 | deployment_name: ascender-app 8 | backup_name: -------------------------------------------------------------------------------- /playbooks/roles/common/tasks/aks_packages.yml: -------------------------------------------------------------------------------- 1 | - name: Read collections paths from ansible.cfg 2 | ansible.builtin.set_fact: 3 | collections_paths: "{{ lookup('ini', 'collections_path section=defaults file=../ansible.cfg') }}" 4 | when: k8s_platform == "aks" 5 | 6 | - name: Install pip packages for Azure collections 7 | ansible.builtin.pip: 8 | chdir: "{{ playbook_dir }}/.." 
9 | requirements: "{{ collections_paths.split(':') | first }}/ansible_collections/azure/azcollection/requirements.txt" 10 | when: k8s_platform == "aks" 11 | become: yes 12 | 13 | - name: Import GPG keys 14 | ansible.builtin.rpm_key: 15 | state: present 16 | key: "{{ item}}" 17 | with_items: "{{ azure_gpg_keys[ansible_distribution_major_version|int] }}" 18 | become: true 19 | 20 | - name: Set Hashicorp repository configuration for Rocky Linux 8 21 | ansible.builtin.copy: 22 | dest: /etc/yum.repos.d/hashicorp.repo 23 | content: | 24 | [hashicorp] 25 | name=HashiCorp Stable - $basearch 26 | baseurl=https://rpm.releases.hashicorp.com/RHEL/8/$basearch/stable 27 | enabled=1 28 | gpgcheck=1 29 | gpgkey=https://rpm.releases.hashicorp.com/gpg 30 | when: ansible_distribution_major_version|int == 8 31 | become: true 32 | 33 | - name: Set repository configuration for Rocky Linux 9 34 | ansible.builtin.copy: 35 | dest: /etc/yum.repos.d/hashicorp.repo 36 | content: | 37 | [hashicorp] 38 | name=HashiCorp Stable - $basearch 39 | baseurl=https://rpm.releases.hashicorp.com/RHEL/9/$basearch/stable 40 | enabled=1 41 | gpgcheck=1 42 | gpgkey=https://rpm.releases.hashicorp.com/gpg 43 | when: ansible_distribution_major_version|int == 9 44 | become: true 45 | 46 | - name: Install microsoft repo for Microsoft Azure 47 | ansible.builtin.dnf: 48 | name: "{{ azure_microsoft_repo[ansible_distribution_major_version|int] }}" 49 | # update_cache: true 50 | state: present 51 | become: true 52 | 53 | - name: Update repository cache 54 | ansible.builtin.dnf: 55 | update_cache: yes 56 | state: latest 57 | become: true 58 | 59 | - name: Install necessary pip packages for Microsoft Azure 60 | ansible.builtin.pip: 61 | name: "{{ azure_pip_packages[ansible_distribution_major_version|int] }}" 62 | # update_cache: true 63 | state: present 64 | become: true 65 | 66 | - name: Install necessary rpm packages for Microsoft Azure 67 | ansible.builtin.dnf: 68 | name: "{{ 
azure_rpm_packages[ansible_distribution_major_version|int] }}" 69 | # update_cache: true 70 | state: present 71 | become: true -------------------------------------------------------------------------------- /playbooks/roles/common/tasks/eks_packages.yml: -------------------------------------------------------------------------------- 1 | - name: Read collections paths from ansible.cfg 2 | ansible.builtin.set_fact: 3 | collections_paths: "{{ lookup('ini', 'collections_path section=defaults file=../ansible.cfg') }}" 4 | 5 | - name: Install pip packages for AWS collections 6 | ansible.builtin.pip: 7 | chdir: "{{ playbook_dir }}/.." 8 | requirements: "{{ collections_paths.split(':') | first }}/ansible_collections/amazon/aws/requirements.txt" 9 | become: yes 10 | 11 | # - name: Import GPG keys 12 | # ansible.builtin.rpm_key: 13 | # state: present 14 | # key: "{{ item}}" 15 | # with_items: "{{ azure_gpg_keys[ansible_distribution_major_version|int] }}" 16 | # become: true 17 | 18 | - name: Set Hashicorp repository configuration for Rocky Linux 8 19 | ansible.builtin.copy: 20 | dest: /etc/yum.repos.d/hashicorp.repo 21 | content: | 22 | [hashicorp] 23 | name=HashiCorp Stable - $basearch 24 | baseurl=https://rpm.releases.hashicorp.com/RHEL/8/$basearch/stable 25 | enabled=1 26 | gpgcheck=1 27 | gpgkey=https://rpm.releases.hashicorp.com/gpg 28 | when: ansible_distribution_major_version|int == 8 29 | become: true 30 | 31 | - name: Set repository configuration for Rocky Linux 9 32 | ansible.builtin.copy: 33 | dest: /etc/yum.repos.d/hashicorp.repo 34 | content: | 35 | [hashicorp] 36 | name=HashiCorp Stable - $basearch 37 | baseurl=https://rpm.releases.hashicorp.com/RHEL/9/$basearch/stable 38 | enabled=1 39 | gpgcheck=1 40 | gpgkey=https://rpm.releases.hashicorp.com/gpg 41 | when: ansible_distribution_major_version|int == 9 42 | become: true 43 | 44 | - name: Update repository cache 45 | ansible.builtin.dnf: 46 | update_cache: yes 47 | state: latest 48 | become: true 49 | 50 | - 
name: Enable EPEL Repo (Online)
  ansible.builtin.dnf:
    name: epel-release
    state: present
  become: true

- name: Check for existing eksctl installation
  ansible.builtin.stat:
    path: /usr/local/bin/eksctl
  register: eksctl_file

# - ansible.builtin.debug:
#     var: eksctl_file

- name: Ensure eksctl is installed
  block:
    - name: Download eksctl
      ansible.builtin.get_url:
        url: https://github.com/eksctl-io/eksctl/releases/latest/download/eksctl_linux_amd64.tar.gz
        dest: "{{ tmp_dir }}/eksctl_linux_amd64.tar.gz"

    - name: Extract eksctl into /usr/local/bin  # original task had an empty name
      ansible.builtin.unarchive:
        src: "{{ tmp_dir }}/eksctl_linux_amd64.tar.gz"
        dest: /usr/local/bin/
        # The archive was downloaded to the managed host by get_url above; without
        # remote_src, unarchive would look for it on the controller instead.
        remote_src: true
      become: true

    - name: Remove the downloaded eksctl archive  # original task had an empty name
      ansible.builtin.file:
        path: "{{ tmp_dir }}/eksctl_linux_amd64.tar.gz"
        state: absent

  # Re-install when the binary is absent or not executable for user/group/other.
  when:
    - not eksctl_file.stat.exists or not eksctl_file.stat.xusr or not eksctl_file.stat.xgrp or not eksctl_file.stat.xoth

- name: Install necessary pip packages for AWS
  ansible.builtin.pip:
    name: "{{ aws_pip_packages[ansible_distribution_major_version|int] }}"
    state: present
  become: true

- name: Install necessary rpm packages for AWS
  ansible.builtin.dnf:
    name: "{{ aws_rpm_packages[ansible_distribution_major_version|int] }}"
    state: present
  become: true

--------------------------------------------------------------------------------
/playbooks/roles/common/tasks/gke_packages.yml:
--------------------------------------------------------------------------------
- name: Read collections paths from ansible.cfg
  ansible.builtin.set_fact:
    collections_paths: "{{ lookup('ini', 'collections_path section=defaults file=../ansible.cfg') }}"

- name: Install pip packages for Google Cloud collections
  ansible.builtin.pip:
    chdir: "{{ playbook_dir }}/.."
8 | requirements: "{{ collections_paths.split(':') | first }}/ansible_collections/google/cloud/requirements.txt" 9 | become: yes 10 | 11 | # - name: Import GPG keys 12 | # ansible.builtin.rpm_key: 13 | # state: present 14 | # key: "{{ item}}" 15 | # with_items: "{{ azure_gpg_keys[ansible_distribution_major_version|int] }}" 16 | # become: true 17 | 18 | - name: Set Hashicorp repository configuration for Rocky Linux 8 19 | ansible.builtin.copy: 20 | dest: /etc/yum.repos.d/hashicorp.repo 21 | content: | 22 | [hashicorp] 23 | name=HashiCorp Stable - $basearch 24 | baseurl=https://rpm.releases.hashicorp.com/RHEL/8/$basearch/stable 25 | enabled=1 26 | gpgcheck=1 27 | gpgkey=https://rpm.releases.hashicorp.com/gpg 28 | when: ansible_distribution_major_version|int == 8 29 | become: true 30 | 31 | - name: Set repository configuration for Rocky Linux 9 32 | ansible.builtin.copy: 33 | dest: /etc/yum.repos.d/hashicorp.repo 34 | content: | 35 | [hashicorp] 36 | name=HashiCorp Stable - $basearch 37 | baseurl=https://rpm.releases.hashicorp.com/RHEL/9/$basearch/stable 38 | enabled=1 39 | gpgcheck=1 40 | gpgkey=https://rpm.releases.hashicorp.com/gpg 41 | when: ansible_distribution_major_version|int == 9 42 | become: true 43 | 44 | - name: Update repository cache 45 | ansible.builtin.dnf: 46 | update_cache: yes 47 | state: latest 48 | become: true 49 | 50 | - name: Enable EPEL Repo (Online) 51 | ansible.builtin.dnf: 52 | name: epel-release 53 | state: present 54 | become: true 55 | 56 | - name: Install necessary pip packages for Google Cloud 57 | ansible.builtin.pip: 58 | name: "{{ gcloud_pip_packages[ansible_distribution_major_version|int] }}" 59 | state: present 60 | become: true 61 | 62 | - name: Install necessary rpm packages for Google Cloud 63 | ansible.builtin.dnf: 64 | name: "{{ gcloud_rpm_packages[ansible_distribution_major_version|int] }}" 65 | state: present 66 | become: true 67 | 68 | -------------------------------------------------------------------------------- 
/playbooks/roles/common/tasks/main.yml:
--------------------------------------------------------------------------------
# - name: Ensure Firewall is disabled
#   ansible.builtin.systemd:
#     name: firewalld
#     state: stopped
#     enabled: false
#   failed_when: false

# - name: Disable SELinux
#   ansible.posix.selinux:
#     state: disabled

- name: Retrieve the Enterprise Linux details
  ansible.builtin.setup:
    gather_subset:
      - distribution_major_version
      - os_family

- name: Ensure {{ tmp_dir }} exists
  ansible.builtin.file:
    path: "{{ tmp_dir }}"
    state: directory

- name: Enable EPEL Repo (Online)
  ansible.builtin.dnf:
    name: epel-release
    state: present
  become: true
  when: not k8s_offline

- name: Add the Kubernetes yum repository
  ansible.builtin.copy:
    dest: /etc/yum.repos.d/kubernetes.repo
    content: |
      [kubernetes]
      name=Kubernetes
      baseurl=https://pkgs.k8s.io/core:/stable:/v1.28/rpm/
      enabled=1
      gpgcheck=1
      gpgkey=https://pkgs.k8s.io/core:/stable:/v1.28/rpm/repodata/repomd.xml.key
  become: true
  when: not k8s_offline

- name: Install Necessary RPMs (if offline)
  ansible.builtin.dnf:
    # Moustaches must not be nested inside a Jinja expression: the literal
    # '{{ playbook_dir }}' in the lookup term is not re-templated by ansible-core,
    # so the original glob searched a nonexistent literal path. Concatenate the
    # variable into the pattern instead.
    name: "{{ lookup('ansible.builtin.fileglob', playbook_dir ~ '/../offline/packages/*.rpm') }}"
    state: present
    disable_gpg_check: true
    cacheonly: true
  become: true
  when: k8s_offline

- name: Install necessary packages for Enterprise Linux
  ansible.builtin.dnf:
    name: "{{ common_packages[ansible_distribution_major_version|int] }}"
    # update_cache: true
    state: present
  become: true
  when: not k8s_offline

- name: Install AKS Packages
  ansible.builtin.include_tasks: aks_packages.yml
  when: k8s_platform == "aks"

- name: Install GKE Packages
  ansible.builtin.include_tasks: gke_packages.yml
  when: k8s_platform == "gke"
67 | 68 | - name: Install EKS Packages 69 | ansible.builtin.include_tasks: eks_packages.yml 70 | when: k8s_platform == "eks" 71 | 72 | -------------------------------------------------------------------------------- /playbooks/roles/common/vars/main.yml: -------------------------------------------------------------------------------- 1 | common_packages: 2 | 8: 3 | - jq 4 | - platform-python-setuptools 5 | - python3-pip 6 | - kubectl 7 | - python3-kubernetes 8 | 9: 9 | - jq 10 | - python-setuptools 11 | - python3-pip 12 | - kubectl 13 | - python3-kubernetes 14 | 15 | azure_gpg_keys: 16 | 8: 17 | - https://rpm.releases.hashicorp.com/gpg 18 | - https://packages.microsoft.com/keys/microsoft.asc 19 | 9: 20 | - https://rpm.releases.hashicorp.com/gpg 21 | - https://packages.microsoft.com/keys/microsoft.asc 22 | 23 | azure_microsoft_repo: 24 | 8: 25 | - https://packages.microsoft.com/config/rhel/8/packages-microsoft-prod.rpm 26 | 9: 27 | - https://packages.microsoft.com/config/rhel/9.0/packages-microsoft-prod.rpm 28 | 29 | azure_pip_packages: 30 | 8: 31 | - azure-cli-core 32 | 9: 33 | - azure-cli-core 34 | 35 | azure_rpm_packages: 36 | 8: 37 | - azure-cli 38 | - yum-utils 39 | - terraform 40 | 9: 41 | - azure-cli 42 | - yum-utils 43 | - terraform 44 | 45 | gcloud_gpg_keys: 46 | 8: 47 | - https://rpm.releases.hashicorp.com/gpg 48 | 9: 49 | - https://rpm.releases.hashicorp.com/gpg 50 | 51 | gcloud_rpm_packages: 52 | 8: 53 | - google-cloud-sdk-gke-gcloud-auth-plugin 54 | - yum-utils 55 | - terraform 56 | 9: 57 | - google-cloud-sdk-gke-gcloud-auth-plugin 58 | - yum-utils 59 | - terraform 60 | 61 | gcloud_pip_packages: 62 | 8: 63 | - jmespath 64 | 9: 65 | - jmespath 66 | 67 | aws_rpm_packages: 68 | 8: 69 | - yum-utils 70 | - terraform 71 | 9: 72 | - yum-utils 73 | - terraform 74 | 75 | aws_pip_packages: 76 | 8: 77 | - boto3 78 | - botocore 79 | - jmespath 80 | 9: 81 | - boto3 82 | - botocore 83 | - jmespath 
-------------------------------------------------------------------------------- /playbooks/roles/k8s_setup/files/aks_deploy/main.tf: -------------------------------------------------------------------------------- 1 | # Generate random resource group name 2 | resource "random_pet" "rg_name" { 3 | prefix = var.resource_group_name_prefix 4 | } 5 | 6 | resource "azurerm_resource_group" "rg" { 7 | location = var.resource_group_location 8 | name = random_pet.rg_name.id 9 | } 10 | 11 | resource "random_pet" "azurerm_kubernetes_cluster_name" { 12 | prefix = "cluster" 13 | } 14 | 15 | resource "random_pet" "azurerm_kubernetes_cluster_dns_prefix" { 16 | prefix = "dns" 17 | } 18 | 19 | resource "random_pet" "ssh_key_name" { 20 | prefix = "ssh" 21 | separator = "" 22 | } 23 | 24 | resource "azapi_resource_action" "ssh_public_key_gen" { 25 | type = "Microsoft.Compute/sshPublicKeys@2022-11-01" 26 | resource_id = azapi_resource.ssh_public_key.id 27 | action = "generateKeyPair" 28 | method = "POST" 29 | 30 | response_export_values = ["publicKey", "privateKey"] 31 | } 32 | 33 | resource "azapi_resource" "ssh_public_key" { 34 | type = "Microsoft.Compute/sshPublicKeys@2022-11-01" 35 | name = random_pet.ssh_key_name.id 36 | location = azurerm_resource_group.rg.location 37 | parent_id = azurerm_resource_group.rg.id 38 | } 39 | 40 | resource "azurerm_kubernetes_cluster" "k8s" { 41 | location = azurerm_resource_group.rg.location 42 | name = var.aks_cluster_name 43 | resource_group_name = azurerm_resource_group.rg.name 44 | dns_prefix = random_pet.azurerm_kubernetes_cluster_dns_prefix.id 45 | /* http_application_routing_enabled = true */ 46 | 47 | identity { 48 | type = "SystemAssigned" 49 | } 50 | 51 | default_node_pool { 52 | name = "agentpool" 53 | vm_size = var.azure_vm_size 54 | node_count = var.node_count 55 | os_disk_size_gb = var.azure_disk_size 56 | } 57 | linux_profile { 58 | admin_username = var.username 59 | 60 | ssh_key { 61 | key_data = 
azapi_resource_action.ssh_public_key_gen.output.publicKey 62 | } 63 | } 64 | network_profile { 65 | network_plugin = "kubenet" 66 | load_balancer_sku = "standard" 67 | } 68 | } 69 | 70 | # Obtain the kubeconfig for the cluster 71 | data "azurerm_kubernetes_cluster" "k8s" { 72 | name = azurerm_kubernetes_cluster.k8s.name 73 | resource_group_name = azurerm_kubernetes_cluster.k8s.resource_group_name 74 | } 75 | 76 | resource "local_file" "kubeconfig" { 77 | content = data.azurerm_kubernetes_cluster.k8s.kube_config_raw 78 | // filename = "$HOME/.kube/config" 79 | filename = "${var.home_dir}/.kube/config" 80 | } 81 | 82 | # Output the kubeconfig file location 83 | output "kubeconfig_file" { 84 | value = local_file.kubeconfig.filename 85 | } -------------------------------------------------------------------------------- /playbooks/roles/k8s_setup/files/aks_deploy/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">=1.0" 3 | 4 | required_providers { 5 | azapi = { 6 | source = "azure/azapi" 7 | version = "~>1.5" 8 | } 9 | azurerm = { 10 | source = "hashicorp/azurerm" 11 | version = "~>3.0" 12 | } 13 | random = { 14 | source = "hashicorp/random" 15 | version = "~>3.0" 16 | } 17 | time = { 18 | source = "hashicorp/time" 19 | version = "0.9.1" 20 | } 21 | } 22 | } 23 | 24 | provider "azurerm" { 25 | features {} 26 | } -------------------------------------------------------------------------------- /playbooks/roles/k8s_setup/files/aks_deploy/vars.tf: -------------------------------------------------------------------------------- 1 | variable "resource_group_location" { 2 | type = string 3 | default = "eastus" 4 | description = "Location of the resource group." 5 | } 6 | 7 | variable "resource_group_name_prefix" { 8 | type = string 9 | default = "rg" 10 | description = "Prefix of the resource group name that's combined with a random ID so name is unique in your Azure subscription." 
11 | } 12 | 13 | variable "node_count" { 14 | type = number 15 | description = "The initial quantity of nodes for the node pool." 16 | default = 3 17 | } 18 | 19 | variable "azure_vm_size" { 20 | type = string 21 | description = "The VM Size that will be used for the worker nodes" 22 | default = "Standard_D2_v2" 23 | } 24 | 25 | variable "azure_disk_size" { 26 | type = number 27 | description = "The Disk Size that will be used for the worker nodes" 28 | default = 50 29 | } 30 | 31 | variable "msi_id" { 32 | type = string 33 | description = "The Managed Service Identity ID. Set this value if you're running this example using Managed Identity as the authentication method." 34 | default = null 35 | } 36 | 37 | variable "username" { 38 | type = string 39 | description = "The admin username for the new cluster." 40 | default = "azureadmin" 41 | } 42 | 43 | variable "home_dir" { 44 | description = "Home directory path" 45 | type = string 46 | default = "/home/default" 47 | } 48 | 49 | variable "aks_cluster_name" { 50 | type = string 51 | default = "aks_cluster" 52 | description = "Name of the AKS Kubernetes cluster" 53 | } -------------------------------------------------------------------------------- /playbooks/roles/k8s_setup/files/eks_deploy/main.tf: -------------------------------------------------------------------------------- 1 | # Data source for AWS availability zones 2 | data "aws_availability_zones" "available" {} 3 | 4 | # Create a new VPC 5 | resource "aws_vpc" "ascender_eks_vpc" { 6 | cidr_block = "10.0.0.0/16" 7 | tags = { 8 | Name = "ascender-eks-vpc" 9 | } 10 | } 11 | 12 | # Create public subnets 13 | resource "aws_subnet" "ascender_eks_public_subnets" { 14 | count = 2 15 | vpc_id = aws_vpc.ascender_eks_vpc.id 16 | cidr_block = cidrsubnet(aws_vpc.ascender_eks_vpc.cidr_block, 8, count.index) 17 | availability_zone = element(data.aws_availability_zones.available.names, count.index) 18 | map_public_ip_on_launch = true 19 | } 20 | 21 | # Create an internet 
gateway 22 | resource "aws_internet_gateway" "ascender_eks_igw" { 23 | vpc_id = aws_vpc.ascender_eks_vpc.id 24 | tags = { 25 | Name = "ascender-eks-igw" 26 | } 27 | } 28 | 29 | # Create route table 30 | resource "aws_route_table" "ascender_eks_route_table" { 31 | vpc_id = aws_vpc.ascender_eks_vpc.id 32 | 33 | route { 34 | cidr_block = "0.0.0.0/0" 35 | gateway_id = aws_internet_gateway.ascender_eks_igw.id 36 | } 37 | tags = { 38 | Name = "ascender-eks-route-table" 39 | } 40 | } 41 | 42 | # Associate route table with subnets 43 | resource "aws_route_table_association" "eks_route_table_association" { 44 | count = 2 45 | subnet_id = element(aws_subnet.ascender_eks_public_subnets.*.id, count.index) 46 | route_table_id = aws_route_table.ascender_eks_route_table.id 47 | } 48 | 49 | # IAM role for eks 50 | resource "aws_iam_role" "ascender_eks_cluster_role" { 51 | name = "ascender-eks-cluster-role" 52 | 53 | assume_role_policy = jsonencode({ 54 | Version = "2012-10-17", 55 | Statement = [ 56 | { 57 | Effect = "Allow", 58 | Principal = { 59 | Service = "eks.amazonaws.com" 60 | }, 61 | Action = "sts:AssumeRole" 62 | } 63 | ] 64 | }) 65 | } 66 | 67 | resource "aws_iam_role_policy_attachment" "ascender_eks_cluster_policy" { 68 | role = aws_iam_role.ascender_eks_cluster_role.name 69 | policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy" 70 | } 71 | 72 | # Create EKS Cluster 73 | resource "aws_eks_cluster" "ascender_eks_cluster" { 74 | name = var.eks_cluster_name 75 | version = var.kubernetes_version # Specify Kubernetes version here 76 | role_arn = aws_iam_role.ascender_eks_cluster_role.arn 77 | 78 | vpc_config { 79 | subnet_ids = aws_subnet.ascender_eks_public_subnets.*.id 80 | } 81 | } 82 | 83 | resource "aws_iam_role" "ascender_eks_node_group_role" { 84 | name = "ascender-eks-node-group-role" 85 | 86 | assume_role_policy = jsonencode({ 87 | Version = "2012-10-17", 88 | Statement = [{ 89 | Effect = "Allow", 90 | Principal = { 91 | Service = "ec2.amazonaws.com" 92 | 
}, 93 | Action = "sts:AssumeRole" 94 | }] 95 | }) 96 | } 97 | 98 | resource "aws_iam_role_policy_attachment" "example-AmazonEKSWorkerNodePolicy" { 99 | policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy" 100 | role = aws_iam_role.ascender_eks_node_group_role.name 101 | } 102 | 103 | resource "aws_iam_role_policy_attachment" "example-AmazonEKS_CNI_Policy" { 104 | policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy" 105 | role = aws_iam_role.ascender_eks_node_group_role.name 106 | } 107 | 108 | resource "aws_iam_role_policy_attachment" "example-AmazonEC2ContainerRegistryReadOnly" { 109 | policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" 110 | role = aws_iam_role.ascender_eks_node_group_role.name 111 | } 112 | 113 | resource "aws_iam_role_policy_attachment" "example-AmazonEBSCSIDriverPolicy" { 114 | policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy" 115 | role = aws_iam_role.ascender_eks_node_group_role.name 116 | } 117 | 118 | resource "aws_eks_node_group" "ascender_nodes" { 119 | cluster_name = aws_eks_cluster.ascender_eks_cluster.name 120 | node_group_name = "ascender-nodes" 121 | node_role_arn = aws_iam_role.ascender_eks_node_group_role.arn 122 | subnet_ids = aws_subnet.ascender_eks_public_subnets.*.id 123 | 124 | scaling_config { 125 | desired_size = var.num_nodes 126 | max_size = var.num_nodes 127 | min_size = var.num_nodes 128 | } 129 | 130 | instance_types = [var.aws_vm_size] 131 | disk_size = var.volume_size 132 | } 133 | -------------------------------------------------------------------------------- /playbooks/roles/k8s_setup/files/eks_deploy/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">=1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "~>5.5" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = "~>3.0" 12 | } 13 | time = { 14 | source = 
"hashicorp/time" 15 | version = "0.9.1" 16 | } 17 | } 18 | } 19 | 20 | provider "aws" { 21 | region = var.region 22 | } -------------------------------------------------------------------------------- /playbooks/roles/k8s_setup/files/eks_deploy/vars.tf: -------------------------------------------------------------------------------- 1 | variable "region" { 2 | description = "AWS region" 3 | type = string 4 | default = "us-east-1" 5 | } 6 | 7 | variable "eks_cluster_name" { 8 | description = "Name of the EKS cluster" 9 | type = string 10 | default = "ascender-eks-cluster" 11 | } 12 | 13 | variable "kubernetes_version" { 14 | description = "Kubernetes version for the EKS cluster" 15 | type = string 16 | default = "1.29" 17 | } 18 | 19 | variable "num_nodes" { 20 | description = "Number of nodes to create in the EKS node group" 21 | type = number 22 | default = 3 23 | } 24 | 25 | variable "aws_vm_size" { 26 | description = "Instance type for the EKS nodes" 27 | type = string 28 | default = "t3.large" 29 | } 30 | 31 | variable "volume_size" { 32 | description = "Disk size for each EKS worker node" 33 | type = number 34 | default = 100 35 | } -------------------------------------------------------------------------------- /playbooks/roles/k8s_setup/files/gke_deploy/main.tf: -------------------------------------------------------------------------------- 1 | resource "google_container_cluster" "primary" { 2 | name = var.gke_cluster_name 3 | min_master_version = var.kubernetes_version 4 | location = var.zone 5 | initial_node_count = var.num_nodes 6 | 7 | node_config { 8 | machine_type = var.gcloud_vm_size 9 | } 10 | } 11 | 12 | output "cluster_name" { 13 | value = google_container_cluster.primary.name 14 | } 15 | 16 | output "cluster_endpoint" { 17 | value = google_container_cluster.primary.endpoint 18 | } 19 | 20 | output "cluster_ca_certificate" { 21 | value = google_container_cluster.primary.master_auth.0.cluster_ca_certificate 22 | } 23 | 24 | 
-------------------------------------------------------------------------------- /playbooks/roles/k8s_setup/files/gke_deploy/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">=1.0" 3 | 4 | required_providers { 5 | google = { 6 | source = "hashicorp/google" 7 | version = "~>3.0" 8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = "~>3.0" 12 | } 13 | time = { 14 | source = "hashicorp/time" 15 | version = "0.9.1" 16 | } 17 | } 18 | } 19 | 20 | provider "google" { 21 | project = var.project_id 22 | zone = var.zone 23 | } -------------------------------------------------------------------------------- /playbooks/roles/k8s_setup/files/gke_deploy/vars.tf: -------------------------------------------------------------------------------- 1 | /* 2 | variable "use_existing_project" { 3 | description = "Boolean to decide whether to use an existing project or create a new one." 4 | type = bool 5 | default = false 6 | } 7 | */ 8 | 9 | variable "project_id" { 10 | description = "The ID of the existing project to use. Only required if use_existing_project is true." 11 | type = string 12 | default = "dummy_project" 13 | } 14 | 15 | variable "kubernetes_version" { 16 | description = "The Kubernetes version for the master and nodes." 17 | type = string 18 | default = "1.27.11-gke.1062004" 19 | } 20 | 21 | variable "zone" { 22 | description = "The zone to deploy the GKE cluster." 23 | type = string 24 | default = "us-central1-a" 25 | } 26 | 27 | variable "gke_cluster_name" { 28 | description = "The name of the GKE cluster" 29 | type = string 30 | default = "ascender-gke-cluster" 31 | } 32 | 33 | variable "num_nodes" { 34 | description = "The number of nodes in the node pool." 35 | type = number 36 | default = 3 37 | } 38 | 39 | variable "gcloud_vm_size" { 40 | description = "The size of the instance (machine type) for the worker nodes." 
41 | type = string 42 | default = "n1-standard-1" 43 | } 44 | 45 | variable "volume_size" { 46 | description = "The size of the boot disk for the worker nodes, in GB." 47 | type = number 48 | default = 100 49 | } 50 | 51 | variable "home_dir" { 52 | description = "Home directory path" 53 | type = string 54 | default = "/home/default" 55 | } 56 | -------------------------------------------------------------------------------- /playbooks/roles/k8s_setup/tasks/k8s_setup_aks.yml: -------------------------------------------------------------------------------- 1 | - name: Retrieve the current time in order to timestamp files 2 | ansible.builtin.setup: 3 | gather_subset: 4 | - date_time 5 | 6 | - name: Determine if ~/.kube/config already exists 7 | ansible.builtin.stat: 8 | path: ~/.kube/config 9 | register: existing_kubeconfig 10 | 11 | - name: create backup of existing ~/.kube/config 12 | ansible.builtin.copy: 13 | src: ~/.kube/config 14 | dest: "~/.kube/config.{{ ansible_date_time.iso8601_basic_short }}" 15 | when: 16 | - existing_kubeconfig.stat.exists 17 | - download_kubeconfig 18 | 19 | - name: Delete existing ~/.kube/config 20 | ansible.builtin.file: 21 | path: ~/.kube/config 22 | state: absent 23 | when: 24 | - existing_kubeconfig.stat.exists 25 | - download_kubeconfig 26 | 27 | # Check for existing aks cluster 28 | - name: Get list of all AKS clusters 29 | ansible.builtin.shell: az aks list --query '[].{name:name, resourceGroup:resourceGroup}' -o json 30 | register: aks_clusters 31 | 32 | - name: Find the AKS cluster by name 33 | ansible.builtin.set_fact: 34 | aks_cluster: "{{ aks_clusters.stdout | from_json | selectattr('name', 'equalto', AKS_CLUSTER_NAME) | list }}" 35 | 36 | - name: Check if AKS cluster exists 37 | ansible.builtin.set_fact: 38 | aks_cluster_exists: "{{ aks_cluster | length > 0 }}" 39 | 40 | - ansible.builtin.debug: 41 | msg: AKS cluster {{ AKS_CLUSTER_NAME }} exists 42 | when: aks_cluster_exists 43 | 44 | - ansible.builtin.debug: 45 | msg: 
AKS cluster {{ AKS_CLUSTER_NAME }} DOES NOT exist 46 | when: not aks_cluster_exists 47 | 48 | 49 | # If AKS_CLUSTER_NAME does not exist, set up new aks cluster 50 | - name: "If target cluster {{ AKS_CLUSTER_NAME }} does not exist, create it" 51 | block: 52 | 53 | - name: Ensure that the aks_deploy directory exists 54 | ansible.builtin.file: 55 | path: "{{ playbook_dir }}/../ascender_install_artifacts/aks_deploy" 56 | state: directory 57 | 58 | - name: Copy AKS Terraform Directory 59 | ansible.builtin.copy: 60 | src: files/aks_deploy 61 | dest: "{{ playbook_dir }}/../ascender_install_artifacts" 62 | mode: 0777 63 | 64 | - name: Initialize Terraform 65 | ansible.builtin.command: 66 | cmd: terraform init 67 | chdir: "{{ playbook_dir }}/../ascender_install_artifacts/aks_deploy" 68 | 69 | - name: Terraform Plan 70 | ansible.builtin.command: 71 | cmd: terraform plan 72 | chdir: "{{ playbook_dir }}/../ascender_install_artifacts/aks_deploy" 73 | 74 | - name: Provision AKS cluster via Terraform 75 | community.general.terraform: 76 | project_path: "{{ playbook_dir }}/../ascender_install_artifacts/aks_deploy" 77 | state: present 78 | variables: 79 | home_dir: "/home/{{ ansible_user}}" 80 | aks_cluster_name: "{{ AKS_CLUSTER_NAME }}" 81 | resource_group_location: "{{ AKS_CLUSTER_REGION }}" 82 | node_count: "{{ AKS_NUM_WORKER_NODES }}" 83 | azure_vm_size: "{{ AKS_INSTANCE_TYPE }}" 84 | azure_disk_size: "{{ AKS_WORKER_VOLUME_SIZE }}" 85 | register: terraform_output 86 | 87 | - name: Ensure that the KUBECONFIG file can successfully authenticate to the AKS cluster 88 | kubernetes.core.k8s_info: 89 | api_version: v1 90 | kind: Node 91 | register: result 92 | ignore_errors: yes 93 | retries: 5 94 | delay: 10 95 | until: result.resources is defined and result.resources | length > 0 96 | 97 | - name: Fail the playbook if AKS authentication check is unsuccessful 98 | ansible.builtin.fail: 99 | msg: "Unable to retrieve AKS nodes after multiple retries." 
100 | when: result.resources is not defined or result.resources | length == 0 101 | 102 | when: not aks_cluster_exists and AKS_CLUSTER_STATUS=="provision" 103 | 104 | - name: Retrieve the resource group of the AKS cluster 105 | ansible.builtin.command: az aks list --query "[?name=='{{ AKS_CLUSTER_NAME }}'].{ResourceGroup:resourceGroup}" -o tsv 106 | register: aks_resource_group 107 | when: 108 | - download_kubeconfig 109 | 110 | - name: Retrieve kubeconfig for the AKS cluster 111 | ansible.builtin.command: > 112 | az aks get-credentials --resource-group {{ aks_resource_group.stdout }} --name {{ AKS_CLUSTER_NAME }} --file "~/.kube/config" 113 | register: get_credentials_result 114 | when: 115 | - download_kubeconfig -------------------------------------------------------------------------------- /playbooks/roles/k8s_setup/tasks/k8s_setup_dkp.yml: -------------------------------------------------------------------------------- 1 | - name: Retrieve the current time in order to timestamp files 2 | ansible.builtin.setup: 3 | gather_subset: 4 | - date_time 5 | 6 | # - name: Ensure that ~/.kube directory exists 7 | # ansible.builtin.file: 8 | # path: ~/.kube 9 | # state: directory 10 | # delegate_to: localhost 11 | 12 | # - name: Check for an existing ~/.kube/config file 13 | # ansible.builtin.stat: 14 | # path: "~/.kube/config" 15 | # register: kubeconfig_data 16 | # delegate_to: localhost 17 | 18 | # - name: Back up existing ~/.kube/config if there is one 19 | # ansible.builtin.copy: 20 | # src: ~/.kube/config 21 | # dest: "~/.kube/config.{{ ansible_date_time.iso8601_basic_short }}" 22 | # when: kubeconfig_data.stat 23 | # delegate_to: localhost 24 | 25 | # - name: Retrieve DKP cluster KUBECONFIG 26 | # ansible.builtin.command: "dkp get kubeconfig -c {{ DKP_CLUSTER_NAME }} > ~/.kube/config_dkp" 27 | # # delegate_to: localhost 28 | 29 | - name: Get a list of all nodes 30 | kubernetes.core.k8s_info: 31 | api_version: v1 32 | kind: Node 33 | validate_certs: false 34 | 
delegate_to: localhost 35 | environment: 36 | K8S_AUTH_KUBECONFIG: "{{ lookup('env', 'HOME') }}/.kube/config_dkp" 37 | 38 | - name: Get Traefik LoadBalancer IP Address, to add to /etc/hosts 39 | kubernetes.core.k8s_info: 40 | kind: Service 41 | wait: yes 42 | name: traefik 43 | namespace: default 44 | wait_sleep: 10 45 | wait_timeout: 360 46 | validate_certs: false 47 | delegate_to: localhost 48 | register: svc_output 49 | environment: 50 | K8S_AUTH_KUBECONFIG: "{{ lookup('env', 'HOME') }}/.kube/config_dkp" 51 | 52 | - name: "Ensure a local DNS entry for {{ ASCENDER_HOSTNAME }} exists" 53 | ansible.builtin.lineinfile: 54 | path: /etc/hosts 55 | regexp: "{{ ASCENDER_HOSTNAME }}" 56 | line: "{{ svc_output.resources[0].status.loadBalancer.ingress[0].ip }} {{ ASCENDER_HOSTNAME }}" 57 | owner: root 58 | group: root 59 | mode: '0644' 60 | become: true 61 | delegate_to: localhost 62 | when: use_etc_hosts 63 | 64 | - name: "Ensure a local DNS entry for {{ LEDGER_HOSTNAME }} exists" 65 | ansible.builtin.lineinfile: 66 | path: /etc/hosts 67 | regexp: "{{ LEDGER_HOSTNAME }}" 68 | line: "{{ svc_output.resources[0].status.loadBalancer.ingress[0].ip }} {{ LEDGER_HOSTNAME }}" 69 | owner: root 70 | group: root 71 | mode: '0644' 72 | become: true 73 | delegate_to: localhost 74 | when: 75 | - use_etc_hosts 76 | - LEDGER_INSTALL 77 | 78 | # - name: "Ensure a local DNS entry for {{ LEDGER_HOSTNAME }} is removed is using external DNS" 79 | # ansible.builtin.lineinfile: 80 | # path: /etc/hosts 81 | # regexp: "{{ LEDGER_HOSTNAME }}" 82 | # state: absent 83 | # owner: root 84 | # group: root 85 | # mode: '0644' 86 | # become: true 87 | # delegate_to: localhost 88 | # when: 89 | # - not use_etc_hosts 90 | # - LEDGER_INSTALL 91 | 92 | # - name: "Ensure a local DNS entry for {{ ASCENDER_HOSTNAME }} is removed if using external DNS" 93 | # ansible.builtin.lineinfile: 94 | # path: /etc/hosts 95 | # regexp: "{{ ASCENDER_HOSTNAME }}" 96 | # state: absent 97 | # owner: root 98 | # group: root 99 
| # mode: '0644' 100 | # become: true 101 | # delegate_to: localhost 102 | # when: not use_etc_hosts -------------------------------------------------------------------------------- /playbooks/roles/k8s_setup/tasks/k8s_setup_gke.yml: -------------------------------------------------------------------------------- 1 | - name: Retrieve the current time in order to timestamp files 2 | ansible.builtin.setup: 3 | gather_subset: 4 | - date_time 5 | 6 | - name: Ensure .kube directory exists 7 | ansible.builtin.file: 8 | path: "~/.kube" 9 | state: directory 10 | mode: '0755' 11 | 12 | - name: Determine if ~/.kube/config already exists 13 | ansible.builtin.stat: 14 | path: ~/.kube/config 15 | register: existing_kubeconfig 16 | 17 | - name: create backup of existing ~/.kube/config 18 | ansible.builtin.copy: 19 | src: ~/.kube/config 20 | dest: "~/.kube/config.{{ ansible_date_time.iso8601_basic_short }}" 21 | when: 22 | - existing_kubeconfig.stat.exists 23 | - download_kubeconfig 24 | 25 | - name: Delete existing ~/.kube/config 26 | ansible.builtin.file: 27 | path: ~/.kube/config 28 | state: absent 29 | when: 30 | - existing_kubeconfig.stat.exists 31 | - download_kubeconfig 32 | 33 | # Check for existing gke cluster 34 | - name: Get list of all GKE clusters 35 | ansible.builtin.command: > 36 | gcloud container clusters list --format="json" 37 | {% if GKE_CLUSTER_ZONE %} 38 | --zone {{ GKE_CLUSTER_ZONE }} 39 | {% endif %} 40 | register: gke_clusters 41 | 42 | - name: Parse GKE clusters JSON 43 | ansible.builtin.set_fact: 44 | clusters_list: "{{ gke_clusters.stdout | from_json }}" 45 | 46 | - name: Check if the specified GKE cluster exists 47 | ansible.builtin.set_fact: 48 | gke_cluster_exists: "{{ GKE_CLUSTER_NAME in clusters_list | map(attribute='name') | list }}" 49 | 50 | - name: Debug output 51 | ansible.builtin.debug: 52 | msg: > 53 | The cluster {{ GKE_CLUSTER_NAME }} 54 | {% if gke_cluster_exists %}exists{% else %}does not exist{% endif %}. 
55 | 56 | 57 | # # If GKE_CLUSTER_NAME does not exist, set up new gke cluster 58 | - name: "If target cluster {{ GKE_CLUSTER_NAME }} does not exist, create it" 59 | block: 60 | 61 | - name: Ensure that the gke_deploy directory exists 62 | ansible.builtin.file: 63 | path: "{{ playbook_dir }}/../ascender_install_artifacts/gke_deploy" 64 | state: directory 65 | 66 | - name: Copy GKE Terraform Directory 67 | ansible.builtin.copy: 68 | src: files/gke_deploy 69 | dest: "{{ playbook_dir }}/../ascender_install_artifacts" 70 | mode: 0777 71 | 72 | - name: Initialize Terraform 73 | ansible.builtin.command: 74 | cmd: terraform init 75 | chdir: "{{ playbook_dir }}/../ascender_install_artifacts/gke_deploy" 76 | 77 | - name: Terraform Plan 78 | ansible.builtin.command: 79 | cmd: terraform plan 80 | chdir: "{{ playbook_dir }}/../ascender_install_artifacts/gke_deploy" 81 | 82 | - name: Provision GKE cluster via Terraform 83 | community.general.terraform: 84 | project_path: "{{ playbook_dir }}/../ascender_install_artifacts/gke_deploy" 85 | state: present 86 | variables: 87 | home_dir: "/home/{{ ansible_user}}" 88 | gke_cluster_name: "{{ GKE_CLUSTER_NAME }}" 89 | kubernetes_version: "{{ GKE_K8S_VERSION }}" 90 | project_id: "{{ GKE_PROJECT_ID }}" 91 | zone: "{{ GKE_CLUSTER_ZONE }}" 92 | num_nodes: "{{ GKE_NUM_WORKER_NODES }}" 93 | gcloud_vm_size: "{{ GKE_INSTANCE_TYPE }}" 94 | volume_size: "{{ GKE_WORKER_VOLUME_SIZE }}" 95 | register: terraform_output 96 | 97 | when: not gke_cluster_exists and GKE_CLUSTER_STATUS=="provision" 98 | 99 | - name: Ensure kubeconfig exists 100 | ansible.builtin.file: 101 | path: "/home/{{ ansible_user }}/.kube/config" 102 | state: touch 103 | owner: "{{ ansible_user }}" 104 | mode: '0644' 105 | when: 106 | - download_kubeconfig 107 | 108 | - name: Retrieve kubeconfig for the GKE cluster 109 | ansible.builtin.shell: 110 | cmd: "gcloud container clusters get-credentials {{ GKE_CLUSTER_NAME }} --zone {{ GKE_CLUSTER_ZONE }}" 111 | register: 
get_credentials_result 112 | environment: 113 | KUBECONFIG: "/home/{{ ansible_user }}/.kube/config" 114 | when: 115 | - download_kubeconfig 116 | 117 | - name: Ensure that the KUBECONFIG file can successfully authenticate to the GKE cluster 118 | kubernetes.core.k8s_info: 119 | api_version: v1 120 | kind: Node 121 | register: result 122 | ignore_errors: yes 123 | retries: 5 124 | delay: 10 125 | until: result.resources is defined and result.resources | length > 0 126 | 127 | - name: Fail the playbook if GKE authentication check is unsuccessful 128 | ansible.builtin.fail: 129 | msg: "Unable to retrieve GKE nodes after multiple retries." 130 | when: result.resources is not defined or result.resources | length == 0 -------------------------------------------------------------------------------- /playbooks/roles/k8s_setup/tasks/k8s_setup_k3s.yml: -------------------------------------------------------------------------------- 1 | - name: Wait 600 seconds for target connection to become reachable/usable 2 | ansible.builtin.wait_for_connection: 3 | delay: 15 4 | 5 | - name: Populate service facts 6 | ansible.builtin.service_facts: 7 | 8 | - name: Stop firewall service 9 | ansible.builtin.service: 10 | name: firewalld 11 | state: stopped 12 | enabled: false 13 | when: 14 | - firewalld_disable | bool 15 | - "'firewalld.service' in services" 16 | 17 | - name: Install K3s Online (this may take up to 5 minutes) 18 | ansible.builtin.shell: curl -sfL https://get.k3s.io | sh - 19 | when: 20 | - kube_install 21 | - "'k3s.service' not in services" 22 | - not k8s_offline 23 | 24 | - name: K3s Offline Install 25 | when: 26 | - kube_install 27 | - "'k3s.service' not in services" 28 | - k8s_offline 29 | block: 30 | - name: Copy k3s Binary 31 | ansible.builtin.copy: 32 | src: "{{ playbook_dir }}/../offline/packages/k3s" 33 | dest: /usr/local/bin/k3s 34 | mode: '0755' 35 | 36 | - name: Create images directory 37 | ansible.builtin.file: 38 | path: /var/lib/rancher/k3s/agent/images/ 39 | 
state: directory 40 | 41 | - name: Copy images 42 | ansible.builtin.copy: 43 | src: "{{ item }}" 44 | dest: "/var/lib/rancher/k3s/agent/images/{{ item | basename }}" 45 | remote_src: true 46 | with_fileglob: "{{ playbook_dir }}/../offline/images/*" 47 | 48 | - name: Make Installer executable 49 | ansible.builtin.file: 50 | path: "{{ playbook_dir }}/../offline/packages/k3s_install.sh" 51 | state: file 52 | mode: "0755" 53 | 54 | - name: Install K3s Offline 55 | ansible.builtin.shell: INSTALL_K3S_SKIP_DOWNLOAD=true INSTALL_K3S_EXEC="--write-kubeconfig-mode 644" INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_SELINUX_WARN=true {{ playbook_dir }}/../offline/packages/k3s_install.sh 56 | 57 | - name: Ensure that ~/.kube directory exists 58 | ansible.builtin.file: 59 | path: ~/.kube 60 | state: directory 61 | delegate_to: localhost 62 | 63 | - name: Copy kubeconfig file from default location to the ~/.kube directory" 64 | ansible.builtin.fetch: 65 | src: /etc/rancher/k3s/k3s.yaml 66 | dest: ~/.kube/config 67 | flat: true 68 | become: true 69 | when: download_kubeconfig 70 | 71 | - name: "Replace the kubeconfig cluster kube-api server IP with the public IP address, if the cluster is remote " 72 | ansible.builtin.replace: 73 | path: ~/.kube/config 74 | regexp: '127.0.0.1' 75 | replace: "{{ k3s_master_node_ip }}" 76 | delegate_to: localhost 77 | when: 78 | - ansible_host != "localhost" 79 | - download_kubeconfig 80 | 81 | - name: Get a list of all nodes 82 | kubernetes.core.k8s_info: 83 | api_version: v1 84 | kind: Node 85 | validate_certs: false 86 | delegate_to: localhost 87 | environment: 88 | K8S_AUTH_KUBECONFIG: "{{ lookup('env', 'HOME') }}/.kube/config" 89 | 90 | - name: "Ensure a local DNS entry for {{ ASCENDER_HOSTNAME }} exists" 91 | ansible.builtin.lineinfile: 92 | path: /etc/hosts 93 | regexp: "{{ ASCENDER_HOSTNAME }}" 94 | line: "{{ k3s_master_node_ip }} {{ ASCENDER_HOSTNAME }}" 95 | owner: root 96 | group: root 97 | mode: '0644' 98 | become: true 99 | delegate_to: 
localhost 100 | when: use_etc_hosts 101 | 102 | # - name: "Ensure a local DNS entry for {{ ASCENDER_HOSTNAME }} is removed if using external DNS" 103 | # ansible.builtin.lineinfile: 104 | # path: /etc/hosts 105 | # regexp: "{{ ASCENDER_HOSTNAME }}" 106 | # state: absent 107 | # owner: root 108 | # group: root 109 | # mode: '0644' 110 | # become: true 111 | # delegate_to: localhost 112 | # when: not use_etc_hosts 113 | 114 | - name: "Ensure a local DNS entry for {{ LEDGER_HOSTNAME }} exists" 115 | ansible.builtin.lineinfile: 116 | path: /etc/hosts 117 | regexp: "{{ LEDGER_HOSTNAME }}" 118 | line: "{{ k3s_master_node_ip }} {{ LEDGER_HOSTNAME }}" 119 | owner: root 120 | group: root 121 | mode: '0644' 122 | become: true 123 | delegate_to: localhost 124 | when: 125 | - use_etc_hosts 126 | - LEDGER_INSTALL 127 | 128 | - name: "Ensure a local DNS entry for {{ REACT_HOSTNAME }} exists" 129 | ansible.builtin.lineinfile: 130 | path: /etc/hosts 131 | regexp: "{{ REACT_HOSTNAME }}" 132 | line: "{{ k3s_master_node_ip }} {{ REACT_HOSTNAME }}" 133 | owner: root 134 | group: root 135 | mode: '0644' 136 | become: true 137 | delegate_to: localhost 138 | when: 139 | - use_etc_hosts 140 | - REACT_INSTALL 141 | 142 | 143 | # - name: "Ensure a local DNS entry for {{ LEDGER_HOSTNAME }} is removed is using external DNS" 144 | # ansible.builtin.lineinfile: 145 | # path: /etc/hosts 146 | # regexp: "{{ LEDGER_HOSTNAME }}" 147 | # state: absent 148 | # owner: root 149 | # group: root 150 | # mode: '0644' 151 | # become: true 152 | # delegate_to: localhost 153 | # when: 154 | # - not use_etc_hosts 155 | # - LEDGER_INSTALL -------------------------------------------------------------------------------- /playbooks/roles/k8s_setup/tasks/k8s_setup_rke2.yml: -------------------------------------------------------------------------------- 1 | - name: Wait 600 seconds for target connection to become reachable/usable 2 | ansible.builtin.wait_for_connection: 3 | delay: 15 4 | 5 | - name: If 
download_kubeconfig is set to true, retrieve the RKE2 kubeconfig file from the kubeapi-server 6 | block: 7 | 8 | - name: Ensure that ~/.kube directory exists 9 | ansible.builtin.file: 10 | path: ~/.kube 11 | state: directory 12 | delegate_to: localhost 13 | 14 | - name: Copy kubeconfig file from default location to the ~/.kube directory 15 | ansible.builtin.fetch: 16 | src: /etc/rancher/rke2/rke2.yaml 17 | dest: ~/.kube/config 18 | flat: true 19 | become: true 20 | 21 | - name: "Replace the kubeconfig cluster kube-api server IP with the public IP address, if the cluster is remote " 22 | ansible.builtin.replace: 23 | path: ~/.kube/config 24 | regexp: '127.0.0.1' 25 | replace: "{{ kubeapi_server_ip }}" 26 | delegate_to: localhost 27 | when: ansible_host != "localhost" 28 | when: download_kubeconfig 29 | 30 | - name: Get a list of all nodes 31 | kubernetes.core.k8s_info: 32 | api_version: v1 33 | kind: Node 34 | validate_certs: false 35 | delegate_to: localhost 36 | environment: 37 | K8S_AUTH_KUBECONFIG: "{{ lookup('env', 'HOME') }}/.kube/config" 38 | 39 | - name: "Ensure a local DNS entry for {{ ASCENDER_HOSTNAME }} exists" 40 | ansible.builtin.lineinfile: 41 | path: /etc/hosts 42 | regexp: "{{ ASCENDER_HOSTNAME }}" 43 | line: "{{ kubeapi_server_ip }} {{ ASCENDER_HOSTNAME }}" 44 | owner: root 45 | group: root 46 | mode: '0644' 47 | become: true 48 | delegate_to: localhost 49 | when: use_etc_hosts 50 | 51 | # - name: "Ensure a local DNS entry for {{ ASCENDER_HOSTNAME }} is removed if using external DNS" 52 | # ansible.builtin.lineinfile: 53 | # path: /etc/hosts 54 | # regexp: "{{ ASCENDER_HOSTNAME }}" 55 | # state: absent 56 | # owner: root 57 | # group: root 58 | # mode: '0644' 59 | # become: true 60 | # delegate_to: localhost 61 | # when: not use_etc_hosts 62 | 63 | - name: "Ensure a local DNS entry for {{ LEDGER_HOSTNAME }} exists" 64 | ansible.builtin.lineinfile: 65 | path: /etc/hosts 66 | regexp: "{{ LEDGER_HOSTNAME }}" 67 | line: "{{ kubeapi_server_ip }} {{ 
LEDGER_HOSTNAME }}" 68 | owner: root 69 | group: root 70 | mode: '0644' 71 | become: true 72 | delegate_to: localhost 73 | when: 74 | - use_etc_hosts 75 | - LEDGER_INSTALL 76 | 77 | # - name: "Ensure a local DNS entry for {{ LEDGER_HOSTNAME }} is removed is using external DNS" 78 | # ansible.builtin.lineinfile: 79 | # path: /etc/hosts 80 | # regexp: "{{ LEDGER_HOSTNAME }}" 81 | # state: absent 82 | # owner: root 83 | # group: root 84 | # mode: '0644' 85 | # become: true 86 | # delegate_to: localhost 87 | # when: 88 | # - not use_etc_hosts 89 | # - LEDGER_INSTALL -------------------------------------------------------------------------------- /playbooks/roles/k8s_setup/templates/eks/ebs-scsi-driver-policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": [ 7 | "ec2:CreateSnapshot", 8 | "ec2:AttachVolume", 9 | "ec2:DetachVolume", 10 | "ec2:ModifyVolume", 11 | "ec2:DescribeAvailabilityZones", 12 | "ec2:DescribeInstances", 13 | "ec2:DescribeSnapshots", 14 | "ec2:DescribeTags", 15 | "ec2:DescribeVolumes", 16 | "ec2:DescribeVolumesModifications" 17 | ], 18 | "Resource": "*" 19 | }, 20 | { 21 | "Effect": "Allow", 22 | "Action": [ 23 | "ec2:CreateTags" 24 | ], 25 | "Resource": [ 26 | "arn:aws:ec2:*:*:volume/*", 27 | "arn:aws:ec2:*:*:snapshot/*" 28 | ], 29 | "Condition": { 30 | "StringEquals": { 31 | "ec2:CreateAction": [ 32 | "CreateVolume", 33 | "CreateSnapshot" 34 | ] 35 | } 36 | } 37 | }, 38 | { 39 | "Effect": "Allow", 40 | "Action": [ 41 | "ec2:DeleteTags" 42 | ], 43 | "Resource": [ 44 | "arn:aws:ec2:*:*:volume/*", 45 | "arn:aws:ec2:*:*:snapshot/*" 46 | ] 47 | }, 48 | { 49 | "Effect": "Allow", 50 | "Action": [ 51 | "ec2:CreateVolume" 52 | ], 53 | "Resource": "*", 54 | "Condition": { 55 | "StringLike": { 56 | "aws:RequestTag/ebs.csi.aws.com/cluster": "true" 57 | } 58 | } 59 | }, 60 | { 61 | "Effect": "Allow", 62 | "Action": [ 63 | 
"ec2:CreateVolume" 64 | ], 65 | "Resource": "*", 66 | "Condition": { 67 | "StringLike": { 68 | "aws:RequestTag/CSIVolumeName": "*" 69 | } 70 | } 71 | }, 72 | { 73 | "Effect": "Allow", 74 | "Action": [ 75 | "ec2:DeleteVolume" 76 | ], 77 | "Resource": "*", 78 | "Condition": { 79 | "StringLike": { 80 | "ec2:ResourceTag/ebs.csi.aws.com/cluster": "true" 81 | } 82 | } 83 | }, 84 | { 85 | "Effect": "Allow", 86 | "Action": [ 87 | "ec2:DeleteVolume" 88 | ], 89 | "Resource": "*", 90 | "Condition": { 91 | "StringLike": { 92 | "ec2:ResourceTag/CSIVolumeName": "*" 93 | } 94 | } 95 | }, 96 | { 97 | "Effect": "Allow", 98 | "Action": [ 99 | "ec2:DeleteVolume" 100 | ], 101 | "Resource": "*", 102 | "Condition": { 103 | "StringLike": { 104 | "ec2:ResourceTag/kubernetes.io/created-for/pvc/name": "*" 105 | } 106 | } 107 | }, 108 | { 109 | "Effect": "Allow", 110 | "Action": [ 111 | "ec2:DeleteSnapshot" 112 | ], 113 | "Resource": "*", 114 | "Condition": { 115 | "StringLike": { 116 | "ec2:ResourceTag/CSIVolumeSnapshotName": "*" 117 | } 118 | } 119 | }, 120 | { 121 | "Effect": "Allow", 122 | "Action": [ 123 | "ec2:DeleteSnapshot" 124 | ], 125 | "Resource": "*", 126 | "Condition": { 127 | "StringLike": { 128 | "ec2:ResourceTag/ebs.csi.aws.com/cluster": "true" 129 | } 130 | } 131 | } 132 | ] 133 | } -------------------------------------------------------------------------------- /playbooks/roles/k8s_setup/templates/eks/ebs-scsi-driver-role.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": [ 7 | "ec2:CreateSnapshot", 8 | "ec2:AttachVolume", 9 | "ec2:DetachVolume", 10 | "ec2:ModifyVolume", 11 | "ec2:DescribeAvailabilityZones", 12 | "ec2:DescribeInstances", 13 | "ec2:DescribeSnapshots", 14 | "ec2:DescribeTags", 15 | "ec2:DescribeVolumes", 16 | "ec2:DescribeVolumesModifications" 17 | ], 18 | "Resource": "*" 19 | }, 20 | { 21 | "Effect": "Allow", 22 | "Action": [ 
23 | "ec2:CreateTags" 24 | ], 25 | "Resource": [ 26 | "arn:aws:ec2:*:*:volume/*", 27 | "arn:aws:ec2:*:*:snapshot/*" 28 | ], 29 | "Condition": { 30 | "StringEquals": { 31 | "ec2:CreateAction": [ 32 | "CreateVolume", 33 | "CreateSnapshot" 34 | ] 35 | } 36 | } 37 | }, 38 | { 39 | "Effect": "Allow", 40 | "Action": [ 41 | "ec2:DeleteTags" 42 | ], 43 | "Resource": [ 44 | "arn:aws:ec2:*:*:volume/*", 45 | "arn:aws:ec2:*:*:snapshot/*" 46 | ] 47 | }, 48 | { 49 | "Effect": "Allow", 50 | "Action": [ 51 | "ec2:CreateVolume" 52 | ], 53 | "Resource": "*", 54 | "Condition": { 55 | "StringLike": { 56 | "aws:RequestTag/ebs.csi.aws.com/cluster": "true" 57 | } 58 | } 59 | }, 60 | { 61 | "Effect": "Allow", 62 | "Action": [ 63 | "ec2:CreateVolume" 64 | ], 65 | "Resource": "*", 66 | "Condition": { 67 | "StringLike": { 68 | "aws:RequestTag/CSIVolumeName": "*" 69 | } 70 | } 71 | }, 72 | { 73 | "Effect": "Allow", 74 | "Action": [ 75 | "ec2:DeleteVolume" 76 | ], 77 | "Resource": "*", 78 | "Condition": { 79 | "StringLike": { 80 | "ec2:ResourceTag/ebs.csi.aws.com/cluster": "true" 81 | } 82 | } 83 | }, 84 | { 85 | "Effect": "Allow", 86 | "Action": [ 87 | "ec2:DeleteVolume" 88 | ], 89 | "Resource": "*", 90 | "Condition": { 91 | "StringLike": { 92 | "ec2:ResourceTag/CSIVolumeName": "*" 93 | } 94 | } 95 | }, 96 | { 97 | "Effect": "Allow", 98 | "Action": [ 99 | "ec2:DeleteVolume" 100 | ], 101 | "Resource": "*", 102 | "Condition": { 103 | "StringLike": { 104 | "ec2:ResourceTag/kubernetes.io/created-for/pvc/name": "*" 105 | } 106 | } 107 | }, 108 | { 109 | "Effect": "Allow", 110 | "Action": [ 111 | "ec2:DeleteSnapshot" 112 | ], 113 | "Resource": "*", 114 | "Condition": { 115 | "StringLike": { 116 | "ec2:ResourceTag/CSIVolumeSnapshotName": "*" 117 | } 118 | } 119 | }, 120 | { 121 | "Effect": "Allow", 122 | "Action": [ 123 | "ec2:DeleteSnapshot" 124 | ], 125 | "Resource": "*", 126 | "Condition": { 127 | "StringLike": { 128 | "ec2:ResourceTag/ebs.csi.aws.com/cluster": "true" 129 | } 130 | } 131 | } 132 
| ] 133 | } -------------------------------------------------------------------------------- /playbooks/roles/k8s_setup/templates/eks/eks-cluster-manifest.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: eksctl.io/v1alpha5 3 | kind: ClusterConfig 4 | metadata: 5 | name: {{ EKS_CLUSTER_NAME }} 6 | region: {{ EKS_CLUSTER_REGION }} 7 | version: "{{ EKS_K8S_VERSION }}" 8 | vpc: 9 | cidr: {{ EKS_CLUSTER_CIDR }} 10 | hostnameType: resource-name 11 | clusterEndpoints: 12 | publicAccess: true 13 | privateAccess: true 14 | nodeGroups: 15 | - name: ascender-nodes 16 | instanceType: {{ EKS_INSTANCE_TYPE }} 17 | minSize: {{ EKS_MIN_WORKER_NODES }} 18 | maxSize: {{ EKS_MAX_WORKER_NODES }} 19 | desiredCapacity: {{ EKS_NUM_WORKER_NODES }} 20 | volumeSize: {{ EKS_WORKER_VOLUME_SIZE }} 21 | tags: 22 | nodegroup-role: worker 23 | iam: 24 | withOIDC: true 25 | addons: 26 | - name: kube-proxy 27 | - name: coredns 28 | - name: vpc-cni 29 | - name: aws-ebs-csi-driver 30 | attachPolicyARNs: 31 | - {{ ebs_scsi_driver_policy_arn }} -------------------------------------------------------------------------------- /playbooks/roles/k8s_setup/templates/eks/ingress-class-params.yml: -------------------------------------------------------------------------------- 1 | apiVersion: elbv2.k8s.aws/v1beta1 2 | kind: IngressClassParams 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: aws-load-balancer-controller 6 | name: alb 7 | --- 8 | apiVersion: networking.k8s.io/v1 9 | kind: IngressClass 10 | metadata: 11 | labels: 12 | app.kubernetes.io/name: aws-load-balancer-controller 13 | name: alb 14 | spec: 15 | controller: ingress.k8s.aws/alb 16 | parameters: 17 | apiGroup: elbv2.k8s.aws 18 | kind: IngressClassParams 19 | name: alb 20 | -------------------------------------------------------------------------------- /playbooks/roles/ledger_install/defaults/main.yml: 
-------------------------------------------------------------------------------- 1 | # Determines whether or not Ledger will be installed 2 | LEDGER_INSTALL: true 3 | 4 | # DNS resolvable hostname for Ledger service. This is required for install. 5 | LEDGER_HOSTNAME: ledger.example.com 6 | 7 | # The OCI container image for Ledger 8 | LEDGER_WEB_IMAGE: ghcr.io/ctrliq/ascender-ledger/ledger-web 9 | 10 | # The number of ledger web pods - this is good to ensure high availability 11 | ledger_web_replicas: 1 12 | 13 | # The OCI container image for the Ledger Parser 14 | LEDGER_PARSER_IMAGE: ghcr.io/ctrliq/ascender-ledger/ledger-parser 15 | 16 | # The number of ledger parser pods - this is good to ensure high availability 17 | ledger_parser_replicas: 1 18 | 19 | # The OCI container image for the Ledger Database 20 | LEDGER_DB_IMAGE: ghcr.io/ctrliq/ascender-ledger/ledger-db 21 | 22 | # The image tag indicating the version of Ledger you wish to install 23 | LEDGER_VERSION: latest 24 | 25 | # The Kubernetes namespace in which Ledger objects will live 26 | LEDGER_NAMESPACE: ledger 27 | 28 | # Admin password for Ledger (the username is admin by default) 29 | LEDGER_ADMIN_PASSWORD: "myadminpassword" 30 | 31 | # Password for Ledger database 32 | LEDGER_DB_PASSWORD: "mydbpassword" 33 | 34 | k8s_container_registry: ghcr.io/ctrliq/ascender-ledger 35 | -------------------------------------------------------------------------------- /playbooks/roles/ledger_install/templates/ledger_deployment_registry_secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: ledger-registry-secret 5 | type: kubernetes.io/dockerconfigjson 6 | data: 7 | .dockerconfigjson: {{ { 8 | "auths": { 9 | LEDGER_REGISTRY.BASE: { 10 | "auth": (LEDGER_REGISTRY.USERNAME ~ ":" ~ LEDGER_REGISTRY.PASSWORD) | b64encode 11 | } 12 | } 13 | } | to_json | b64encode }} 14 | 
-------------------------------------------------------------------------------- /playbooks/roles/setup_playbooks/tasks/ascender_credentials_aks.yml: -------------------------------------------------------------------------------- 1 | - name: Set the Ascender URL for HTTPS 2 | ansible.builtin.set_fact: 3 | ascender_ip: "{{ ASCENDER_HOSTNAME }}" 4 | ascender_port: 443 5 | when: k8s_lb_protocol == "https" 6 | 7 | - name: Set the Ascender URL for HTTP 8 | ansible.builtin.set_fact: 9 | ascender_ip: "{{ ASCENDER_HOSTNAME }}" 10 | ascender_port: 80 11 | when: k8s_lb_protocol == "http" 12 | 13 | - ansible.builtin.debug: 14 | msg: "The Ascender API endpoint is {{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}/api/v2/ping/" 15 | 16 | - name: Wait until Ascender API is Up and the Database is populated 17 | awx.awx.organization: 18 | controller_host: "{{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}" 19 | controller_username: "{{ ASCENDER_ADMIN_USER }}" 20 | controller_password: "{{ ASCENDER_ADMIN_PASSWORD }}" 21 | name: Default 22 | state: exists 23 | until: org.id is defined and org.id == 1 24 | retries: 200 25 | delay: 10 26 | register: org 27 | -------------------------------------------------------------------------------- /playbooks/roles/setup_playbooks/tasks/ascender_credentials_dkp.yml: -------------------------------------------------------------------------------- 1 | - name: Set the Ascender URL for HTTPS 2 | ansible.builtin.set_fact: 3 | ascender_ip: "{{ ASCENDER_HOSTNAME }}" 4 | ascender_port: 443 5 | when: k8s_lb_protocol == "https" 6 | 7 | - name: Set the Ascender URL for HTTP 8 | ansible.builtin.set_fact: 9 | ascender_ip: "{{ ASCENDER_HOSTNAME }}" 10 | ascender_port: 80 11 | when: k8s_lb_protocol == "http" 12 | 13 | - ansible.builtin.debug: 14 | msg: "The Ascender API endpoint is {{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}/api/v2/ping/" 15 | 16 | - name: Wait until Ascender API is Up and the Database is populated 17 
| awx.awx.organization: 18 | controller_host: "{{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}" 19 | controller_username: "{{ ASCENDER_ADMIN_USER }}" 20 | controller_password: "{{ ASCENDER_ADMIN_PASSWORD }}" 21 | name: Default 22 | state: exists 23 | validate_certs: false 24 | until: org.id == 1 25 | retries: 200 26 | delay: 10 27 | register: org 28 | -------------------------------------------------------------------------------- /playbooks/roles/setup_playbooks/tasks/ascender_credentials_eks.yml: -------------------------------------------------------------------------------- 1 | - name: Set the Ascender URL for HTTPS 2 | ansible.builtin.set_fact: 3 | ascender_ip: "{{ ASCENDER_HOSTNAME }}" 4 | ascender_port: 443 5 | when: k8s_lb_protocol == "https" 6 | 7 | - name: Set the Ascender URL for HTTP 8 | ansible.builtin.set_fact: 9 | ascender_ip: "{{ ASCENDER_HOSTNAME }}" 10 | ascender_port: 80 11 | when: k8s_lb_protocol == "http" 12 | 13 | - ansible.builtin.debug: 14 | msg: "The Ascender API endpoint is {{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}/api/v2/ping/" 15 | 16 | - name: Wait until Ascender API is Up and the Database is populated 17 | awx.awx.organization: 18 | controller_host: "{{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}" 19 | controller_username: "{{ ASCENDER_ADMIN_USER }}" 20 | controller_password: "{{ ASCENDER_ADMIN_PASSWORD }}" 21 | name: Default 22 | state: exists 23 | validate_certs: false 24 | until: org.id == 1 25 | retries: 200 26 | delay: 10 27 | register: org 28 | -------------------------------------------------------------------------------- /playbooks/roles/setup_playbooks/tasks/ascender_credentials_gke.yml: -------------------------------------------------------------------------------- 1 | - name: Set the Ascender URL for HTTPS 2 | ansible.builtin.set_fact: 3 | ascender_ip: "{{ ASCENDER_HOSTNAME }}" 4 | ascender_port: 443 5 | when: k8s_lb_protocol == "https" 6 | 7 | - name: Set the Ascender URL 
for HTTP 8 | ansible.builtin.set_fact: 9 | ascender_ip: "{{ ASCENDER_HOSTNAME }}" 10 | ascender_port: 80 11 | when: k8s_lb_protocol == "http" 12 | 13 | - ansible.builtin.debug: 14 | msg: "The Ascender API endpoint is {{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}/api/v2/ping/" 15 | 16 | - name: Wait until Ascender API is Up and the Database is populated 17 | awx.awx.organization: 18 | controller_host: "{{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}" 19 | controller_username: "{{ ASCENDER_ADMIN_USER }}" 20 | controller_password: "{{ ASCENDER_ADMIN_PASSWORD }}" 21 | name: Default 22 | state: exists 23 | until: org.id is defined and org.id == 1 24 | retries: 200 25 | delay: 10 26 | register: org 27 | -------------------------------------------------------------------------------- /playbooks/roles/setup_playbooks/tasks/ascender_credentials_k3s.yml: -------------------------------------------------------------------------------- 1 | - name: Set the Ascender URL for HTTPS 2 | ansible.builtin.set_fact: 3 | ascender_ip: "{{ ASCENDER_HOSTNAME }}" 4 | ascender_port: "{{ '30080' if k3s_service_type == 'NodePort' else '443' if k8s_lb_protocol == 'https' else '80' }}" 5 | 6 | - ansible.builtin.debug: 7 | msg: "The Ascender API endpoint is {{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}/api/v2/ping/" 8 | 9 | - name: Wait until Ascender API is Up and the Database is populated 10 | awx.awx.organization: 11 | controller_host: "{{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}" 12 | controller_username: "{{ ASCENDER_ADMIN_USER }}" 13 | controller_password: "{{ ASCENDER_ADMIN_PASSWORD }}" 14 | name: Default 15 | state: exists 16 | validate_certs: false 17 | until: org.id is defined and org.id == 1 18 | retries: 200 19 | delay: 10 20 | register: org 21 | -------------------------------------------------------------------------------- /playbooks/roles/setup_playbooks/tasks/ascender_credentials_rke2.yml: 
-------------------------------------------------------------------------------- 1 | - name: Set the Ascender URL for HTTPS 2 | ansible.builtin.set_fact: 3 | ascender_ip: "{{ ASCENDER_HOSTNAME }}" 4 | ascender_port: 443 5 | when: k8s_lb_protocol == "https" 6 | 7 | - name: Set the Ascender URL for HTTP 8 | ansible.builtin.set_fact: 9 | ascender_ip: "{{ ASCENDER_HOSTNAME }}" 10 | ascender_port: 80 11 | when: k8s_lb_protocol == "http" 12 | 13 | - ansible.builtin.debug: 14 | msg: "The Ascender API endpoint is {{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}/api/v2/ping/" 15 | 16 | - name: Wait until Ascender API is Up and the Database is populated 17 | awx.awx.organization: 18 | controller_host: "{{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}" 19 | controller_username: "{{ ASCENDER_ADMIN_USER }}" 20 | controller_password: "{{ ASCENDER_ADMIN_PASSWORD }}" 21 | name: Default 22 | state: exists 23 | validate_certs: false 24 | until: org.id is defined and org.id == 1 25 | retries: 200 26 | delay: 10 27 | register: org 28 | -------------------------------------------------------------------------------- /playbooks/roles/setup_playbooks/tasks/inventories.yml: -------------------------------------------------------------------------------- 1 | - name: Create Inventory - {{ inventory.name }} 2 | awx.awx.inventory: 3 | controller_host: "{{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}" 4 | controller_username: "{{ ASCENDER_ADMIN_USER }}" 5 | controller_password: "{{ ASCENDER_ADMIN_PASSWORD }}" 6 | name: "{{ inventory.name }}" 7 | # loop_var here is "inventory" (see main.yml), not "project" — the previous "project.description" was an undefined-variable copy-paste from projects.yml 8 | description: "{{ inventory.description | default('') }}" 9 | organization: Default 10 | validate_certs: false 11 | register: result 12 | until: result is succeeded 13 | retries: 20 14 | delay: 5 -------------------------------------------------------------------------------- /playbooks/roles/setup_playbooks/tasks/main.yml: -------------------------------------------------------------------------------- 1 | #- name: 
Create Credentials 2 | 3 | - name: Create Inventories 4 | include_tasks: inventories.yml 5 | loop: "{{ inventories}}" 6 | loop_control: 7 | label: "{{ inventory.name }}" 8 | loop_var: inventory 9 | 10 | - name: Create Projects 11 | include_tasks: projects.yml 12 | loop: "{{ projects }}" 13 | loop_control: 14 | label: "{{ project.name }}" 15 | loop_var: project 16 | pause: 1 17 | 18 | - name: Create Templates 19 | include_tasks: templates.yml 20 | loop: "{{ templates }}" 21 | loop_control: 22 | label: "{{ template.name }}" 23 | loop_var: template 24 | 25 | - name: Create Workflow Templates 26 | include_tasks: workflow_templates.yml 27 | loop: "{{ workflow_templates }}" 28 | loop_control: 29 | label: "{{ workflow_template.name }}" 30 | loop_var: workflow_template 31 | -------------------------------------------------------------------------------- /playbooks/roles/setup_playbooks/tasks/projects.yml: -------------------------------------------------------------------------------- 1 | - name: Create Project - {{ project.name }} 2 | awx.awx.project: 3 | controller_host: "{{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}" 4 | controller_username: "{{ ASCENDER_ADMIN_USER }}" 5 | controller_password: "{{ ASCENDER_ADMIN_PASSWORD }}" 6 | name: "{{ project.name }}" 7 | description: "{{ project.description }}" 8 | organization: Default 9 | scm_type: "{{ project.scm_type }}" 10 | scm_url: "{{ project.scm_url }}" 11 | scm_update_on_launch: "{{ project.scm_update_on_launch | default(omit) }}" 12 | validate_certs: false 13 | register: result 14 | until: result is succeeded 15 | retries: 20 16 | delay: 5 17 | 18 | 19 | -------------------------------------------------------------------------------- /playbooks/roles/setup_playbooks/tasks/surveys/patching.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "", 3 | "spec": [ 4 | { 5 | "max": 1024, 6 | "min": 0, 7 | "type": "multiplechoice", 8 | "choices": [ 9 | "Yes", 10 
| "No" 11 | ], 12 | "default": "No", 13 | "required": true, 14 | "variable": "security_updates", 15 | "new_question": true, 16 | "question_name": "Only install Security Updates?", 17 | "question_description": "" 18 | }, 19 | { 20 | "max": 1024, 21 | "min": 0, 22 | "type": "multiplechoice", 23 | "choices": [ 24 | "Yes", 25 | "No" 26 | ], 27 | "default": "Yes", 28 | "required": true, 29 | "variable": "update_kernel", 30 | "new_question": false, 31 | "question_name": "Install Kernel Updates?", 32 | "question_description": "" 33 | }, 34 | { 35 | "max": 1024, 36 | "min": 0, 37 | "type": "multiplechoice", 38 | "choices": [ 39 | "Yes", 40 | "No" 41 | ], 42 | "default": "Yes", 43 | "required": true, 44 | "variable": "reboot_kernel", 45 | "new_question": true, 46 | "question_name": "Reboot after Kernel Updates? (if updated)", 47 | "question_description": "" 48 | } 49 | ], 50 | "description": "" 51 | } -------------------------------------------------------------------------------- /playbooks/roles/setup_playbooks/tasks/surveys/selinux.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "", 3 | "spec": [ 4 | { 5 | "max": 1024, 6 | "min": 0, 7 | "type": "multiplechoice", 8 | "choices": [ 9 | "enforcing", 10 | "permissive", 11 | "disabled" 12 | ], 13 | "default": "enforcing", 14 | "required": true, 15 | "variable": "selinux_state", 16 | "new_question": true, 17 | "question_name": "State", 18 | "question_description": "The SELinux Mode" 19 | }, 20 | { 21 | "max": 64, 22 | "min": 1, 23 | "type": "text", 24 | "choices": "", 25 | "default": "targeted", 26 | "required": true, 27 | "variable": "selinux_policy", 28 | "new_question": true, 29 | "question_name": "Policy", 30 | "question_description": "Name of the SELinux Policy" 31 | }, 32 | { 33 | "max": 1024, 34 | "min": 1, 35 | "type": "text", 36 | "choices": "", 37 | "default": "/etc/selinux/config", 38 | "required": true, 39 | "variable": "selinux_configfile", 40 | 
"new_question": true, 41 | "question_name": "Configuration File", 42 | "question_description": "Path to the Selinux Configuration File" 43 | }, 44 | { 45 | "max": 1024, 46 | "min": 0, 47 | "type": "multiplechoice", 48 | "choices": [ 49 | "Yes", 50 | "No" 51 | ], 52 | "default": "Yes", 53 | "required": true, 54 | "variable": "selinux_reboot", 55 | "new_question": true, 56 | "question_name": "Reboot if Necessary", 57 | "question_description": "If enabled and a change is made to SELinux that requires a reboot, the server will reboot immediately" 58 | } 59 | ], 60 | "description": "" 61 | } -------------------------------------------------------------------------------- /playbooks/roles/setup_playbooks/tasks/templates.yml: -------------------------------------------------------------------------------- 1 | - name: Create Job Template - {{ template.name }} 2 | awx.awx.job_template: 3 | controller_host: "{{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}" 4 | controller_username: "{{ ASCENDER_ADMIN_USER }}" 5 | controller_password: "{{ ASCENDER_ADMIN_PASSWORD }}" 6 | name: "{{ template.name }}" 7 | description: "{{ template.description | default('') }}" 8 | job_type: "{{ template.job_type | default('run') }}" 9 | organization: Default 10 | inventory: "{{ template.inventory | default('Default Inventory') }}" 11 | project: "{{ template.project }}" 12 | playbook: "{{ template.playbook }}" 13 | state: present 14 | survey_enabled: "{{ template.survey_enabled | default(omit) }}" 15 | survey_spec: "{% if template.survey_file is defined %}{{ lookup('file', template.survey_file) }}{% else %}{{ ''| default(omit, true) }}{% endif %}" 16 | ask_credential_on_launch: "{{ template.ask_credential_on_launch | default(omit) }}" 17 | ask_inventory_on_launch: "{{ template.ask_inventory_on_launch | default(omit) }}" 18 | diff_mode: "{{ template.diff_mode | default(omit) }}" 19 | extra_vars: "{{ template.extra_vars | default(omit) }}" 20 | validate_certs: false 21 | limit: "{{ template.limit | default(omit) }}" 22 | register: result 23 | until: result is succeeded 24 | retries: 20 25 | delay: 5 26 | 
-------------------------------------------------------------------------------- /playbooks/roles/setup_playbooks/tasks/workflow_templates.yml: -------------------------------------------------------------------------------- 1 | - name: Create Workflow Job Template - {{ workflow_template.name }} 2 | awx.awx.workflow_job_template: 3 | controller_host: "{{ k8s_lb_protocol }}://{{ ascender_ip }}:{{ ascender_port }}" 4 | controller_username: "{{ ASCENDER_ADMIN_USER }}" 5 | controller_password: "{{ ASCENDER_ADMIN_PASSWORD }}" 6 | name: "{{ workflow_template.name }}" 7 | organization: Default 8 | inventory: "{{ workflow_template.inventory | default('Default Inventory') }}" 9 | state: present 10 | survey_enabled: "{{ workflow_template.survey_enabled | default(omit) }}" 11 | survey_spec: "{% if workflow_template.survey_file is defined %}{{ lookup('file', workflow_template.survey_file) }}{% else %}{{ ''| default(omit, true) }}{% endif %}" 12 | ask_inventory_on_launch: "{{ workflow_template.ask_inventory_on_launch | default(omit) }}" 13 | ask_variables_on_launch: "{{ workflow_template.ask_variables_on_launch | default(omit) }}" 14 | diff_mode: "{{ workflow_template.diff_mode | default(omit) }}" 15 | validate_certs: false 16 | workflow_nodes: "{{ workflow_template.workflow_nodes | default(omit) }}" 17 | register: result 18 | until: result is succeeded 19 | retries: 20 20 | delay: 5 21 | -------------------------------------------------------------------------------- /playbooks/roles/setup_playbooks/vars/main.yml: -------------------------------------------------------------------------------- 1 | inventories: 2 | - name: Default Inventory 3 | 4 | projects: 5 | - name: Ascender Playbooks 6 | description: Administrative Playbooks for Rocky Linux 7 | scm_type: git 8 | scm_url: https://github.com/ctrliq/ascender-playbooks.git 9 | scm_update_on_launch: false 10 | 11 | - name: Ansible Lockdown - CIS - EL9 12 | description: CIS Benchmark Hardening for Enterprise Linux 9 13 | 
scm_type: git 14 | scm_url: https://github.com/ansible-lockdown/RHEL9-CIS.git 15 | scm_update_on_launch: false 16 | 17 | - name: Ansible Lockdown - CIS - EL8 18 | description: CIS Benchmark Hardening for Enterprise Linux 8 19 | scm_type: git 20 | scm_url: https://github.com/ansible-lockdown/RHEL8-CIS.git 21 | scm_update_on_launch: false 22 | 23 | - name: Ansible Lockdown - STIG - EL8 24 | description: STIG Hardening for Enterprise Linux 8 25 | scm_type: git 26 | scm_url: https://github.com/ansible-lockdown/RHEL8-STIG.git 27 | scm_update_on_launch: false 28 | 29 | - name: Fuzzball Orchestrate 30 | description: Fuzzball Orchestrate 31 | scm_type: git 32 | scm_url: https://github.com/ctrliq/fuzzball-ansible.git 33 | scm_update_on_launch: false 34 | 35 | templates: 36 | - name: Configure SELinux 37 | description: "" 38 | job_type: run 39 | inventory: Default Inventory 40 | project: Ascender Playbooks 41 | playbook: selinux.yml 42 | survey_enabled: true 43 | survey_file: surveys/selinux.json 44 | ask_credential_on_launch: true 45 | diff_mode: true 46 | 47 | - name: Gather System Facts 48 | description: "" 49 | job_type: run 50 | inventory: Default Inventory 51 | project: Ascender Playbooks 52 | playbook: gather_facts.yml 53 | ask_credential_on_launch: true 54 | 55 | - name: Patch Enterprise Linux 56 | description: "" 57 | job_type: run 58 | inventory: Default Inventory 59 | project: Ascender Playbooks 60 | playbook: patching.yml 61 | survey_enabled: true 62 | survey_file: surveys/patching.json 63 | ask_credential_on_launch: true 64 | 65 | - name: CIS Benchmark Hardening for Enterprise Linux 9 66 | description: CIS Benchmark Hardening for Enterprise Linux 9 67 | job_type: run 68 | inventory: Default Inventory 69 | project: Ansible Lockdown - CIS - EL9 70 | playbook: site.yml 71 | ask_credential_on_launch: true 72 | diff_mode: true 73 | extra_vars: 74 | rhel9cis_set_boot_pass: false 75 | 76 | - name: CIS Benchmark Hardening for Enterprise Linux 8 77 | description: CIS 
Benchmark Hardening for Enterprise Linux 8 78 | job_type: run 79 | inventory: Default Inventory 80 | project: Ansible Lockdown - CIS - EL8 81 | playbook: site.yml 82 | ask_credential_on_launch: true 83 | diff_mode: true 84 | extra_vars: 85 | rhel8cis_allow_authselect_updates: false 86 | 87 | - name: STIG Hardening for Enterprise Linux 8 88 | description: STIG Hardening for Enterprise Linux 8 89 | job_type: run 90 | inventory: Default Inventory 91 | project: Ansible Lockdown - STIG - EL8 92 | playbook: site.yml 93 | ask_credential_on_launch: true 94 | diff_mode: true 95 | 96 | - name: Deploy RKE2 for Fuzzball 97 | description: Deploy RKE2 for Fuzzball 98 | job_type: run 99 | project: Fuzzball Orchestrate 100 | playbook: playbooks/deploy_rke2.yaml 101 | ask_inventory_on_launch: true 102 | limit: controller 103 | diff_mode: true 104 | 105 | - name: Deploy Fuzzball CLI 106 | description: Deploy Fuzzball CLI 107 | job_type: run 108 | project: Fuzzball Orchestrate 109 | playbook: playbooks/deploy_fuzzball_cli.yaml 110 | ask_inventory_on_launch: true 111 | limit: controller 112 | diff_mode: true 113 | 114 | - name: Deploy Fuzzball NFS Server 115 | description: Deploy Fuzzball NFS Server 116 | job_type: run 117 | project: Fuzzball Orchestrate 118 | playbook: playbooks/deploy_nfs_server.yaml 119 | ask_inventory_on_launch: true 120 | limit: admin 121 | diff_mode: true 122 | 123 | - name: Deploy Fuzzball Orchestrate 124 | description: Deploy Fuzzball Orchestrate 125 | job_type: run 126 | project: Fuzzball Orchestrate 127 | playbook: playbooks/deploy_fuzzball_orchestrate.yaml 128 | ask_inventory_on_launch: true 129 | limit: controller 130 | diff_mode: true 131 | 132 | - name: Deploy Fuzzball Substrate 133 | description: Deploy Fuzzball Substrate 134 | job_type: run 135 | project: Fuzzball Orchestrate 136 | playbook: playbooks/deploy_fuzzball_substrate.yaml 137 | ask_inventory_on_launch: true 138 | limit: compute 139 | diff_mode: true 140 | 141 | workflow_templates: 142 | 143 | 
- name: Deploy Fuzzball Orchestrate On-Premise 144 | description: Deploy Fuzzball Orchestrate On-Premise 145 | ask_inventory_on_launch: true 146 | ask_variables_on_launch: true 147 | workflow_nodes: 148 | - identifier: Deploy RKE2 for Fuzzball 149 | unified_job_template: 150 | name: Deploy RKE2 for Fuzzball 151 | type: job_template 152 | related: 153 | success_nodes: 154 | - identifier: Deploy Fuzzball Orchestrate 155 | - identifier: Deploy Fuzzball CLI 156 | unified_job_template: 157 | name: Deploy Fuzzball CLI 158 | type: job_template 159 | - identifier: Deploy Fuzzball NFS Server 160 | unified_job_template: 161 | name: Deploy Fuzzball NFS Server 162 | type: job_template 163 | related: 164 | success_nodes: 165 | - identifier: Deploy Fuzzball Orchestrate 166 | - identifier: Deploy Fuzzball Substrate 167 | - identifier: Deploy Fuzzball Orchestrate 168 | all_parents_must_converge: true 169 | unified_job_template: 170 | name: Deploy Fuzzball Orchestrate 171 | type: job_template 172 | - identifier: Deploy Fuzzball Substrate 173 | unified_job_template: 174 | name: Deploy Fuzzball Substrate 175 | type: job_template 176 | -------------------------------------------------------------------------------- /playbooks/setup.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024, Ctrl IQ, Inc. All rights reserved. 
2 | 3 | - name: Ensure Correct architecture for K3s Target 4 | ansible.builtin.import_playbook: assertions.yml 5 | 6 | - name: Ensure Kubernetes Cluster Access 7 | ansible.builtin.import_playbook: kubernetes_setup.yml 8 | 9 | - name: Install Ascender 10 | ansible.builtin.import_playbook: install_ascender.yml 11 | 12 | - name: Install Ascender React 13 | ansible.builtin.import_playbook: install_react.yml 14 | when: REACT_INSTALL 15 | 16 | - name: Install Ledger 17 | ansible.builtin.import_playbook: install_ledger.yml 18 | when: LEDGER_INSTALL 19 | 20 | - name: Configure Initial Playbooks 21 | ansible.builtin.import_playbook: setup_playbooks.yml 22 | when: 23 | - ascender_setup_playbooks | bool 24 | - not k8s_offline | default(false) 25 | -------------------------------------------------------------------------------- /playbooks/setup_playbooks.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023, Ctrl IQ, Inc. All rights reserved. 2 | 3 | - hosts: localhost 4 | gather_facts: no 5 | connection: local 6 | 7 | vars_files: 8 | - ["../custom.config.yml", "../default.config.yml"] 9 | 10 | environment: 11 | K8S_AUTH_KUBECONFIG: "{{ lookup('env', 'HOME') }}/.kube/config" 12 | PATH: "/usr/local/bin:{{ lookup('env', 'PATH') }}" #required as the aws cli lives at /usr/local/bin/aws 13 | 14 | tasks: 15 | 16 | - name: "Run ascender_credentials_{{ k8s_platform }}.yml" 17 | ansible.builtin.include_role: 18 | name: setup_playbooks 19 | tasks_from: "ascender_credentials_{{ k8s_platform }}" 20 | 21 | - name: "Configure Ascender with CIQ resources" 22 | ansible.builtin.include_role: 23 | name: setup_playbooks -------------------------------------------------------------------------------- /setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright (c) 2023, Ctrl IQ, Inc. All rights reserved. 
4 | 5 | # Determine the configuration file to use 6 | config_file="" 7 | 8 | if [ -f "custom.config.yml" ]; then 9 | config_file="custom.config.yml" 10 | elif [ -f "default.config.yml" ]; then 11 | config_file="default.config.yml" 12 | else 13 | echo "Error: Neither custom.config.yml nor default.config.yml found." 14 | exit 1 15 | fi 16 | 17 | # Read the k8s_platform value from the configuration file 18 | k8s_platform=$(grep '^k8s_platform:' "$config_file" | awk '{print $2}') 19 | 20 | # Check if the k8s_platform is either "eks", "gke" or "aks" 21 | if [[ "$k8s_platform" == "eks" || "$k8s_platform" == "gke" || "$k8s_platform" == "aks" ]]; then 22 | # Check if the script is run as root or with sudo 23 | if [ "$(id -u)" -eq 0 ]; then 24 | echo "Error: This script must not be run as root or with sudo when k8s_platform is $k8s_platform." 25 | exit 1 26 | fi 27 | 28 | # Check if the system is RHEL or Rocky Linux version 9 or higher 29 | os_family=$(grep -oP '(?<=^ID_LIKE=).+' /etc/os-release | tr -d '"') 30 | os_version=$(grep '^VERSION_ID=' /etc/os-release | cut -d'=' -f2 | tr -d '"' | cut -d. -f1) 31 | 32 | if [[ "$os_family" == *"rhel"* || "$os_family" == *"fedora"* || "$os_family" == *"centos"* ]]; then 33 | if [ "$os_version" -lt 9 ]; then 34 | echo "Error: This script must be run on RHEL or Rocky Linux version 9 or higher when k8s_platform is $k8s_platform." 35 | exit 1 36 | fi 37 | else 38 | echo "Error: Unsupported OS family $os_family. This script must be run on RHEL or Rocky Linux when k8s_platform is $k8s_platform." 
39 | exit 1 40 | fi 41 | fi 42 | 43 | # Verify that the CPU architecture of the local machine is x86_64 44 | LINUX_ARCH=$(arch) 45 | if [[ $LINUX_ARCH != "x86_64" ]]; then 46 | echo "CPU architecture must be x86_64."; exit 1; 47 | fi 48 | 49 | # Verify that the Operating system of the local machine is in the centos/rocky family 50 | OS_FAMILY=$(grep -oP '(?<=^ID_LIKE=).+' /etc/os-release) 51 | if !([[ "$OS_FAMILY" =~ "rhel" || "$OS_FAMILY" =~ "fedora" || "$OS_FAMILY" =~ "centos" ]]); then 52 | echo "The OS family must be rocky, rhel, fedora or centos"; exit 1; 53 | fi 54 | 55 | # Verify that the Operating System major version of the local machine is either 8 or 9 56 | LINUX_VERSION=$(grep -oP '(?<=^VERSION_ID=).+' /etc/os-release | tr -d '"' | cut -d. -f1) 57 | if [[ $LINUX_VERSION != "9" && $LINUX_VERSION != "8" ]]; then 58 | echo "Linux major version must be 8 or 9."; exit 1; 59 | fi 60 | 61 | # COLORIZE THE ANSIBLE SHELL (must be exported to reach the ansible-playbook child processes) 62 | if [ -t "0" ]; then 63 | export ANSIBLE_FORCE_COLORS=True 64 | fi 65 | 66 | if [ -f "$(dirname $0)/inventory.yml" ]; then 67 | INVENTORY_FILE="$(dirname $0)/inventory.yml" 68 | else 69 | INVENTORY_FILE="$(dirname $0)/inventory" 70 | fi 71 | 72 | echo "Using Inventory File: ${INVENTORY_FILE}" 73 | 74 | check_ansible() { 75 | type -p ansible-playbook > /dev/null 76 | } 77 | 78 | check_collections() { 79 | ansible-doc -t module -l | grep ansible.posix.selinux > /dev/null 80 | if [ $? -ne 0 ]; then 81 | return 0 82 | fi 83 | ansible-doc -t module -l | grep awx.awx.settings > /dev/null 84 | if [ $? -ne 0 ]; then 85 | return 0 86 | fi 87 | ansible-doc -t module -l | grep kubernetes.core.k8s > /dev/null 88 | if [ $? -ne 0 ]; then 89 | return 0 90 | fi 91 | ansible-doc -t module -l | grep amazon.aws.ec2_instance > /dev/null 92 | if [ $? -ne 0 ]; then 93 | return 0 94 | fi 95 | 96 | return 1 97 | } 98 | 99 | # ------------------------- # 100 | 101 | check_ansible 102 | if [ $? 
-ne 0 ]; then 103 | echo "#### INSTALLING ANSIBLE ####" 104 | sudo dnf install -y ansible-core 105 | fi 106 | 107 | check_collections 108 | if [ $? -ne 1 ]; then 109 | echo "#### INSTALLING COLLECTIONS ####" 110 | if [ -f "$(dirname $0)/offline/collections/ansible-posix-1.5.4.tar.gz" ]; then 111 | ansible-galaxy collection install $(dirname $0)/offline/collections/ansible-posix-1.5.4.tar.gz 112 | ansible-galaxy collection install $(dirname $0)/offline/collections/awx-awx-22.3.0.tar.gz 113 | ansible-galaxy collection install $(dirname $0)/offline/collections/kubernetes-core-2.4.0.tar.gz 114 | ansible-galaxy collection install $(dirname $0)/offline/collections/amazon-aws-6.5.0.tar.gz 115 | else 116 | ansible-galaxy install -r collections/requirements.yml 117 | fi 118 | fi 119 | 120 | PASSED_ARG=$@ 121 | if [[ ${#PASSED_ARG} -ne 0 ]] 122 | then 123 | while getopts "pbr" ARG; do 124 | 125 | case $ARG in 126 | 127 | p) 128 | 129 | printf "\nCREATE CLOUD PERMISSIONS ARTIFACTS\n" 130 | 131 | ansible-playbook -i "${INVENTORY_FILE}" playbooks/apply_cloud_permissions.yml 132 | 133 | printf "\n\nNOTE: Check the ./ascender_install_artifacts directory for cloud permissions files.\n\n" 134 | ;; 135 | b) 136 | 137 | printf "\nBACKUP\n" 138 | 139 | ansible-playbook -i "${INVENTORY_FILE}" playbooks/backup.yml 140 | ;; 141 | r) 142 | 143 | echo "RESTORE" 144 | 145 | ansible-playbook -i "${INVENTORY_FILE}" playbooks/restore.yml 146 | ;; 147 | \?) 148 | 149 | exit 150 | ;; 151 | esac 152 | done 153 | else 154 | ansible-playbook -i "${INVENTORY_FILE}" playbooks/setup.yml 155 | 156 | RC=$? 157 | if [ ${RC} -ne 0 ]; then 158 | echo "ERROR OCCURRED DURING SETUP" 159 | else 160 | echo "ASCENDER SUCCESSFULLY SETUP" 161 | fi 162 | fi --------------------------------------------------------------------------------