├── private └── .gitignore ├── state.tfvars.example ├── .gitignore ├── resources ├── replaybot-legacy │ ├── 00-namespace.yml │ ├── readme.md │ ├── parser.yml │ └── discord.yml ├── cordbots │ ├── readme.md │ ├── bogs.yml │ └── terry.yml └── replaybot │ └── replaybot-preprod.yml ├── bin ├── flux-identity ├── _helpers ├── readme.md ├── add-client ├── setup-vpn ├── set-secrets └── ipupdate.py ├── ansible ├── roles │ ├── platform │ │ ├── tasks │ │ │ ├── example.yml │ │ │ ├── flux.yml │ │ │ ├── newrelic.yml │ │ │ ├── externaldns.yml │ │ │ ├── cert-manager.yml │ │ │ └── main.yml │ │ └── templates │ │ │ ├── example.yml │ │ │ ├── cert-manager.yml │ │ │ ├── externaldns.yml │ │ │ ├── flux.yml │ │ │ └── newrelic.yml │ ├── openvpn │ │ └── tasks │ │ │ └── main.yml │ ├── microk8s │ │ ├── tasks │ │ │ ├── main.yml │ │ │ ├── modifications.yml │ │ │ └── remote-hacks.yml │ │ └── templates │ │ │ └── ingress.yml │ └── k3s │ │ ├── tasks │ │ └── main.yml │ │ ├── templates │ │ └── ingress.yml │ │ └── files │ │ └── install.sh └── main.yml ├── variables.tf ├── .env.example ├── readme.md └── main.tf /private/.gitignore: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /state.tfvars.example: -------------------------------------------------------------------------------- 1 | region = "" 2 | bucket = "" 3 | key = "" 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .terraform* 2 | .env 3 | state.tfvars 4 | private/* 5 | !private/.gitignore 6 | -------------------------------------------------------------------------------- /resources/replaybot-legacy/00-namespace.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: replaybot-legacy 5 | 
-------------------------------------------------------------------------------- /bin/flux-identity: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | source bin/_helpers 4 | ensure-environment 5 | KUBECONFIG=${PRIVATE_DIR}/kubeconfig fluxctl identity --k8s-fwd-ns flux 6 | -------------------------------------------------------------------------------- /ansible/roles/platform/tasks/example.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: write example manifest 3 | template: 4 | src: ../templates/example.yml 5 | dest: $PWD/example.yml 6 | 7 | - name: apply example manifest 8 | shell: "{{ engine }} kubectl apply -f $PWD/example.yml" 9 | when: deploy_example 10 | -------------------------------------------------------------------------------- /ansible/roles/platform/tasks/flux.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: write flux manifest 3 | template: 4 | src: ../templates/flux.yml 5 | dest: $PWD/flux.yml 6 | 7 | - name: apply flux manifest 8 | shell: "{{ engine }} kubectl apply -f $PWD/flux.yml" 9 | 10 | - name: remove tmp flux manifest 11 | file: 12 | path: $PWD/flux.yml 13 | state: absent 14 | -------------------------------------------------------------------------------- /ansible/roles/platform/tasks/newrelic.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: write new-relic agent manifest 3 | template: 4 | src: ../templates/newrelic.yml 5 | dest: $PWD/newrelic.yml 6 | 7 | - name: apply newrelic manifest 8 | shell: "{{ engine }} kubectl apply -f $PWD/newrelic.yml" 9 | 10 | - name: remove newrelic manifest 11 | file: 12 | path: $PWD/newrelic.yml 13 | state: absent 14 | -------------------------------------------------------------------------------- /bin/_helpers: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ensure-environment() { 4 | APP_VERSION=2.3 5 | NAMESPACE=ovpn 6 | CLIENT_NAME=${CLIENT_NAME:=client} 7 | VPN_HOSTNAME=$TF_VAR_domain_name 8 | VPN_PORT=31304 9 | DNS_SERVER=1.1.1.1 10 | VPN_PROTOCOL=tcp 11 | VPN_URI=${VPN_PROTOCOL}://${VPN_HOSTNAME}:${VPN_PORT} 12 | PRIVATE_DIR=${PWD}/private/${VPN_HOSTNAME} 13 | } 14 | -------------------------------------------------------------------------------- /ansible/roles/platform/tasks/externaldns.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: write externaldns manifest 3 | template: 4 | src: ../templates/externaldns.yml 5 | dest: $PWD/externaldns.yml 6 | 7 | - name: apply externaldns manifest 8 | shell: "{{ engine }} kubectl apply -f $PWD/externaldns.yml" 9 | 10 | - name: remove tmp externaldns manifest 11 | file: 12 | path: $PWD/externaldns.yml 13 | state: absent 14 | -------------------------------------------------------------------------------- /bin/readme.md: -------------------------------------------------------------------------------- 1 | # vpn setup scripts 2 | taken from https://github.com/suda/k8s-ovpn-chart 3 | 4 | * run `bin/setup-vpn` to setup the vpn for the first time. your client config will be placed in `private/client.ovpn`. set the `CLIENT_NAME` environment variable to customize the name used 5 | * to add a new client, run `CLIENT_NAME=some_new_client bin/add-client`, and the new client configuration will be placed in `private/some_new_client.ovpn` 6 | -------------------------------------------------------------------------------- /bin/add-client: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | source bin/_helpers 4 | ensure-environment 5 | 6 | echo "🔢 Generating client certificate and config..." 
7 | docker run --net=none --rm -it -v ${PRIVATE_DIR}/ovpn0:/etc/openvpn kylemanna/openvpn:${APP_VERSION} easyrsa build-client-full ${CLIENT_NAME} 8 | docker run --net=none --rm -v ${PRIVATE_DIR}/ovpn0:/etc/openvpn kylemanna/openvpn:${APP_VERSION} ovpn_getclient ${CLIENT_NAME} > private/${VPN_HOSTNAME}/${CLIENT_NAME}.ovpn 9 | -------------------------------------------------------------------------------- /resources/replaybot-legacy/readme.md: -------------------------------------------------------------------------------- 1 | # replaybot-legacy 2 | deployment files for the current version of replaybot 3 | 4 | provide a secret in the cluster as follows: 5 | 6 | `echo -n 'secret-value' | base64` 7 | 8 | ``` 9 | apiVersion: v1 10 | kind: Secret 11 | metadata: 12 | name: replaybot-secrets 13 | namespace: replaybot-legacy 14 | type: Opaque 15 | data: 16 | NEW_RELIC_LICENSE_KEY: [encoded key] 17 | BOT_TOKEN: [encoded key] 18 | BOT_SHARED_KEY: [encoded key] 19 | ``` 20 | -------------------------------------------------------------------------------- /resources/cordbots/readme.md: -------------------------------------------------------------------------------- 1 | # cordbots 2 | provide a couple of secrets with the api keys for the bots 3 | 4 | `echo -n 'discord-api-key' | base64` 5 | 6 | ``` 7 | apiVersion: v1 8 | kind: Secret 9 | metadata: 10 | name: terry-secrets 11 | namespace: terry 12 | type: Opaque 13 | data: 14 | discord: [encoded key] 15 | --- 16 | apiVersion: v1 17 | kind: Secret 18 | metadata: 19 | name: bogs-secrets 20 | namespace: bogs 21 | type: Opaque 22 | data: 23 | discord: [encoded key] 24 | ``` 25 | -------------------------------------------------------------------------------- /bin/setup-vpn: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | source bin/_helpers 4 | ensure-environment 5 | 6 | echo "🔢 Generating OpenVPN config..." 
7 | docker run --net=none --rm -it -v ${PRIVATE_DIR}/ovpn0:/etc/openvpn kylemanna/openvpn:${APP_VERSION} ovpn_genconfig \ 8 | -u ${VPN_URI} \ 9 | -C 'AES-256-CBC' -a 'SHA384' \ 10 | -b -n ${DNS_SERVER} 11 | 12 | echo "🔑 Initialising keys..." 13 | docker run --net=none --rm -it -v ${PRIVATE_DIR}/ovpn0:/etc/openvpn kylemanna/openvpn:${APP_VERSION} ovpn_initpki 14 | docker run --net=none --rm -it -v ${PRIVATE_DIR}/ovpn0:/etc/openvpn kylemanna/openvpn:${APP_VERSION} ovpn_copy_server_files 15 | 16 | bin/set-secrets 17 | bin/add-client 18 | -------------------------------------------------------------------------------- /resources/cordbots/bogs.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: bogs 5 | --- 6 | apiVersion: apps/v1 7 | kind: Deployment 8 | metadata: 9 | name: bogs 10 | namespace: bogs 11 | labels: 12 | app: bogs 13 | spec: 14 | selector: 15 | matchLabels: 16 | app: bogs 17 | template: 18 | metadata: 19 | labels: 20 | app: bogs 21 | spec: 22 | containers: 23 | - name: bogs 24 | image: leigholiver/cryptoapi 25 | env: 26 | - name: DISCORD_TOKEN 27 | valueFrom: 28 | secretKeyRef: 29 | name: bogs-secrets 30 | key: discord 31 | -------------------------------------------------------------------------------- /resources/cordbots/terry.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: terry 5 | --- 6 | apiVersion: apps/v1 7 | kind: Deployment 8 | metadata: 9 | name: terry 10 | namespace: terry 11 | labels: 12 | app: terry 13 | spec: 14 | selector: 15 | matchLabels: 16 | app: terry 17 | template: 18 | metadata: 19 | labels: 20 | app: terry 21 | spec: 22 | containers: 23 | - name: terry 24 | image: leigholiver/terry 25 | env: 26 | - name: DISCORD_TOKEN 27 | valueFrom: 28 | secretKeyRef: 29 | name: terry-secrets 30 | key: discord 31 | 
-------------------------------------------------------------------------------- /ansible/roles/platform/tasks/cert-manager.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install cert-manager 3 | shell: "{{ engine }} kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v0.16.1/cert-manager.yaml" 4 | 5 | - name: write cert-manager manifest 6 | template: 7 | src: ../templates/cert-manager.yml 8 | dest: $PWD/cert-manager.yml 9 | 10 | - name: apply cert-manager manifest 11 | shell: "{{ engine }} kubectl apply -f $PWD/cert-manager.yml" 12 | # todo: failed calling webhook \"webhook.cert-manager.io\" connection refused? 13 | retries: 5 14 | delay: 10 15 | register: result 16 | until: result.rc == 0 17 | 18 | - name: remove tmp cert-manager manifest 19 | file: 20 | path: $PWD/cert-manager.yml 21 | state: absent 22 | -------------------------------------------------------------------------------- /ansible/roles/platform/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # readme or `./templates/example.yml` to see how to use these 3 | 4 | # externaldns, to automatically create dns records for ingresses 5 | - import_tasks: "{{ role_path }}/tasks/externaldns.yml" 6 | 7 | # cert-manager, to automatically create ssl certificates for ingresses 8 | - import_tasks: "{{ role_path }}/tasks/cert-manager.yml" 9 | 10 | # flux cd, to manage deployments from a git repo as source of truth 11 | - import_tasks: "{{ role_path }}/tasks/flux.yml" 12 | 13 | # example service to ensure everything is working correctly 14 | - import_tasks: "{{ role_path }}/tasks/example.yml" 15 | 16 | # newrelic agent for monitoring 17 | - import_tasks: "{{ role_path }}/tasks/newrelic.yml" 18 | when: newrelic_license_key != "" 19 | -------------------------------------------------------------------------------- /variables.tf: 
-------------------------------------------------------------------------------- 1 | # should be provided in a .env file, see .env.example 2 | variable "cloudflare_email" {} 3 | variable "cloudflare_apikey" {} 4 | variable "cloudflare_account_id" {} 5 | variable "cloudflare_zone" {} 6 | variable "public_key_path" {} 7 | variable "private_key_path" {} 8 | variable "ssh_allowed_CIDR" {} 9 | variable "domain_name" {} 10 | 11 | variable "instance_type" { 12 | description = "Instance size to provision (minimum t3a.small)" 13 | default = "t3a.small" 14 | } 15 | 16 | variable "root_vol_size" { 17 | description = "Size of the root volume" 18 | default = 25 19 | } 20 | 21 | variable "instance_name" { 22 | description = "Name of the instance" 23 | default = "platform" 24 | } 25 | 26 | variable "engine" { 27 | description = "Kubernetes engine to use, 'microk8s' or 'k3s'" 28 | default = "k3s" 29 | } 30 | -------------------------------------------------------------------------------- /ansible/roles/openvpn/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: ensure snapd is installed 3 | apt: 4 | name: snapd 5 | state: present 6 | 7 | - name: wait for snap to be loaded... 
8 | shell: snap wait system seed.loaded 9 | 10 | - name: install helm --classic 11 | snap: 12 | name: helm 13 | classic: yes 14 | 15 | - name: add openvpn helm repo 16 | shell: helm repo add k8s-ovpn https://raw.githubusercontent.com/suda/k8s-ovpn-chart/master && helm repo update 17 | environment: 18 | KUBECONFIG: "{{ '/etc/rancher/k3s/k3s.yaml' if engine == 'k3s' else '' }}" 19 | 20 | - name: install openvpn helm chart 21 | shell: helm install openvpn k8s-ovpn/k8s-ovpn-chart 22 | environment: 23 | KUBECONFIG: "{{ '/etc/rancher/k3s/k3s.yaml' if engine == 'k3s' else '' }}" 24 | register: command_result 25 | ignore_errors: True 26 | 27 | - fail: 28 | msg: command_result.stderr 29 | when: "command_result.stderr != '' and command_result.stderr is not match('Error: cannot re-use a name that is still in use')" 30 | -------------------------------------------------------------------------------- /resources/replaybot-legacy/parser.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: parser 5 | namespace: replaybot-legacy 6 | labels: 7 | app: parser 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: parser 12 | template: 13 | metadata: 14 | labels: 15 | app: parser 16 | spec: 17 | containers: 18 | - name: parser 19 | image: leigholiver/replaybot_parser 20 | env: 21 | - name: NEW_RELIC_APP_NAME 22 | value: replaybot_parser 23 | - name: NEW_RELIC_LICENSE_KEY 24 | valueFrom: 25 | secretKeyRef: 26 | name: replaybot-secrets 27 | key: NEW_RELIC_LICENSE_KEY 28 | ports: 29 | - containerPort: 5001 30 | --- 31 | apiVersion: v1 32 | kind: Service 33 | metadata: 34 | name: parser 35 | namespace: replaybot-legacy 36 | spec: 37 | selector: 38 | app: parser 39 | ports: 40 | - protocol: TCP 41 | port: 5001 42 | targetPort: 5001 43 | -------------------------------------------------------------------------------- /.env.example: 
-------------------------------------------------------------------------------- 1 | # path to the public SSH key to use to access instance 2 | export TF_VAR_public_key_path= 3 | 4 | # path to the private key, only used by ansible to provision the instance 5 | export TF_VAR_private_key_path= 6 | 7 | # Cloudflare credentials for DNS/certs 8 | export TF_VAR_cloudflare_email= 9 | export TF_VAR_cloudflare_account_id= 10 | export TF_VAR_cloudflare_apikey= 11 | export TF_VAR_cloudflare_zone= 12 | 13 | # domain name to deploy to 14 | export TF_VAR_domain_name= 15 | 16 | # CIDR range to allow ssh/kubectl from 17 | # eg 1.2.3.4/32 18 | export TF_VAR_ssh_allowed_CIDR= 19 | 20 | # kubernetes engine to use, microk8s or k3s 21 | export TF_VAR_engine= 22 | 23 | # flux cd configuration 24 | # github username of the repo owner 25 | export GITHUB_USERNAME= 26 | 27 | # repository name to watch for manifests 28 | export GITHUB_REPO= 29 | 30 | # branch to watch 31 | # if ommitted, will watch `master` 32 | export GITHUB_BRANCH= 33 | 34 | # comma seperated list of directories to watch 35 | # if ommitted, will watch the whole repo 36 | export GITHUB_NAMESPACES= 37 | 38 | # optional newrelic license key for monitoring 39 | export NEWRELIC_LICENSE_KEY= 40 | -------------------------------------------------------------------------------- /ansible/roles/platform/templates/example.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: nginx 9 | template: 10 | metadata: 11 | labels: 12 | app: nginx 13 | spec: 14 | containers: 15 | - name: nginx 16 | image: nginx:1.14.2 17 | ports: 18 | - containerPort: 80 19 | --- 20 | apiVersion: v1 21 | kind: Service 22 | metadata: 23 | name: nginx-service 24 | spec: 25 | selector: 26 | app: nginx 27 | ports: 28 | - protocol: TCP 29 | port: 80 30 | targetPort: 80 31 | --- 32 | apiVersion: 
extensions/v1beta1 33 | kind: Ingress 34 | metadata: 35 | name: nginx-service-ingress 36 | annotations: 37 | cert-manager.io/cluster-issuer: "letsencrypt-prod" 38 | external-dns.alpha.kubernetes.io/hostname: nginx.{{ domain_name }} 39 | spec: 40 | tls: 41 | - hosts: 42 | - nginx.{{ domain_name }} 43 | secretName: nginx-service-ingress-tls 44 | rules: 45 | - host: nginx.{{ domain_name }} 46 | http: 47 | paths: 48 | - path: / 49 | backend: 50 | serviceName: nginx-service 51 | servicePort: 80 52 | -------------------------------------------------------------------------------- /resources/replaybot-legacy/discord.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: discord 5 | namespace: replaybot-legacy 6 | labels: 7 | app: discord 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: discord 12 | template: 13 | metadata: 14 | labels: 15 | app: discord 16 | spec: 17 | containers: 18 | - name: discord 19 | image: leigholiver/replaybot_discord 20 | env: 21 | - name: NEW_RELIC_APP_NAME 22 | value: replaybot_discord 23 | - name: NEW_RELIC_LICENSE_KEY 24 | valueFrom: 25 | secretKeyRef: 26 | name: replaybot-secrets 27 | key: NEW_RELIC_LICENSE_KEY 28 | - name: BOT_TOKEN 29 | valueFrom: 30 | secretKeyRef: 31 | name: replaybot-secrets 32 | key: BOT_TOKEN 33 | - name: BOT_SHARED_KEY 34 | valueFrom: 35 | secretKeyRef: 36 | name: replaybot-secrets 37 | key: BOT_SHARED_KEY 38 | - name: PARSER_ENDPOINT 39 | value: http://parser:5001 40 | - name: API_ENDPOINT 41 | value: https://replaybot.com/api 42 | -------------------------------------------------------------------------------- /ansible/roles/microk8s/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: ensure snapd is installed 3 | apt: 4 | name: snapd 5 | state: present 6 | 7 | - name: wait for snap to be loaded... 
8 | shell: snap wait system seed.loaded 9 | 10 | - name: install microk8s --classic --channel={{ microk8s_channel | default("1.18/stable") }} 11 | snap: 12 | name: microk8s 13 | classic: yes 14 | channel: "{{ microk8s_channel | default('1.18/stable') }}" 15 | 16 | - name: wait for microk8s to become ready... 17 | shell: microk8s status --wait-ready 18 | 19 | - name: enable microk8s services 20 | shell: microk8s enable dns helm 21 | 22 | - name: microk8s tweaks 23 | import_tasks: "{{ role_path }}/tasks/modifications.yml" 24 | 25 | - name: generate kubeconfig 26 | shell: microk8s config > $PWD/kubeconfig 27 | 28 | - name: ensure external ip address is in kubeconfig 29 | replace: 30 | path: $PWD/kubeconfig 31 | regexp: "server: https://[0-9\\.]+:16443" 32 | replace: "server: https://{{ external_ip }}:16443" 33 | 34 | - fetch: 35 | src: $PWD/kubeconfig 36 | dest: "{{ private_dir }}/kubeconfig" 37 | flat: yes 38 | - file: 39 | path: "$PWD/kubeconfig" 40 | state: absent 41 | 42 | 43 | - name: helm init 44 | shell: microk8s helm init 45 | 46 | - name: ensure coredns has started... 
47 | shell: microk8s kubectl rollout status deployment/coredns -n kube-system 48 | -------------------------------------------------------------------------------- /ansible/roles/platform/templates/cert-manager.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: cloudflare-api-key-secret 5 | namespace: cert-manager 6 | type: Opaque 7 | stringData: 8 | api-key: "{{ cloudflare_apikey }}" 9 | --- 10 | apiVersion: cert-manager.io/v1alpha2 11 | kind: ClusterIssuer 12 | metadata: 13 | name: letsencrypt-staging 14 | namespace: kube-system 15 | spec: 16 | acme: 17 | server: https://acme-staging-v02.api.letsencrypt.org/directory 18 | email: "{{ cloudflare_email }}" 19 | privateKeySecretRef: 20 | name: letsencrypt-staging 21 | solvers: 22 | - selector: {} 23 | dns01: 24 | cloudflare: 25 | email: "{{ cloudflare_email }}" 26 | apiKeySecretRef: 27 | name: cloudflare-api-key-secret 28 | key: api-key 29 | --- 30 | apiVersion: cert-manager.io/v1alpha2 31 | kind: ClusterIssuer 32 | metadata: 33 | name: letsencrypt-prod 34 | namespace: kube-system 35 | spec: 36 | acme: 37 | server: https://acme-v02.api.letsencrypt.org/directory 38 | email: "{{ cloudflare_email }}" 39 | privateKeySecretRef: 40 | name: letsencrypt-prod 41 | solvers: 42 | - selector: {} 43 | dns01: 44 | cloudflare: 45 | email: "{{ cloudflare_email }}" 46 | apiKeySecretRef: 47 | name: cloudflare-api-key-secret 48 | key: api-key 49 | -------------------------------------------------------------------------------- /ansible/roles/microk8s/tasks/modifications.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://github.com/ubuntu/microk8s/blob/0b22fbb8fb44fdf673698c4703496a5235fc9d3f/microk8s-resources/actions/ingress.yaml 3 | # with `--publish-status-address={{ inventory_hostname }}` arg added 4 | - name: enable munged microk8s ingress, allowing external access 5 | template: 6 
| src: ../templates/ingress.yml 7 | dest: $PWD/ingress.yml 8 | 9 | - name: apply microk8s ingress manifest 10 | shell: microk8s kubectl apply -f $PWD/ingress.yml 11 | 12 | - name: remove tmp microk8s ingress manifest 13 | file: 14 | path: $PWD/ingress.yml 15 | state: absent 16 | 17 | - name: workaround to be able to connect to microk8s remotely 18 | import_tasks: "{{ role_path }}/tasks/remote-hacks.yml" 19 | 20 | - name: check if allow-privileged is already set 21 | shell: "cat /var/snap/microk8s/current/args/kube-apiserver | grep 'allow-privileged' | wc -l" 22 | register: ip_included 23 | changed_when: False 24 | 25 | - name: allow privileged (for newrelic agent) 26 | shell: echo -n "--allow-privileged" >> /var/snap/microk8s/current/args/kube-apiserver 27 | when: ip_included.stdout_lines[0]|int == 0 28 | 29 | - name: stop microk8s 30 | shell: microk8s stop 31 | when: ip_included.stdout_lines[0]|int == 0 32 | 33 | - name: restart microk8s 34 | shell: microk8s start 35 | when: ip_included.stdout_lines[0]|int == 0 36 | 37 | - name: wait for microk8s to become ready... 38 | shell: microk8s status --wait-ready 39 | -------------------------------------------------------------------------------- /bin/set-secrets: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | source bin/_helpers 3 | ensure-environment 4 | 5 | echo "Removing any old secrets..." 6 | kubectl --kubeconfig ${PRIVATE_DIR}/kubeconfig delete -n ${NAMESPACE} secret ovpn0-key 7 | kubectl --kubeconfig ${PRIVATE_DIR}/kubeconfig delete -n ${NAMESPACE} secret ovpn0-cert 8 | kubectl --kubeconfig ${PRIVATE_DIR}/kubeconfig delete -n ${NAMESPACE} secret ovpn0-pki 9 | kubectl --kubeconfig ${PRIVATE_DIR}/kubeconfig delete -n ${NAMESPACE} configmap ovpn0-conf 10 | kubectl --kubeconfig ${PRIVATE_DIR}/kubeconfig delete -n ${NAMESPACE} configmap ccd0 11 | 12 | echo "🔑 Setting secrets..." 
13 | set -e 14 | kubectl --kubeconfig ${PRIVATE_DIR}/kubeconfig create -n ${NAMESPACE} secret generic ovpn0-key --from-file=private/${VPN_HOSTNAME}/ovpn0/server/pki/private/${VPN_HOSTNAME}.key 15 | kubectl --kubeconfig ${PRIVATE_DIR}/kubeconfig create -n ${NAMESPACE} secret generic ovpn0-cert --from-file=private/${VPN_HOSTNAME}/ovpn0/server/pki/issued/${VPN_HOSTNAME}.crt 16 | kubectl --kubeconfig ${PRIVATE_DIR}/kubeconfig create -n ${NAMESPACE} secret generic ovpn0-pki \ 17 | --from-file=private/${VPN_HOSTNAME}/ovpn0/server/pki/ca.crt --from-file=private/${VPN_HOSTNAME}/ovpn0/server/pki/dh.pem --from-file=private/${VPN_HOSTNAME}/ovpn0/server/pki/ta.key 18 | kubectl --kubeconfig ${PRIVATE_DIR}/kubeconfig create -n ${NAMESPACE} configmap ovpn0-conf --from-file=private/${VPN_HOSTNAME}/ovpn0/server/ 19 | kubectl --kubeconfig ${PRIVATE_DIR}/kubeconfig create -n ${NAMESPACE} configmap ccd0 --from-file=private/${VPN_HOSTNAME}/ovpn0/server/ccd 20 | -------------------------------------------------------------------------------- /ansible/roles/k3s/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: ensure install script exists 3 | copy: 4 | src: ../files/install.sh 5 | dest: $PWD/k3s-install.sh 6 | mode: '0755' 7 | 8 | - name: run k3s install script, setting external ip 9 | shell: INSTALL_K3S_EXEC="--tls-san {{ external_ip }} --no-deploy traefik" sh $PWD/k3s-install.sh 10 | 11 | - name: wait for node to be ready... 12 | command: k3s kubectl get node 13 | 14 | # stealing the munged microk8s ingress as i cant get traefik to show the proper ip for some reason? 
15 | # https://github.com/ubuntu/microk8s/blob/0b22fbb8fb44fdf673698c4703496a5235fc9d3f/microk8s-resources/actions/ingress.yaml 16 | # with `--publish-status-address={{ inventory_hostname }}` arg added 17 | - name: enable munged microk8s ingress, allowing external access 18 | template: 19 | src: ../templates/ingress.yml 20 | dest: $PWD/ingress.yml 21 | 22 | - name: apply microk8s ingress manifest 23 | shell: k3s kubectl apply -f $PWD/ingress.yml 24 | 25 | - name: remove tmp microk8s ingress manifest 26 | file: 27 | path: $PWD/ingress.yml 28 | state: absent 29 | 30 | - name: copy kubeconfig 31 | shell: cp /etc/rancher/k3s/k3s.yaml $PWD/kubeconfig 32 | 33 | - name: ensure external ip address is in kubeconfig 34 | replace: 35 | path: $PWD/kubeconfig 36 | regexp: "server: https://127.0.0.1:6443" 37 | replace: "server: https://{{ external_ip }}:6443" 38 | 39 | - fetch: 40 | src: $PWD/kubeconfig 41 | dest: "{{ private_dir }}/kubeconfig" 42 | flat: yes 43 | - file: 44 | path: "$PWD/kubeconfig" 45 | state: absent 46 | -------------------------------------------------------------------------------- /ansible/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | remote_user: ubuntu 4 | become: yes 5 | become_method: sudo 6 | 7 | vars: 8 | - deploy_example: false 9 | - external_dns_debug: false 10 | 11 | # local directory to store kubeconfig/openvpn config 12 | - private_dir: "{{ inventory_dir }}/../../private/{{ domain_name }}" 13 | 14 | # kubernetes to use, "k3s" or "microk8s" 15 | - engine: "{{ lookup('env','TF_VAR_engine') }}" 16 | 17 | # domain name to deploy to 18 | - domain_name: "{{ lookup('env','TF_VAR_domain_name') }}" 19 | 20 | # newrelic license key for monitoring 21 | - newrelic_license_key: "{{ lookup('env','NEWRELIC_LICENSE_KEY') }}" 22 | 23 | # cloudflare auth for externaldns and cert-manager 24 | - cloudflare_email: "{{ lookup('env','TF_VAR_cloudflare_email') }}" 25 | - cloudflare_apikey: "{{ 
lookup('env','TF_VAR_cloudflare_apikey') }}" 26 | 27 | # github username and repo info for flux cd 28 | - github_username: "{{ lookup('env','GITHUB_USERNAME') }}" 29 | - github_repo: "{{ lookup('env','GITHUB_REPO') }}" 30 | - github_branch: "{{ lookup('env','GITHUB_BRANCH') }}" 31 | 32 | # comma seperated list of directories in the github repo to watch 33 | - github_namespaces: "{{ lookup('env','GITHUB_NAMESPACES') }}" 34 | 35 | roles: 36 | - role: k3s 37 | when: engine == 'k3s' 38 | - role: microk8s 39 | when: engine == 'microk8s' 40 | - platform 41 | - openvpn 42 | 43 | pre_tasks: 44 | - name: set external ip address host fact 45 | uri: 46 | url: http://icanhazip.com 47 | return_content: yes 48 | register: canhaz_output 49 | - set_fact: 50 | external_ip: "{{ canhaz_output.content | replace('\n', '') }}" 51 | -------------------------------------------------------------------------------- /ansible/roles/platform/templates/externaldns.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: external-dns 5 | --- 6 | apiVersion: rbac.authorization.k8s.io/v1beta1 7 | kind: ClusterRole 8 | metadata: 9 | name: external-dns 10 | namespace: production 11 | rules: 12 | - apiGroups: [""] 13 | resources: ["services"] 14 | verbs: ["get","watch","list"] 15 | - apiGroups: [""] 16 | resources: ["pods"] 17 | verbs: ["get","watch","list"] 18 | - apiGroups: ["extensions"] 19 | resources: ["ingresses"] 20 | verbs: ["get","watch","list"] 21 | - apiGroups: [""] 22 | resources: ["nodes"] 23 | verbs: ["list"] 24 | - apiGroups: [""] 25 | resources: ["endpoints"] 26 | verbs: ["get","watch","list"] 27 | --- 28 | apiVersion: rbac.authorization.k8s.io/v1beta1 29 | kind: ClusterRoleBinding 30 | metadata: 31 | name: external-dns-viewer 32 | roleRef: 33 | apiGroup: rbac.authorization.k8s.io 34 | kind: ClusterRole 35 | name: external-dns 36 | subjects: 37 | - kind: ServiceAccount 38 | name: 
external-dns 39 | namespace: default 40 | --- 41 | apiVersion: apps/v1 42 | kind: Deployment 43 | metadata: 44 | name: external-dns 45 | spec: 46 | strategy: 47 | type: Recreate 48 | selector: 49 | matchLabels: 50 | app: external-dns 51 | template: 52 | metadata: 53 | labels: 54 | app: external-dns 55 | spec: 56 | serviceAccountName: external-dns 57 | containers: 58 | - name: external-dns 59 | image: k8s.gcr.io/external-dns/external-dns:v0.7.3 60 | args: 61 | - --source=ingress 62 | - --provider=cloudflare 63 | - --txt-owner-id={{ domain_name }} 64 | {% if external_dns_debug is defined and external_dns_debug %}- --log-level=debug{% endif %} 65 | 66 | env: 67 | - name: CF_API_KEY 68 | value: "{{ cloudflare_apikey }}" 69 | - name: CF_API_EMAIL 70 | value: "{{ cloudflare_email }}" 71 | -------------------------------------------------------------------------------- /ansible/roles/microk8s/tasks/remote-hacks.yml: -------------------------------------------------------------------------------- 1 | # Borrowed from 2 | # https://github.com/pfisterer/edsc-microk8s-playbook/blob/master/roles/microk8s/tasks/main.yaml 3 | # ---------------------------------------------------------- 4 | # Workaround to be able to connect to microk8s remotely 5 | # https://github.com/ubuntu/microk8s/issues/421 6 | # ---------------------------------------------------------- 7 | --- 8 | - name: Check IP is already included in the template 9 | shell: "cat /var/snap/microk8s/current/certs/csr.conf.template | grep '= {{external_ip}}' | wc -l" 10 | register: ip_included 11 | changed_when: False 12 | 13 | - name: Get highest entry in the list of IPs (conf) 14 | shell: "cat /var/snap/microk8s/current/certs/csr.conf | sed -nr 's/IP\\.([0-9]+).*/\\1/p' | sort | tail -n 1" 15 | register: csr_output 16 | when: ip_included.stdout_lines[0]|int == 0 17 | 18 | - name: Get highest entry in the list of IPs (template) 19 | shell: "cat /var/snap/microk8s/current/certs/csr.conf.template | sed -nr 
# bin/ipupdate.py
# keep the security group updated with my current IP address:
# every port in PORTS should be reachable from exactly MY_IP and nothing else.
SEC_GROUP_NAME = 'platform_sec'
PORTS = [22, 16443, 6443]  # SSH, microk8s, k3s


def plan_changes(ip_permissions, my_ip, ports):
    """Work out which ingress rules need adding and which need revoking.

    ip_permissions: the 'IpPermissions' list from ec2 describe_security_groups.
    my_ip: this machine's public address as a /32 CIDR string.
    ports: ports that should allow my_ip (and only my_ip).

    Returns {'missing': [port, ...],
             'revoke': [{'port': port, 'ips': [cidr, ...]}, ...]}.
    """
    tasks = {'missing': [], 'revoke': []}
    seen_ports = []
    for rule in ip_permissions:
        # "all traffic" rules (IpProtocol '-1') carry no FromPort key;
        # .get() avoids the KeyError the old rule['FromPort'] lookup hit.
        port = rule.get('FromPort')
        if port not in ports:
            continue
        seen_ports.append(port)
        rule_ips = [rng['CidrIp'] for rng in rule.get('IpRanges', [])]
        if my_ip not in rule_ips:
            tasks['missing'].append(port)
        # revoke every address that isn't the current one, even when the
        # current address is already present -- previously stale entries
        # were kept forever once my_ip made it into the rule
        stale = [ip for ip in rule_ips if ip != my_ip]
        if stale:
            tasks['revoke'].append({'port': port, 'ips': stale})
    # ports with no existing rule at all
    for port in ports:
        if port not in seen_ports:
            tasks['missing'].append(port)
    return tasks


def main():
    # third-party deps imported here so plan_changes() stays importable
    # (and testable) without requests/boto3 installed
    import requests
    import boto3

    my_ip = "%s/32" % requests.get("https://checkip.amazonaws.com").text.strip()
    ec2 = boto3.client('ec2')
    response = ec2.describe_security_groups(GroupNames=[SEC_GROUP_NAME])
    sec_group = response['SecurityGroups'][0]

    tasks = plan_changes(sec_group['IpPermissions'], my_ip, PORTS)

    if tasks['revoke']:
        print("Revoking %d rules" % len(tasks['revoke']))
        print(tasks['revoke'])
        ec2.revoke_security_group_ingress(
            GroupId=sec_group['GroupId'],
            IpPermissions=[
                {
                    'IpProtocol': 'tcp',
                    'FromPort': r['port'],
                    'ToPort': r['port'],
                    'IpRanges': [{'CidrIp': ip} for ip in r['ips']],
                }
                for r in tasks['revoke']
            ],
        )

    if tasks['missing']:
        print("Adding %d rules" % len(tasks['missing']))
        print(tasks['missing'])
        ec2.authorize_security_group_ingress(
            GroupId=sec_group['GroupId'],
            IpPermissions=[
                {
                    'IpProtocol': 'tcp',
                    'FromPort': port,
                    'ToPort': port,
                    'IpRanges': [{'CidrIp': my_ip}],
                }
                for port in tasks['missing']
            ],
        )


if __name__ == '__main__':
    main()
get the public key using `bin/flux-identity` and add it to your repo at `https://github.com/${github_username}/${github_repo}/settings/keys` 16 | 17 | ### adding k8s services 18 | commit the manifests to the repo you specified for flux cd, and it should all get picked up automatically 19 | 20 | to take advantage of the automated dns name/cert creation you need to create an ingress and set the following annotations: 21 | - `cert-manager.io/cluster-issuer`: "letsencrypt-staging" or "letsencrypt-prod" 22 | - `external-dns.alpha.kubernetes.io/hostname`: your desired domain name 23 | 24 | set `spec.tls.hosts` and `spec.rules.host` to your desired domain name 25 | 26 | set `spec.tls.secretName` to a unique name for the secret to hold the tls cert in 27 | 28 | an example manifest can be seen at `ansible/roles/platform/templates/example.yml` 29 | 30 | ### openvpn setup 31 | you will need kubectl installed to set the necessary secrets for the vpn 32 | 33 | run `bin/setup-vpn` and follow the instructions. your vpn config will be placed in `private/${hostname}/client.ovpn` 34 | 35 | if you need to use `sudo` for docker commands, you will need to use `sudo -E bin/setup-vpn` to pass through some variables from `.env` 36 | 37 | to add a new client, run `CLIENT_NAME=some_new_client bin/add-client`, and the new client configuration will be placed in `private/${hostname}/some_new_client.ovpn` 38 | 39 | in KDE for whatever reason the vpn connection doesn't work when imported directly into network-manager, so it must be started with `sudo openvpn --config private/${hostname}/client.ovpn`.
40 | -------------------------------------------------------------------------------- /main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "s3" {} 3 | } 4 | 5 | provider "aws" { 6 | region = "eu-west-2" 7 | } 8 | 9 | provider "cloudflare" { 10 | email = var.cloudflare_email 11 | api_key = var.cloudflare_apikey 12 | account_id = var.cloudflare_account_id 13 | } 14 | 15 | resource "aws_key_pair" "keys" { 16 | key_name = "${var.instance_name}_keys" 17 | public_key = file(var.public_key_path) 18 | } 19 | 20 | resource "aws_eip" "eip" { 21 | instance = aws_instance.host.id 22 | } 23 | 24 | resource "aws_instance" "host" { 25 | ami = data.aws_ami.ubuntu.id 26 | instance_type = var.instance_type 27 | key_name = aws_key_pair.keys.key_name 28 | security_groups = [aws_security_group.host_sec_group.name] 29 | 30 | tags = { 31 | Name = var.instance_name 32 | } 33 | 34 | root_block_device { 35 | volume_type = "gp2" 36 | volume_size = var.root_vol_size 37 | } 38 | 39 | # wait for the instance to come up 40 | # to run the playbook against it 41 | provisioner "remote-exec" { 42 | inline = ["echo ping"] 43 | 44 | connection { 45 | type = "ssh" 46 | user = "ubuntu" 47 | host = aws_instance.host.public_ip 48 | private_key = file(var.private_key_path) 49 | } 50 | } 51 | 52 | lifecycle { 53 | create_before_destroy = true 54 | } 55 | } 56 | 57 | # provision the instance with the ansible playbook 58 | resource "null_resource" "reprovisioner" { 59 | # run every time the playbook changes 60 | triggers = { 61 | policy_sha1 = sha1(file("ansible/main.yml")) 62 | } 63 | 64 | provisioner "local-exec" { 65 | command = "ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i ${aws_eip.eip.public_ip}, ./ansible/main.yml --key-file ${var.private_key_path}" 66 | } 67 | } 68 | 69 | resource "aws_security_group" "host_sec_group" { 70 | name = "${var.instance_name}_sec" 71 | description = "Security group for ${var.instance_name} host" 72 | 
73 | # web from anywhere 74 | ingress { 75 | from_port = 80 76 | to_port = 80 77 | protocol = "tcp" 78 | cidr_blocks = ["0.0.0.0/0"] 79 | } 80 | 81 | ingress { 82 | from_port = 443 83 | to_port = 443 84 | protocol = "tcp" 85 | cidr_blocks = ["0.0.0.0/0"] 86 | } 87 | 88 | # ssh 89 | ingress { 90 | from_port = 22 91 | to_port = 22 92 | protocol = "tcp" 93 | cidr_blocks = [var.ssh_allowed_CIDR] 94 | } 95 | 96 | # kubectl 97 | ingress { 98 | from_port = var.engine == "microk8s"? 16443 : 6443 99 | to_port = var.engine == "microk8s"? 16443 : 6443 100 | protocol = "tcp" 101 | cidr_blocks = [var.ssh_allowed_CIDR] 102 | } 103 | 104 | # vpn from anywhere 105 | ingress { 106 | from_port = 31304 107 | to_port = 31304 108 | protocol = "tcp" 109 | cidr_blocks = ["0.0.0.0/0"] 110 | } 111 | 112 | # outgoing traffic 113 | egress { 114 | from_port = 0 115 | to_port = 0 116 | protocol = "-1" 117 | cidr_blocks = ["0.0.0.0/0"] 118 | } 119 | 120 | } 121 | 122 | resource "cloudflare_record" "dns_record" { 123 | zone_id = var.cloudflare_zone 124 | name = var.domain_name 125 | value = aws_eip.eip.public_ip 126 | type = "A" 127 | ttl = 1 128 | } 129 | 130 | data "aws_ami" "ubuntu" { 131 | most_recent = true 132 | 133 | filter { 134 | name = "name" 135 | values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"] 136 | } 137 | 138 | filter { 139 | name = "virtualization-type" 140 | values = ["hvm"] 141 | } 142 | 143 | owners = ["099720109477"] # Canonical 144 | } 145 | -------------------------------------------------------------------------------- /ansible/roles/k3s/templates/ingress.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: ingress 5 | --- 6 | apiVersion: v1 7 | kind: ServiceAccount 8 | metadata: 9 | name: nginx-ingress-microk8s-serviceaccount 10 | namespace: ingress 11 | --- 12 | apiVersion: rbac.authorization.k8s.io/v1 13 | kind: ClusterRole 14 | metadata: 15 | name: 
nginx-ingress-microk8s-clusterrole 16 | rules: 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - nodes 21 | - services 22 | verbs: 23 | - get 24 | - list 25 | - watch 26 | - apiGroups: 27 | - "" 28 | resources: 29 | - configmaps 30 | - endpoints 31 | - nodes 32 | - pods 33 | - secrets 34 | verbs: 35 | - list 36 | - watch 37 | - apiGroups: 38 | - "" 39 | resources: 40 | - events 41 | verbs: 42 | - create 43 | - patch 44 | - apiGroups: 45 | - extensions 46 | - networking.k8s.io 47 | resources: 48 | - ingresses 49 | verbs: 50 | - get 51 | - list 52 | - watch 53 | - apiGroups: 54 | - extensions 55 | - networking.k8s.io 56 | resources: 57 | - ingresses/status 58 | verbs: 59 | - update 60 | --- 61 | apiVersion: rbac.authorization.k8s.io/v1 62 | kind: Role 63 | metadata: 64 | name: nginx-ingress-microk8s-role 65 | namespace: ingress 66 | rules: 67 | - apiGroups: 68 | - "" 69 | resources: 70 | - configmaps 71 | - endpoints 72 | - pods 73 | - secrets 74 | verbs: 75 | - get 76 | - apiGroups: 77 | - "" 78 | resources: 79 | - configmaps 80 | resourceNames: 81 | - ingress-controller-leader-nginx 82 | verbs: 83 | - create 84 | - update 85 | - apiGroups: 86 | - "" 87 | resources: 88 | - configmaps 89 | verbs: 90 | - create 91 | --- 92 | apiVersion: rbac.authorization.k8s.io/v1 93 | kind: ClusterRoleBinding 94 | metadata: 95 | name: nginx-ingress-microk8s 96 | roleRef: 97 | apiGroup: rbac.authorization.k8s.io 98 | kind: ClusterRole 99 | name: nginx-ingress-microk8s-clusterrole 100 | subjects: 101 | - kind: ServiceAccount 102 | name: nginx-ingress-microk8s-serviceaccount 103 | namespace: ingress 104 | --- 105 | apiVersion: rbac.authorization.k8s.io/v1 106 | kind: RoleBinding 107 | metadata: 108 | name: nginx-ingress-microk8s 109 | namespace: ingress 110 | roleRef: 111 | apiGroup: rbac.authorization.k8s.io 112 | kind: Role 113 | name: nginx-ingress-microk8s-role 114 | subjects: 115 | - kind: ServiceAccount 116 | name: nginx-ingress-microk8s-serviceaccount 117 | --- 118 | 
apiVersion: v1 119 | kind: ConfigMap 120 | metadata: 121 | name: nginx-load-balancer-microk8s-conf 122 | namespace: ingress 123 | --- 124 | apiVersion: v1 125 | kind: ConfigMap 126 | metadata: 127 | name: nginx-ingress-tcp-microk8s-conf 128 | namespace: ingress 129 | --- 130 | apiVersion: v1 131 | kind: ConfigMap 132 | metadata: 133 | name: nginx-ingress-udp-microk8s-conf 134 | namespace: ingress 135 | --- 136 | apiVersion: apps/v1 137 | kind: DaemonSet 138 | metadata: 139 | name: nginx-ingress-microk8s-controller 140 | namespace: ingress 141 | labels: 142 | microk8s-application: nginx-ingress-microk8s 143 | spec: 144 | selector: 145 | matchLabels: 146 | name: nginx-ingress-microk8s 147 | template: 148 | metadata: 149 | labels: 150 | name: nginx-ingress-microk8s 151 | spec: 152 | terminationGracePeriodSeconds: 60 153 | # hostPort doesn't work with CNI, so we have to use hostNetwork instead 154 | # see https://github.com/kubernetes/kubernetes/issues/23920 155 | dnsPolicy: ClusterFirstWithHostNet 156 | hostNetwork: true 157 | serviceAccountName: nginx-ingress-microk8s-serviceaccount 158 | containers: 159 | - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.25.1 160 | name: nginx-ingress-microk8s 161 | livenessProbe: 162 | httpGet: 163 | path: /healthz 164 | port: 10254 165 | scheme: HTTP 166 | initialDelaySeconds: 30 167 | timeoutSeconds: 5 168 | # use downward API 169 | env: 170 | - name: POD_NAME 171 | valueFrom: 172 | fieldRef: 173 | fieldPath: metadata.name 174 | - name: POD_NAMESPACE 175 | valueFrom: 176 | fieldRef: 177 | fieldPath: metadata.namespace 178 | ports: 179 | - containerPort: 80 180 | - containerPort: 443 181 | args: 182 | - /nginx-ingress-controller 183 | - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-microk8s-conf 184 | - --tcp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-tcp-microk8s-conf 185 | - --udp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-udp-microk8s-conf 186 | - --publish-status-address={{ 
inventory_hostname }} 187 | -------------------------------------------------------------------------------- /ansible/roles/microk8s/templates/ingress.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: ingress 5 | --- 6 | apiVersion: v1 7 | kind: ServiceAccount 8 | metadata: 9 | name: nginx-ingress-microk8s-serviceaccount 10 | namespace: ingress 11 | --- 12 | apiVersion: rbac.authorization.k8s.io/v1 13 | kind: ClusterRole 14 | metadata: 15 | name: nginx-ingress-microk8s-clusterrole 16 | rules: 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - nodes 21 | - services 22 | verbs: 23 | - get 24 | - list 25 | - watch 26 | - apiGroups: 27 | - "" 28 | resources: 29 | - configmaps 30 | - endpoints 31 | - nodes 32 | - pods 33 | - secrets 34 | verbs: 35 | - list 36 | - watch 37 | - apiGroups: 38 | - "" 39 | resources: 40 | - events 41 | verbs: 42 | - create 43 | - patch 44 | - apiGroups: 45 | - extensions 46 | - networking.k8s.io 47 | resources: 48 | - ingresses 49 | verbs: 50 | - get 51 | - list 52 | - watch 53 | - apiGroups: 54 | - extensions 55 | - networking.k8s.io 56 | resources: 57 | - ingresses/status 58 | verbs: 59 | - update 60 | --- 61 | apiVersion: rbac.authorization.k8s.io/v1 62 | kind: Role 63 | metadata: 64 | name: nginx-ingress-microk8s-role 65 | namespace: ingress 66 | rules: 67 | - apiGroups: 68 | - "" 69 | resources: 70 | - configmaps 71 | - endpoints 72 | - pods 73 | - secrets 74 | verbs: 75 | - get 76 | - apiGroups: 77 | - "" 78 | resources: 79 | - configmaps 80 | resourceNames: 81 | - ingress-controller-leader-nginx 82 | verbs: 83 | - create 84 | - update 85 | - apiGroups: 86 | - "" 87 | resources: 88 | - configmaps 89 | verbs: 90 | - create 91 | --- 92 | apiVersion: rbac.authorization.k8s.io/v1 93 | kind: ClusterRoleBinding 94 | metadata: 95 | name: nginx-ingress-microk8s 96 | roleRef: 97 | apiGroup: rbac.authorization.k8s.io 98 | kind: ClusterRole 99 | 
name: nginx-ingress-microk8s-clusterrole 100 | subjects: 101 | - kind: ServiceAccount 102 | name: nginx-ingress-microk8s-serviceaccount 103 | namespace: ingress 104 | --- 105 | apiVersion: rbac.authorization.k8s.io/v1 106 | kind: RoleBinding 107 | metadata: 108 | name: nginx-ingress-microk8s 109 | namespace: ingress 110 | roleRef: 111 | apiGroup: rbac.authorization.k8s.io 112 | kind: Role 113 | name: nginx-ingress-microk8s-role 114 | subjects: 115 | - kind: ServiceAccount 116 | name: nginx-ingress-microk8s-serviceaccount 117 | --- 118 | apiVersion: v1 119 | kind: ConfigMap 120 | metadata: 121 | name: nginx-load-balancer-microk8s-conf 122 | namespace: ingress 123 | --- 124 | apiVersion: v1 125 | kind: ConfigMap 126 | metadata: 127 | name: nginx-ingress-tcp-microk8s-conf 128 | namespace: ingress 129 | --- 130 | apiVersion: v1 131 | kind: ConfigMap 132 | metadata: 133 | name: nginx-ingress-udp-microk8s-conf 134 | namespace: ingress 135 | --- 136 | apiVersion: apps/v1 137 | kind: DaemonSet 138 | metadata: 139 | name: nginx-ingress-microk8s-controller 140 | namespace: ingress 141 | labels: 142 | microk8s-application: nginx-ingress-microk8s 143 | spec: 144 | selector: 145 | matchLabels: 146 | name: nginx-ingress-microk8s 147 | template: 148 | metadata: 149 | labels: 150 | name: nginx-ingress-microk8s 151 | spec: 152 | terminationGracePeriodSeconds: 60 153 | # hostPort doesn't work with CNI, so we have to use hostNetwork instead 154 | # see https://github.com/kubernetes/kubernetes/issues/23920 155 | dnsPolicy: ClusterFirstWithHostNet 156 | hostNetwork: true 157 | serviceAccountName: nginx-ingress-microk8s-serviceaccount 158 | containers: 159 | - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.25.1 160 | name: nginx-ingress-microk8s 161 | livenessProbe: 162 | httpGet: 163 | path: /healthz 164 | port: 10254 165 | scheme: HTTP 166 | initialDelaySeconds: 30 167 | timeoutSeconds: 5 168 | # use downward API 169 | env: 170 | - name: POD_NAME 171 | 
valueFrom: 172 | fieldRef: 173 | fieldPath: metadata.name 174 | - name: POD_NAMESPACE 175 | valueFrom: 176 | fieldRef: 177 | fieldPath: metadata.namespace 178 | ports: 179 | - containerPort: 80 180 | - containerPort: 443 181 | args: 182 | - /nginx-ingress-controller 183 | - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-microk8s-conf 184 | - --tcp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-tcp-microk8s-conf 185 | - --udp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-udp-microk8s-conf 186 | - --publish-status-address={{ inventory_hostname }} 187 | -------------------------------------------------------------------------------- /resources/replaybot/replaybot-preprod.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: replaybot-preprod 5 | --- 6 | apiVersion: v1 7 | kind: Service 8 | metadata: 9 | name: elasticsearch 10 | namespace: replaybot-preprod 11 | spec: 12 | ports: 13 | - port: 9200 14 | protocol: TCP 15 | targetPort: 9200 16 | selector: 17 | app: elasticsearch 18 | --- 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: parser 23 | namespace: replaybot-preprod 24 | spec: 25 | ports: 26 | - port: 5001 27 | protocol: TCP 28 | targetPort: 5001 29 | selector: 30 | app: parser 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: search 36 | namespace: replaybot-preprod 37 | spec: 38 | ports: 39 | - port: 80 40 | protocol: TCP 41 | targetPort: 5002 42 | selector: 43 | app: search 44 | --- 45 | apiVersion: apps/v1 46 | kind: Deployment 47 | metadata: 48 | labels: 49 | app: discord 50 | name: discord 51 | namespace: replaybot-preprod 52 | spec: 53 | selector: 54 | matchLabels: 55 | app: discord 56 | template: 57 | metadata: 58 | labels: 59 | app: discord 60 | spec: 61 | containers: 62 | - env: 63 | - name: NEW_RELIC_APP_NAME 64 | value: replaybot_discord_preprod 65 | - name: NEW_RELIC_LICENSE_KEY 66 | valueFrom: 67 | 
secretKeyRef: 68 | key: NEW_RELIC_LICENSE_KEY 69 | name: replaybot-secrets 70 | - name: BOT_TOKEN 71 | valueFrom: 72 | secretKeyRef: 73 | key: BOT_TOKEN 74 | name: replaybot-secrets 75 | - name: BOT_SHARED_KEY 76 | valueFrom: 77 | secretKeyRef: 78 | key: BOT_SHARED_KEY 79 | name: replaybot-secrets 80 | - name: PARSER_ENDPOINT 81 | value: http://parser:5001 82 | - name: API_ENDPOINT 83 | value: https://api-preprod.replaybot.com/api 84 | image: leigholiver/replaybot_discord_preprod 85 | name: discord 86 | resources: 87 | limits: 88 | cpu: 100m 89 | memory: 100Mi 90 | --- 91 | apiVersion: apps/v1 92 | kind: Deployment 93 | metadata: 94 | labels: 95 | app: elasticsearch 96 | name: elasticsearch 97 | namespace: replaybot-preprod 98 | spec: 99 | selector: 100 | matchLabels: 101 | app: elasticsearch 102 | template: 103 | metadata: 104 | labels: 105 | app: elasticsearch 106 | spec: 107 | containers: 108 | - env: 109 | - name: discovery.type 110 | value: single-node 111 | - name: bootstrap.memory_lock 112 | value: "true" 113 | - name: ES_JAVA_OPTS 114 | value: -Xms128m -Xmx128m 115 | image: elasticsearch:7.7.0 116 | name: elasticsearch 117 | ports: 118 | - containerPort: 9200 119 | resources: 120 | limits: 121 | cpu: 500m 122 | memory: 500Mi 123 | initContainers: 124 | - command: 125 | - sysctl 126 | - -w 127 | - vm.max_map_count=262144 128 | image: busybox:1.27.2 129 | name: init-sysctl 130 | securityContext: 131 | privileged: true 132 | --- 133 | apiVersion: apps/v1 134 | kind: Deployment 135 | metadata: 136 | labels: 137 | app: parser 138 | name: parser 139 | namespace: replaybot-preprod 140 | spec: 141 | selector: 142 | matchLabels: 143 | app: parser 144 | template: 145 | metadata: 146 | labels: 147 | app: parser 148 | spec: 149 | containers: 150 | - env: 151 | - name: NEW_RELIC_APP_NAME 152 | value: replaybot_parser_preprod 153 | - name: NEW_RELIC_LICENSE_KEY 154 | valueFrom: 155 | secretKeyRef: 156 | key: NEW_RELIC_LICENSE_KEY 157 | name: replaybot-secrets 158 | 
image: leigholiver/replaybot_parser_preprod 159 | name: parser 160 | ports: 161 | - containerPort: 5001 162 | resources: 163 | limits: 164 | cpu: 150m 165 | memory: 300Mi 166 | --- 167 | apiVersion: apps/v1 168 | kind: Deployment 169 | metadata: 170 | labels: 171 | app: search 172 | name: search 173 | namespace: replaybot-preprod 174 | spec: 175 | selector: 176 | matchLabels: 177 | app: search 178 | template: 179 | metadata: 180 | labels: 181 | app: search 182 | spec: 183 | containers: 184 | - env: 185 | - name: NEW_RELIC_APP_NAME 186 | value: replaybot_search_preprod 187 | - name: NEW_RELIC_LICENSE_KEY 188 | valueFrom: 189 | secretKeyRef: 190 | key: NEW_RELIC_LICENSE_KEY 191 | name: replaybot-secrets 192 | - name: ELASTICSEARCH_ENDPOINT 193 | value: http://elasticsearch:9200 194 | - name: ELASTICSEARCH_PASSWORD 195 | valueFrom: 196 | secretKeyRef: 197 | key: ELASTICSEARCH_PASSWORD 198 | name: replaybot-secrets 199 | image: leigholiver/replaybot_search_preprod 200 | name: search 201 | ports: 202 | - containerPort: 5002 203 | resources: 204 | limits: 205 | cpu: 100m 206 | memory: 100Mi 207 | --- 208 | apiVersion: networking.k8s.io/v1 209 | kind: Ingress 210 | metadata: 211 | annotations: 212 | cert-manager.io/cluster-issuer: letsencrypt-prod 213 | external-dns.alpha.kubernetes.io/hostname: search-preprod.replaybot.com 214 | name: search 215 | namespace: replaybot-preprod 216 | spec: 217 | rules: 218 | - host: search-preprod.replaybot.com 219 | http: 220 | paths: 221 | - backend: 222 | service: 223 | name: search 224 | port: 225 | number: 80 226 | path: / 227 | pathType: Prefix 228 | tls: 229 | - hosts: 230 | - search-preprod.replaybot.com 231 | secretName: search-tls 232 | -------------------------------------------------------------------------------- /ansible/roles/platform/templates/flux.yml: -------------------------------------------------------------------------------- 1 | # modified from `fluxctl install` to include garbage collection 2 | apiVersion: v1 3 | 
kind: Namespace 4 | metadata: 5 | name: flux 6 | --- 7 | apiVersion: v1 8 | kind: Secret 9 | metadata: 10 | name: flux-git-deploy 11 | namespace: flux 12 | type: Opaque 13 | --- 14 | # memcached deployment used by Flux to cache 15 | # container image metadata. 16 | apiVersion: apps/v1 17 | kind: Deployment 18 | metadata: 19 | name: memcached 20 | namespace: flux 21 | spec: 22 | replicas: 1 23 | selector: 24 | matchLabels: 25 | name: memcached 26 | template: 27 | metadata: 28 | labels: 29 | name: memcached 30 | spec: 31 | nodeSelector: 32 | beta.kubernetes.io/os: linux 33 | containers: 34 | - name: memcached 35 | image: memcached:1.5.20 36 | imagePullPolicy: IfNotPresent 37 | args: 38 | - -m 512 # Maximum memory to use, in megabytes 39 | - -I 5m # Maximum size for one item 40 | - -p 11211 # Default port 41 | # - -vv # Uncomment to get logs of each request and response. 42 | ports: 43 | - name: clients 44 | containerPort: 11211 45 | securityContext: 46 | runAsUser: 11211 47 | runAsGroup: 11211 48 | allowPrivilegeEscalation: false 49 | --- 50 | apiVersion: v1 51 | kind: Service 52 | metadata: 53 | name: memcached 54 | namespace: flux 55 | spec: 56 | ports: 57 | - name: memcached 58 | port: 11211 59 | selector: 60 | name: memcached 61 | --- 62 | # The service account, cluster roles, and cluster role binding are 63 | # only needed for Kubernetes with role-based access control (RBAC). 
64 | apiVersion: v1 65 | kind: ServiceAccount 66 | metadata: 67 | labels: 68 | name: flux 69 | name: flux 70 | namespace: flux 71 | --- 72 | apiVersion: rbac.authorization.k8s.io/v1beta1 73 | kind: ClusterRole 74 | metadata: 75 | labels: 76 | name: flux 77 | name: flux 78 | rules: 79 | - apiGroups: ['*'] 80 | resources: ['*'] 81 | verbs: ['*'] 82 | - nonResourceURLs: ['*'] 83 | verbs: ['*'] 84 | --- 85 | apiVersion: rbac.authorization.k8s.io/v1beta1 86 | kind: ClusterRoleBinding 87 | metadata: 88 | labels: 89 | name: flux 90 | name: flux 91 | roleRef: 92 | apiGroup: rbac.authorization.k8s.io 93 | kind: ClusterRole 94 | name: flux 95 | subjects: 96 | - kind: ServiceAccount 97 | name: flux 98 | namespace: flux 99 | --- 100 | apiVersion: apps/v1 101 | kind: Deployment 102 | metadata: 103 | name: flux 104 | namespace: flux 105 | spec: 106 | replicas: 1 107 | selector: 108 | matchLabels: 109 | name: flux 110 | strategy: 111 | type: Recreate 112 | template: 113 | metadata: 114 | annotations: 115 | prometheus.io/port: "3031" # tell prometheus to scrape /metrics endpoint's port. 116 | labels: 117 | name: flux 118 | spec: 119 | nodeSelector: 120 | beta.kubernetes.io/os: linux 121 | serviceAccountName: flux 122 | volumes: 123 | - name: git-key 124 | secret: 125 | secretName: flux-git-deploy 126 | defaultMode: 0400 # when mounted read-only, we won't be able to chmod 127 | 128 | # This is a tmpfs used for generating SSH keys. In K8s >= 1.10, 129 | # mounted secrets are read-only, so we need a separate volume we 130 | # can write to. 131 | - name: git-keygen 132 | emptyDir: 133 | medium: Memory 134 | 135 | # The following volume is for using a customised known_hosts 136 | # file, which you will need to do if you host your own git 137 | # repo rather than using github or the like. You'll also need to 138 | # mount it into the container, below. 
See 139 | # https://docs.fluxcd.io/en/latest/guides/use-private-git-host 140 | # - name: ssh-config 141 | # configMap: 142 | # name: flux-ssh-config 143 | 144 | # The following volume is for using a customised .kube/config, 145 | # which you will need to do if you wish to have a different 146 | # default namespace. You will also need to provide the configmap 147 | # with an entry for `config`, and uncomment the volumeMount and 148 | # env entries below. 149 | # - name: kubeconfig 150 | # configMap: 151 | # name: flux-kubeconfig 152 | 153 | # The following volume is used to import GPG keys (for signing 154 | # and verification purposes). You will also need to provide the 155 | # secret with the keys, and uncomment the volumeMount and args 156 | # below. 157 | # - name: gpg-keys 158 | # secret: 159 | # secretName: flux-gpg-keys 160 | # defaultMode: 0400 161 | 162 | containers: 163 | - name: flux 164 | # There are no ":latest" images for flux. Find the most recent 165 | # release or image version at https://hub.docker.com/r/fluxcd/flux/tags 166 | # and replace the tag here. 
167 | image: docker.io/fluxcd/flux:1.20.2 168 | imagePullPolicy: IfNotPresent 169 | resources: 170 | requests: 171 | cpu: 50m 172 | memory: 64Mi 173 | ports: 174 | - containerPort: 3030 # informational 175 | livenessProbe: 176 | httpGet: 177 | port: 3030 178 | path: /api/flux/v6/identity.pub 179 | initialDelaySeconds: 5 180 | timeoutSeconds: 5 181 | readinessProbe: 182 | httpGet: 183 | port: 3030 184 | path: /api/flux/v6/identity.pub 185 | initialDelaySeconds: 5 186 | timeoutSeconds: 5 187 | volumeMounts: 188 | - name: git-key 189 | mountPath: /etc/fluxd/ssh # to match location given in image's /etc/ssh/config 190 | readOnly: true # this will be the case perforce in K8s >=1.10 191 | - name: git-keygen 192 | mountPath: /var/fluxd/keygen # to match location given in image's /etc/ssh/config 193 | 194 | # Include this if you need to mount a customised known_hosts 195 | # file; you'll also need the volume declared above. 196 | # - name: ssh-config 197 | # mountPath: /root/.ssh 198 | 199 | # Include this and the volume "kubeconfig" above, and the 200 | # environment entry "KUBECONFIG" below, to override the config 201 | # used by kubectl. 202 | # - name: kubeconfig 203 | # mountPath: /etc/fluxd/kube 204 | 205 | # Include this to point kubectl at a different config; you 206 | # will need to do this if you have mounted an alternate config 207 | # from a configmap, as in commented blocks above. 208 | # env: 209 | # - name: KUBECONFIG 210 | # value: /etc/fluxd/kube/config 211 | 212 | # Include this and the volume "gpg-keys" above, and the 213 | # args below. 214 | # - name: gpg-keys 215 | # mountPath: /root/gpg-import 216 | # readOnly: true 217 | 218 | # Include this if you want to supply HTTP basic auth credentials for git 219 | # via the `GIT_AUTHUSER` and `GIT_AUTHKEY` environment variables using a 220 | # secret. 
221 | # envFrom: 222 | # - secretRef: 223 | # name: flux-git-auth 224 | 225 | args: 226 | 227 | # If you deployed memcached in a different namespace to flux, 228 | # or with a different service name, you can supply these 229 | # following two arguments to tell fluxd how to connect to it. 230 | # - --memcached-hostname=memcached.default.svc.cluster.local 231 | 232 | # Use the memcached ClusterIP service name by setting the 233 | # memcached-service to string empty 234 | - --memcached-service= 235 | 236 | # This must be supplied, and be in the tmpfs (emptyDir) 237 | # mounted above, for K8s >= 1.10 238 | - --ssh-keygen-dir=/var/fluxd/keygen 239 | 240 | # Replace the following URL to change the Git repository used by Flux. 241 | # HTTP basic auth credentials can be supplied using environment variables: 242 | # https://$(GIT_AUTHUSER):$(GIT_AUTHKEY)@github.com/user/repository.git 243 | - --git-url=git@github.com:{{ github_username }}/{{ github_repo }} 244 | - --git-branch={% if github_branch != "" %}{{ github_branch }}{% else %}master{% endif %} 245 | 246 | - --git-label=flux 247 | - --git-user={{ github_username }} 248 | - --git-email={{ github_username }}@users.noreply.github.com 249 | {% if github_namespaces != "" %}- --git-path={{ github_namespaces }}{% endif %} 250 | 251 | 252 | # remove deleted workloads 253 | - --sync-garbage-collection 254 | 255 | # Include these two to enable git commit signing 256 | # - --git-gpg-key-import=/root/gpg-import 257 | # - --git-signing-key= 258 | 259 | # Include this to enable git signature verification 260 | # - --git-verify-signatures 261 | 262 | # Tell flux it has readonly access to the repo (default `false`) 263 | # - --git-readonly 264 | 265 | # Instruct flux where to put sync bookkeeping (default "git", meaning use a tag in the upstream git repo) 266 | # - --sync-state=git 267 | 268 | # Include these next two to connect to an "upstream" service 269 | # (e.g., Weave Cloud). The token is particular to the service. 
270 | # - --connect=wss://cloud.weave.works/api/flux 271 | # - --token=abc123abc123abc123abc123 272 | 273 | # Enable manifest generation (default `false`) 274 | # - --manifest-generation=false 275 | 276 | # Serve /metrics endpoint at different port; 277 | # make sure to set prometheus' annotation to scrape the port value. 278 | - --listen-metrics=:3031 279 | 280 | # Optional DNS settings, configuring the ndots option may resolve 281 | # nslookup issues on some Kubernetes setups. 282 | # dnsPolicy: "None" 283 | # dnsConfig: 284 | # options: 285 | # - name: ndots 286 | # value: "1" 287 | -------------------------------------------------------------------------------- /ansible/roles/k3s/files/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | # Usage: 5 | # curl ... | ENV_VAR=... sh - 6 | # or 7 | # ENV_VAR=... ./install.sh 8 | # 9 | # Example: 10 | # Installing a server without traefik: 11 | # curl ... | INSTALL_K3S_EXEC="--disable=traefik" sh - 12 | # Installing an agent to point at a server: 13 | # curl ... | K3S_TOKEN=xxx K3S_URL=https://server-url:6443 sh - 14 | # 15 | # Environment variables: 16 | # - K3S_* 17 | # Environment variables which begin with K3S_ will be preserved for the 18 | # systemd service to use. Setting K3S_URL without explicitly setting 19 | # a systemd exec command will default the command to "agent", and we 20 | # enforce that K3S_TOKEN or K3S_CLUSTER_SECRET is also set. 21 | # 22 | # - INSTALL_K3S_SKIP_DOWNLOAD 23 | # If set to true will not download k3s hash or binary. 24 | # 25 | # - INSTALL_K3S_SYMLINK 26 | # If set to 'skip' will not create symlinks, 'force' will overwrite, 27 | # default will symlink if command does not exist in path. 28 | # 29 | # - INSTALL_K3S_SKIP_ENABLE 30 | # If set to true will not enable or start k3s service. 31 | # 32 | # - INSTALL_K3S_SKIP_START 33 | # If set to true will not start k3s service. 
34 | # 35 | # - INSTALL_K3S_VERSION 36 | # Version of k3s to download from github. Will attempt to download from the 37 | # stable channel if not specified. 38 | # 39 | # - INSTALL_K3S_COMMIT 40 | # Commit of k3s to download from temporary cloud storage. 41 | # * (for developer & QA use) 42 | # 43 | # - INSTALL_K3S_BIN_DIR 44 | # Directory to install k3s binary, links, and uninstall script to, or use 45 | # /usr/local/bin as the default 46 | # 47 | # - INSTALL_K3S_BIN_DIR_READ_ONLY 48 | # If set to true will not write files to INSTALL_K3S_BIN_DIR, forces 49 | # setting INSTALL_K3S_SKIP_DOWNLOAD=true 50 | # 51 | # - INSTALL_K3S_SYSTEMD_DIR 52 | # Directory to install systemd service and environment files to, or use 53 | # /etc/systemd/system as the default 54 | # 55 | # - INSTALL_K3S_EXEC or script arguments 56 | # Command with flags to use for launching k3s in the systemd service, if 57 | # the command is not specified will default to "agent" if K3S_URL is set 58 | # or "server" if not. The final systemd command resolves to a combination 59 | # of EXEC and script args ($@). 60 | # 61 | # The following commands result in the same behavior: 62 | # curl ... | INSTALL_K3S_EXEC="--disable=traefik" sh -s - 63 | # curl ... | INSTALL_K3S_EXEC="server --disable=traefik" sh -s - 64 | # curl ... | INSTALL_K3S_EXEC="server" sh -s - --disable=traefik 65 | # curl ... | sh -s - server --disable=traefik 66 | # curl ... | sh -s - --disable=traefik 67 | # 68 | # - INSTALL_K3S_NAME 69 | # Name of systemd service to create, will default from the k3s exec command 70 | # if not specified. If specified the name will be prefixed with 'k3s-'. 71 | # 72 | # - INSTALL_K3S_TYPE 73 | # Type of systemd service to create, will default from the k3s exec command 74 | # if not specified. 75 | # 76 | # - INSTALL_K3S_SELINUX_WARN 77 | # If set to true will continue if k3s-selinux policy is not found. 78 | # 79 | # - INSTALL_K3S_CHANNEL_URL 80 | # Channel URL for fetching k3s download URL. 
81 | # Defaults to 'https://update.k3s.io/v1-release/channels'. 82 | # 83 | # - INSTALL_K3S_CHANNEL 84 | # Channel to use for fetching k3s download URL. 85 | # Defaults to 'stable'. 86 | 87 | GITHUB_URL=https://github.com/rancher/k3s/releases 88 | STORAGE_URL=https://storage.googleapis.com/k3s-ci-builds 89 | DOWNLOADER= 90 | 91 | # --- helper functions for logs --- 92 | info() 93 | { 94 | echo '[INFO] ' "$@" 95 | } 96 | warn() 97 | { 98 | echo '[WARN] ' "$@" >&2 99 | } 100 | fatal() 101 | { 102 | echo '[ERROR] ' "$@" >&2 103 | exit 1 104 | } 105 | 106 | # --- fatal if no systemd or openrc --- 107 | verify_system() { 108 | if [ -x /sbin/openrc-run ]; then 109 | HAS_OPENRC=true 110 | return 111 | fi 112 | if [ -d /run/systemd ]; then 113 | HAS_SYSTEMD=true 114 | return 115 | fi 116 | fatal 'Can not find systemd or openrc to use as a process supervisor for k3s' 117 | } 118 | 119 | # --- add quotes to command arguments --- 120 | quote() { 121 | for arg in "$@"; do 122 | printf '%s\n' "$arg" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/'/" 123 | done 124 | } 125 | 126 | # --- add indentation and trailing slash to quoted args --- 127 | quote_indent() { 128 | printf ' \\\n' 129 | for arg in "$@"; do 130 | printf '\t%s \\\n' "$(quote "$arg")" 131 | done 132 | } 133 | 134 | # --- escape most punctuation characters, except quotes, forward slash, and space --- 135 | escape() { 136 | printf '%s' "$@" | sed -e 's/\([][!#$%&()*;<=>?\_`{|}]\)/\\\1/g;' 137 | } 138 | 139 | # --- escape double quotes --- 140 | escape_dq() { 141 | printf '%s' "$@" | sed -e 's/"/\\"/g' 142 | } 143 | 144 | # --- ensures $K3S_URL is empty or begins with https://, exiting fatally otherwise --- 145 | verify_k3s_url() { 146 | case "${K3S_URL}" in 147 | "") 148 | ;; 149 | https://*) 150 | ;; 151 | *) 152 | fatal "Only https:// URLs are supported for K3S_URL (have ${K3S_URL})" 153 | ;; 154 | esac 155 | } 156 | 157 | # --- define needed environment variables --- 158 | setup_env() { 159 | # --- use command args if 
passed or create default --- 160 | case "$1" in 161 | # --- if we only have flags discover if command should be server or agent --- 162 | (-*|"") 163 | if [ -z "${K3S_URL}" ]; then 164 | CMD_K3S=server 165 | else 166 | if [ -z "${K3S_TOKEN}" ] && [ -z "${K3S_TOKEN_FILE}" ] && [ -z "${K3S_CLUSTER_SECRET}" ]; then 167 | fatal "Defaulted k3s exec command to 'agent' because K3S_URL is defined, but K3S_TOKEN, K3S_TOKEN_FILE or K3S_CLUSTER_SECRET is not defined." 168 | fi 169 | CMD_K3S=agent 170 | fi 171 | ;; 172 | # --- command is provided --- 173 | (*) 174 | CMD_K3S=$1 175 | shift 176 | ;; 177 | esac 178 | 179 | verify_k3s_url 180 | 181 | CMD_K3S_EXEC="${CMD_K3S}$(quote_indent "$@")" 182 | 183 | # --- use systemd name if defined or create default --- 184 | if [ -n "${INSTALL_K3S_NAME}" ]; then 185 | SYSTEM_NAME=k3s-${INSTALL_K3S_NAME} 186 | else 187 | if [ "${CMD_K3S}" = server ]; then 188 | SYSTEM_NAME=k3s 189 | else 190 | SYSTEM_NAME=k3s-${CMD_K3S} 191 | fi 192 | fi 193 | 194 | # --- check for invalid characters in system name --- 195 | valid_chars=$(printf '%s' "${SYSTEM_NAME}" | sed -e 's/[][!#$%&()*;<=>?\_`{|}/[:space:]]/^/g;' ) 196 | if [ "${SYSTEM_NAME}" != "${valid_chars}" ]; then 197 | invalid_chars=$(printf '%s' "${valid_chars}" | sed -e 's/[^^]/ /g') 198 | fatal "Invalid characters for system name: 199 | ${SYSTEM_NAME} 200 | ${invalid_chars}" 201 | fi 202 | 203 | # --- use sudo if we are not already root --- 204 | SUDO=sudo 205 | if [ $(id -u) -eq 0 ]; then 206 | SUDO= 207 | fi 208 | 209 | # --- use systemd type if defined or create default --- 210 | if [ -n "${INSTALL_K3S_TYPE}" ]; then 211 | SYSTEMD_TYPE=${INSTALL_K3S_TYPE} 212 | else 213 | if [ "${CMD_K3S}" = server ]; then 214 | SYSTEMD_TYPE=notify 215 | else 216 | SYSTEMD_TYPE=exec 217 | fi 218 | fi 219 | 220 | # --- use binary install directory if defined or create default --- 221 | if [ -n "${INSTALL_K3S_BIN_DIR}" ]; then 222 | BIN_DIR=${INSTALL_K3S_BIN_DIR} 223 | else 224 | BIN_DIR=/usr/local/bin 225 
| fi 226 | 227 | # --- use systemd directory if defined or create default --- 228 | if [ -n "${INSTALL_K3S_SYSTEMD_DIR}" ]; then 229 | SYSTEMD_DIR="${INSTALL_K3S_SYSTEMD_DIR}" 230 | else 231 | SYSTEMD_DIR=/etc/systemd/system 232 | fi 233 | 234 | # --- set related files from system name --- 235 | SERVICE_K3S=${SYSTEM_NAME}.service 236 | UNINSTALL_K3S_SH=${UNINSTALL_K3S_SH:-${BIN_DIR}/${SYSTEM_NAME}-uninstall.sh} 237 | KILLALL_K3S_SH=${KILLALL_K3S_SH:-${BIN_DIR}/k3s-killall.sh} 238 | 239 | # --- use service or environment location depending on systemd/openrc --- 240 | if [ "${HAS_SYSTEMD}" = true ]; then 241 | FILE_K3S_SERVICE=${SYSTEMD_DIR}/${SERVICE_K3S} 242 | FILE_K3S_ENV=${SYSTEMD_DIR}/${SERVICE_K3S}.env 243 | elif [ "${HAS_OPENRC}" = true ]; then 244 | $SUDO mkdir -p /etc/rancher/k3s 245 | FILE_K3S_SERVICE=/etc/init.d/${SYSTEM_NAME} 246 | FILE_K3S_ENV=/etc/rancher/k3s/${SYSTEM_NAME}.env 247 | fi 248 | 249 | # --- get hash of config & exec for currently installed k3s --- 250 | PRE_INSTALL_HASHES=$(get_installed_hashes) 251 | 252 | # --- if bin directory is read only skip download --- 253 | if [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ]; then 254 | INSTALL_K3S_SKIP_DOWNLOAD=true 255 | fi 256 | 257 | # --- setup channel values 258 | INSTALL_K3S_CHANNEL_URL=${INSTALL_K3S_CHANNEL_URL:-'https://update.k3s.io/v1-release/channels'} 259 | INSTALL_K3S_CHANNEL=${INSTALL_K3S_CHANNEL:-'stable'} 260 | } 261 | 262 | # --- check if skip download environment variable set --- 263 | can_skip_download() { 264 | if [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != true ]; then 265 | return 1 266 | fi 267 | } 268 | 269 | # --- verify an executable k3s binary is installed --- 270 | verify_k3s_is_executable() { 271 | if [ !
-x ${BIN_DIR}/k3s ]; then 272 | fatal "Executable k3s binary not found at ${BIN_DIR}/k3s" 273 | fi 274 | } 275 | 276 | # --- set arch and suffix, fatal if architecture not supported --- 277 | setup_verify_arch() { 278 | if [ -z "$ARCH" ]; then 279 | ARCH=$(uname -m) 280 | fi 281 | case $ARCH in 282 | amd64) 283 | ARCH=amd64 284 | SUFFIX= 285 | ;; 286 | x86_64) 287 | ARCH=amd64 288 | SUFFIX= 289 | ;; 290 | arm64) 291 | ARCH=arm64 292 | SUFFIX=-${ARCH} 293 | ;; 294 | aarch64) 295 | ARCH=arm64 296 | SUFFIX=-${ARCH} 297 | ;; 298 | arm*) 299 | ARCH=arm 300 | SUFFIX=-${ARCH}hf 301 | ;; 302 | *) 303 | fatal "Unsupported architecture $ARCH" 304 | esac 305 | } 306 | 307 | # --- verify existence of network downloader executable --- 308 | verify_downloader() { 309 | # Return failure if it doesn't exist or is not executable 310 | [ -x "$(which $1)" ] || return 1 311 | 312 | # Set verified executable as our downloader program and return success 313 | DOWNLOADER=$1 314 | return 0 315 | } 316 | 317 | # --- create temporary directory and cleanup when done --- 318 | setup_tmp() { 319 | TMP_DIR=$(mktemp -d -t k3s-install.XXXXXXXXXX) 320 | TMP_HASH=${TMP_DIR}/k3s.hash 321 | TMP_BIN=${TMP_DIR}/k3s.bin 322 | cleanup() { 323 | code=$?
324 | set +e 325 | trap - EXIT 326 | rm -rf ${TMP_DIR} 327 | exit $code 328 | } 329 | trap cleanup INT EXIT 330 | } 331 | 332 | # --- use desired k3s version if defined or find version from channel --- 333 | get_release_version() { 334 | if [ -n "${INSTALL_K3S_COMMIT}" ]; then 335 | VERSION_K3S="commit ${INSTALL_K3S_COMMIT}" 336 | elif [ -n "${INSTALL_K3S_VERSION}" ]; then 337 | VERSION_K3S=${INSTALL_K3S_VERSION} 338 | else 339 | info "Finding release for channel ${INSTALL_K3S_CHANNEL}" 340 | version_url="${INSTALL_K3S_CHANNEL_URL}/${INSTALL_K3S_CHANNEL}" 341 | case $DOWNLOADER in 342 | curl) 343 | VERSION_K3S=$(curl -w '%{url_effective}' -L -s -S ${version_url} -o /dev/null | sed -e 's|.*/||') 344 | ;; 345 | wget) 346 | VERSION_K3S=$(wget -SqO /dev/null ${version_url} 2>&1 | grep -i Location | sed -e 's|.*/||') 347 | ;; 348 | *) 349 | fatal "Incorrect downloader executable '$DOWNLOADER'" 350 | ;; 351 | esac 352 | fi 353 | info "Using ${VERSION_K3S} as release" 354 | } 355 | 356 | # --- download from github url --- 357 | download() { 358 | [ $# -eq 2 ] || fatal 'download needs exactly 2 arguments' 359 | 360 | case $DOWNLOADER in 361 | curl) 362 | curl -o $1 -sfL $2 363 | ;; 364 | wget) 365 | wget -qO $1 $2 366 | ;; 367 | *) 368 | fatal "Incorrect executable '$DOWNLOADER'" 369 | ;; 370 | esac 371 | 372 | # Abort if download command failed 373 | [ $? 
-eq 0 ] || fatal 'Download failed' 374 | } 375 | 376 | # --- download hash from github url --- 377 | download_hash() { 378 | if [ -n "${INSTALL_K3S_COMMIT}" ]; then 379 | HASH_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT}.sha256sum 380 | else 381 | HASH_URL=${GITHUB_URL}/download/${VERSION_K3S}/sha256sum-${ARCH}.txt 382 | fi 383 | info "Downloading hash ${HASH_URL}" 384 | download ${TMP_HASH} ${HASH_URL} 385 | HASH_EXPECTED=$(grep " k3s${SUFFIX}$" ${TMP_HASH}) 386 | HASH_EXPECTED=${HASH_EXPECTED%%[[:blank:]]*} 387 | } 388 | 389 | # --- check hash against installed version --- 390 | installed_hash_matches() { 391 | if [ -x ${BIN_DIR}/k3s ]; then 392 | HASH_INSTALLED=$(sha256sum ${BIN_DIR}/k3s) 393 | HASH_INSTALLED=${HASH_INSTALLED%%[[:blank:]]*} 394 | if [ "${HASH_EXPECTED}" = "${HASH_INSTALLED}" ]; then 395 | return 396 | fi 397 | fi 398 | return 1 399 | } 400 | 401 | # --- download binary from github url --- 402 | download_binary() { 403 | if [ -n "${INSTALL_K3S_COMMIT}" ]; then 404 | BIN_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT} 405 | else 406 | BIN_URL=${GITHUB_URL}/download/${VERSION_K3S}/k3s${SUFFIX} 407 | fi 408 | info "Downloading binary ${BIN_URL}" 409 | download ${TMP_BIN} ${BIN_URL} 410 | } 411 | 412 | # --- verify downloaded binary hash --- 413 | verify_binary() { 414 | info "Verifying binary download" 415 | HASH_BIN=$(sha256sum ${TMP_BIN}) 416 | HASH_BIN=${HASH_BIN%%[[:blank:]]*} 417 | if [ "${HASH_EXPECTED}" != "${HASH_BIN}" ]; then 418 | fatal "Download sha256 does not match ${HASH_EXPECTED}, got ${HASH_BIN}" 419 | fi 420 | } 421 | 422 | # --- setup permissions and move binary to system directory --- 423 | setup_binary() { 424 | chmod 755 ${TMP_BIN} 425 | info "Installing k3s to ${BIN_DIR}/k3s" 426 | $SUDO chown root:root ${TMP_BIN} 427 | $SUDO mv -f ${TMP_BIN} ${BIN_DIR}/k3s 428 | } 429 | 430 | # --- setup selinux policy --- 431 | setup_selinux() { 432 | policy_hint="please install: 433 | yum install -y container-selinux 
selinux-policy-base 434 | rpm -i https://rpm.rancher.io/k3s-selinux-0.1.1-rc1.el7.noarch.rpm 435 | " 436 | policy_error=fatal 437 | if [ "$INSTALL_K3S_SELINUX_WARN" = true ]; then 438 | policy_error=warn 439 | fi 440 | 441 | if ! $SUDO chcon -u system_u -r object_r -t container_runtime_exec_t ${BIN_DIR}/k3s >/dev/null 2>&1; then 442 | if $SUDO grep '^\s*SELINUX=enforcing' /etc/selinux/config >/dev/null 2>&1; then 443 | $policy_error "Failed to apply container_runtime_exec_t to ${BIN_DIR}/k3s, ${policy_hint}" 444 | fi 445 | else 446 | if [ ! -f /usr/share/selinux/packages/k3s.pp ]; then 447 | $policy_error "Failed to find the k3s-selinux policy, ${policy_hint}" 448 | fi 449 | fi 450 | } 451 | 452 | # --- download and verify k3s --- 453 | download_and_verify() { 454 | if can_skip_download; then 455 | info 'Skipping k3s download and verify' 456 | verify_k3s_is_executable 457 | return 458 | fi 459 | 460 | setup_verify_arch 461 | verify_downloader curl || verify_downloader wget || fatal 'Can not find curl or wget for downloading files' 462 | setup_tmp 463 | get_release_version 464 | download_hash 465 | 466 | if installed_hash_matches; then 467 | info 'Skipping binary downloaded, installed k3s matches hash' 468 | return 469 | fi 470 | 471 | download_binary 472 | verify_binary 473 | setup_binary 474 | } 475 | 476 | # --- add additional utility links --- 477 | create_symlinks() { 478 | [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ] && return 479 | [ "${INSTALL_K3S_SYMLINK}" = skip ] && return 480 | 481 | for cmd in kubectl crictl ctr; do 482 | if [ ! 
-e ${BIN_DIR}/${cmd} ] || [ "${INSTALL_K3S_SYMLINK}" = force ]; then 483 | which_cmd=$(which ${cmd} 2>/dev/null || true) 484 | if [ -z "${which_cmd}" ] || [ "${INSTALL_K3S_SYMLINK}" = force ]; then 485 | info "Creating ${BIN_DIR}/${cmd} symlink to k3s" 486 | $SUDO ln -sf k3s ${BIN_DIR}/${cmd} 487 | else 488 | info "Skipping ${BIN_DIR}/${cmd} symlink to k3s, command exists in PATH at ${which_cmd}" 489 | fi 490 | else 491 | info "Skipping ${BIN_DIR}/${cmd} symlink to k3s, already exists" 492 | fi 493 | done 494 | } 495 | 496 | # --- create killall script --- 497 | create_killall() { 498 | [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ] && return 499 | info "Creating killall script ${KILLALL_K3S_SH}" 500 | $SUDO tee ${KILLALL_K3S_SH} >/dev/null << \EOF 501 | #!/bin/sh 502 | [ $(id -u) -eq 0 ] || exec sudo $0 $@ 503 | 504 | for bin in /var/lib/rancher/k3s/data/**/bin/; do 505 | [ -d $bin ] && export PATH=$PATH:$bin:$bin/aux 506 | done 507 | 508 | set -x 509 | 510 | for service in /etc/systemd/system/k3s*.service; do 511 | [ -s $service ] && systemctl stop $(basename $service) 512 | done 513 | 514 | for service in /etc/init.d/k3s*; do 515 | [ -x $service ] && $service stop 516 | done 517 | 518 | pschildren() { 519 | ps -e -o ppid= -o pid= | \ 520 | sed -e 's/^\s*//g; s/\s\s*/\t/g;' | \ 521 | grep -w "^$1" | \ 522 | cut -f2 523 | } 524 | 525 | pstree() { 526 | for pid in $@; do 527 | echo $pid 528 | for child in $(pschildren $pid); do 529 | pstree $child 530 | done 531 | done 532 | } 533 | 534 | killtree() { 535 | kill -9 $( 536 | { set +x; } 2>/dev/null; 537 | pstree $@; 538 | set -x; 539 | ) 2>/dev/null 540 | } 541 | 542 | getshims() { 543 | ps -e -o pid= -o args= | sed -e 's/^ *//; s/\s\s*/\t/;' | grep -w 'k3s/data/[^/]*/bin/containerd-shim' | cut -f1 544 | } 545 | 546 | killtree $({ set +x; } 2>/dev/null; getshims; set -x) 547 | 548 | do_unmount() { 549 | { set +x; } 2>/dev/null 550 | MOUNTS= 551 | while read ignore mount ignore; do 552 | MOUNTS="$mount\n$MOUNTS" 553 | 
done /dev/null | grep 'master cni0' | while read ignore iface ignore; do 570 | iface=${iface%%@*} 571 | [ -z "$iface" ] || ip link delete $iface 572 | done 573 | ip link delete cni0 574 | ip link delete flannel.1 575 | rm -rf /var/lib/cni/ 576 | iptables-save | grep -v KUBE- | grep -v CNI- | iptables-restore 577 | EOF 578 | $SUDO chmod 755 ${KILLALL_K3S_SH} 579 | $SUDO chown root:root ${KILLALL_K3S_SH} 580 | } 581 | 582 | # --- create uninstall script --- 583 | create_uninstall() { 584 | [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ] && return 585 | info "Creating uninstall script ${UNINSTALL_K3S_SH}" 586 | $SUDO tee ${UNINSTALL_K3S_SH} >/dev/null << EOF 587 | #!/bin/sh 588 | set -x 589 | [ \$(id -u) -eq 0 ] || exec sudo \$0 \$@ 590 | 591 | ${KILLALL_K3S_SH} 592 | 593 | if which systemctl; then 594 | systemctl disable ${SYSTEM_NAME} 595 | systemctl reset-failed ${SYSTEM_NAME} 596 | systemctl daemon-reload 597 | fi 598 | if which rc-update; then 599 | rc-update delete ${SYSTEM_NAME} default 600 | fi 601 | 602 | rm -f ${FILE_K3S_SERVICE} 603 | rm -f ${FILE_K3S_ENV} 604 | 605 | remove_uninstall() { 606 | rm -f ${UNINSTALL_K3S_SH} 607 | } 608 | trap remove_uninstall EXIT 609 | 610 | if (ls ${SYSTEMD_DIR}/k3s*.service || ls /etc/init.d/k3s*) >/dev/null 2>&1; then 611 | set +x; echo 'Additional k3s services installed, skipping uninstall of k3s'; set -x 612 | exit 613 | fi 614 | 615 | for cmd in kubectl crictl ctr; do 616 | if [ -L ${BIN_DIR}/\$cmd ]; then 617 | rm -f ${BIN_DIR}/\$cmd 618 | fi 619 | done 620 | 621 | rm -rf /etc/rancher/k3s 622 | rm -rf /run/k3s 623 | rm -rf /run/flannel 624 | rm -rf /var/lib/rancher/k3s 625 | rm -rf /var/lib/kubelet 626 | rm -f ${BIN_DIR}/k3s 627 | rm -f ${KILLALL_K3S_SH} 628 | EOF 629 | $SUDO chmod 755 ${UNINSTALL_K3S_SH} 630 | $SUDO chown root:root ${UNINSTALL_K3S_SH} 631 | } 632 | 633 | # --- disable current service if loaded -- 634 | systemd_disable() { 635 | $SUDO rm -f /etc/systemd/system/${SERVICE_K3S} || true 636 | $SUDO rm -f 
/etc/systemd/system/${SERVICE_K3S}.env || true 637 | $SUDO systemctl disable ${SYSTEM_NAME} >/dev/null 2>&1 || true 638 | } 639 | 640 | # --- capture current env and create file containing k3s_ variables --- 641 | create_env_file() { 642 | info "env: Creating environment file ${FILE_K3S_ENV}" 643 | UMASK=$(umask) 644 | umask 0377 645 | env | grep '^K3S_' | $SUDO tee ${FILE_K3S_ENV} >/dev/null 646 | env | egrep -i '^(NO|HTTP|HTTPS)_PROXY' | $SUDO tee -a ${FILE_K3S_ENV} >/dev/null 647 | umask $UMASK 648 | } 649 | 650 | # --- write systemd service file --- 651 | create_systemd_service_file() { 652 | info "systemd: Creating service file ${FILE_K3S_SERVICE}" 653 | $SUDO tee ${FILE_K3S_SERVICE} >/dev/null << EOF 654 | [Unit] 655 | Description=Lightweight Kubernetes 656 | Documentation=https://k3s.io 657 | Wants=network-online.target 658 | 659 | [Install] 660 | WantedBy=multi-user.target 661 | 662 | [Service] 663 | Type=${SYSTEMD_TYPE} 664 | EnvironmentFile=${FILE_K3S_ENV} 665 | KillMode=process 666 | Delegate=yes 667 | # Having non-zero Limit*s causes performance problems due to accounting overhead 668 | # in the kernel. We recommend using cgroups to do container-local accounting. 
669 | LimitNOFILE=1048576 670 | LimitNPROC=infinity 671 | LimitCORE=infinity 672 | TasksMax=infinity 673 | TimeoutStartSec=0 674 | Restart=always 675 | RestartSec=5s 676 | ExecStartPre=-/sbin/modprobe br_netfilter 677 | ExecStartPre=-/sbin/modprobe overlay 678 | ExecStart=${BIN_DIR}/k3s \\ 679 | ${CMD_K3S_EXEC} 680 | 681 | EOF 682 | } 683 | 684 | # --- write openrc service file --- 685 | create_openrc_service_file() { 686 | LOG_FILE=/var/log/${SYSTEM_NAME}.log 687 | 688 | info "openrc: Creating service file ${FILE_K3S_SERVICE}" 689 | $SUDO tee ${FILE_K3S_SERVICE} >/dev/null << EOF 690 | #!/sbin/openrc-run 691 | 692 | depend() { 693 | after network-online 694 | want cgroups 695 | } 696 | 697 | start_pre() { 698 | rm -f /tmp/k3s.* 699 | } 700 | 701 | supervisor=supervise-daemon 702 | name=${SYSTEM_NAME} 703 | command="${BIN_DIR}/k3s" 704 | command_args="$(escape_dq "${CMD_K3S_EXEC}") 705 | >>${LOG_FILE} 2>&1" 706 | 707 | output_log=${LOG_FILE} 708 | error_log=${LOG_FILE} 709 | 710 | pidfile="/var/run/${SYSTEM_NAME}.pid" 711 | respawn_delay=5 712 | respawn_max=0 713 | 714 | set -o allexport 715 | if [ -f /etc/environment ]; then source /etc/environment; fi 716 | if [ -f ${FILE_K3S_ENV} ]; then source ${FILE_K3S_ENV}; fi 717 | set +o allexport 718 | EOF 719 | $SUDO chmod 0755 ${FILE_K3S_SERVICE} 720 | 721 | $SUDO tee /etc/logrotate.d/${SYSTEM_NAME} >/dev/null << EOF 722 | ${LOG_FILE} { 723 | missingok 724 | notifempty 725 | copytruncate 726 | } 727 | EOF 728 | } 729 | 730 | # --- write systemd or openrc service file --- 731 | create_service_file() { 732 | [ "${HAS_SYSTEMD}" = true ] && create_systemd_service_file 733 | [ "${HAS_OPENRC}" = true ] && create_openrc_service_file 734 | return 0 735 | } 736 | 737 | # --- get hashes of the current k3s bin and service files 738 | get_installed_hashes() { 739 | $SUDO sha256sum ${BIN_DIR}/k3s ${FILE_K3S_SERVICE} ${FILE_K3S_ENV} 2>&1 || true 740 | } 741 | 742 | # --- enable and start systemd service --- 743 | systemd_enable() { 
744 | info "systemd: Enabling ${SYSTEM_NAME} unit" 745 | $SUDO systemctl enable ${FILE_K3S_SERVICE} >/dev/null 746 | $SUDO systemctl daemon-reload >/dev/null 747 | } 748 | 749 | systemd_start() { 750 | info "systemd: Starting ${SYSTEM_NAME}" 751 | $SUDO systemctl restart ${SYSTEM_NAME} 752 | } 753 | 754 | # --- enable and start openrc service --- 755 | openrc_enable() { 756 | info "openrc: Enabling ${SYSTEM_NAME} service for default runlevel" 757 | $SUDO rc-update add ${SYSTEM_NAME} default >/dev/null 758 | } 759 | 760 | openrc_start() { 761 | info "openrc: Starting ${SYSTEM_NAME}" 762 | $SUDO ${FILE_K3S_SERVICE} restart 763 | } 764 | 765 | # --- startup systemd or openrc service --- 766 | service_enable_and_start() { 767 | [ "${INSTALL_K3S_SKIP_ENABLE}" = true ] && return 768 | 769 | [ "${HAS_SYSTEMD}" = true ] && systemd_enable 770 | [ "${HAS_OPENRC}" = true ] && openrc_enable 771 | 772 | [ "${INSTALL_K3S_SKIP_START}" = true ] && return 773 | 774 | POST_INSTALL_HASHES=$(get_installed_hashes) 775 | if [ "${PRE_INSTALL_HASHES}" = "${POST_INSTALL_HASHES}" ]; then 776 | info 'No change detected so skipping service start' 777 | return 778 | fi 779 | 780 | [ "${HAS_SYSTEMD}" = true ] && systemd_start 781 | [ "${HAS_OPENRC}" = true ] && openrc_start 782 | return 0 783 | } 784 | 785 | # --- re-evaluate args to include env command --- 786 | eval set -- $(escape "${INSTALL_K3S_EXEC}") $(quote "$@") 787 | 788 | # --- run the install process -- 789 | { 790 | verify_system 791 | setup_env "$@" 792 | download_and_verify 793 | setup_selinux 794 | create_symlinks 795 | create_killall 796 | create_uninstall 797 | systemd_disable 798 | create_env_file 799 | create_service_file 800 | service_enable_and_start 801 | } 802 | -------------------------------------------------------------------------------- /ansible/roles/platform/templates/newrelic.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: 
nri-bundle/charts/nri-metadata-injection/templates/clusterrolebinding.yaml 3 | 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | kind: ClusterRoleBinding 6 | metadata: 7 | name: nri-bundle-nri-metadata-injection 8 | labels: 9 | app.kubernetes.io/name: nri-metadata-injection 10 | helm.sh/chart: nri-metadata-injection-1.1.0 11 | app.kubernetes.io/instance: nri-bundle 12 | app.kubernetes.io/version: "1.3.0" 13 | roleRef: 14 | apiGroup: rbac.authorization.k8s.io 15 | kind: ClusterRole 16 | name: nri-bundle-nri-metadata-injection 17 | subjects: 18 | - kind: ServiceAccount 19 | name: nri-bundle-nri-metadata-injection 20 | namespace: default 21 | 22 | --- 23 | # Source: nri-bundle/charts/kube-state-metrics/templates/deployment.yaml 24 | apiVersion: apps/v1 25 | kind: Deployment 26 | metadata: 27 | name: nri-bundle-kube-state-metrics 28 | namespace: default 29 | labels: 30 | app.kubernetes.io/name: kube-state-metrics 31 | helm.sh/chart: "kube-state-metrics-2.8.1" 32 | app.kubernetes.io/instance: "nri-bundle" 33 | spec: 34 | selector: 35 | matchLabels: 36 | app.kubernetes.io/name: kube-state-metrics 37 | replicas: 1 38 | template: 39 | metadata: 40 | labels: 41 | app.kubernetes.io/name: kube-state-metrics 42 | app.kubernetes.io/instance: "nri-bundle" 43 | spec: 44 | hostNetwork: false 45 | serviceAccountName: nri-bundle-kube-state-metrics 46 | securityContext: 47 | fsGroup: 65534 48 | runAsUser: 65534 49 | containers: 50 | - name: kube-state-metrics 51 | args: 52 | 53 | - --collectors=certificatesigningrequests 54 | 55 | 56 | - --collectors=configmaps 57 | 58 | 59 | - --collectors=cronjobs 60 | 61 | 62 | - --collectors=daemonsets 63 | 64 | 65 | - --collectors=deployments 66 | 67 | 68 | - --collectors=endpoints 69 | 70 | 71 | - --collectors=horizontalpodautoscalers 72 | 73 | 74 | - --collectors=ingresses 75 | 76 | 77 | - --collectors=jobs 78 | 79 | 80 | - --collectors=limitranges 81 | 82 | 83 | - --collectors=mutatingwebhookconfigurations 84 | 85 | 86 | - 
--collectors=namespaces 87 | 88 | 89 | - --collectors=networkpolicies 90 | 91 | 92 | - --collectors=nodes 93 | 94 | 95 | - --collectors=persistentvolumeclaims 96 | 97 | 98 | - --collectors=persistentvolumes 99 | 100 | 101 | - --collectors=poddisruptionbudgets 102 | 103 | 104 | - --collectors=pods 105 | 106 | 107 | - --collectors=replicasets 108 | 109 | 110 | - --collectors=replicationcontrollers 111 | 112 | 113 | - --collectors=resourcequotas 114 | 115 | 116 | - --collectors=secrets 117 | 118 | 119 | - --collectors=services 120 | 121 | 122 | - --collectors=statefulsets 123 | 124 | 125 | - --collectors=storageclasses 126 | 127 | 128 | - --collectors=validatingwebhookconfigurations 129 | 130 | 131 | - --collectors=verticalpodautoscalers 132 | 133 | 134 | - --collectors=volumeattachments 135 | 136 | 137 | 138 | imagePullPolicy: IfNotPresent 139 | image: "quay.io/coreos/kube-state-metrics:v1.9.5" 140 | ports: 141 | - containerPort: 8080 142 | livenessProbe: 143 | httpGet: 144 | path: /healthz 145 | port: 8080 146 | initialDelaySeconds: 5 147 | timeoutSeconds: 5 148 | readinessProbe: 149 | httpGet: 150 | path: / 151 | port: 8080 152 | initialDelaySeconds: 5 153 | timeoutSeconds: 5 154 | 155 | --- 156 | # Source: nri-bundle/charts/nri-prometheus/templates/clusterrolebinding.yaml 157 | 158 | apiVersion: rbac.authorization.k8s.io/v1 159 | kind: ClusterRoleBinding 160 | metadata: 161 | name: nri-bundle-nri-prometheus 162 | labels: 163 | app.kubernetes.io/name: nri-prometheus 164 | helm.sh/chart: nri-prometheus-1.2.0 165 | app.kubernetes.io/instance: nri-bundle 166 | app.kubernetes.io/version: "2.0.0" 167 | roleRef: 168 | apiGroup: rbac.authorization.k8s.io 169 | kind: ClusterRole 170 | name: nri-bundle-nri-prometheus 171 | subjects: 172 | - kind: ServiceAccount 173 | name: nri-prometheus 174 | namespace: default 175 | --- 176 | # Source: nri-bundle/charts/kube-state-metrics/templates/serviceaccount.yaml 177 | apiVersion: v1 178 | kind: ServiceAccount 179 | metadata: 180 | 
labels: 181 | app.kubernetes.io/name: kube-state-metrics 182 | helm.sh/chart: kube-state-metrics-2.8.1 183 | app.kubernetes.io/instance: nri-bundle 184 | name: nri-bundle-kube-state-metrics 185 | namespace: default 186 | imagePullSecrets: 187 | [] 188 | 189 | --- 190 | # Source: nri-bundle/charts/nri-kube-events/templates/secret.yaml 191 | 192 | apiVersion: v1 193 | kind: Secret 194 | metadata: 195 | name: nri-bundle-nri-kube-events-config 196 | namespace: default 197 | labels: 198 | app: nri-kube-events 199 | app.kubernetes.io/name: nri-kube-events 200 | helm.sh/chart: nri-kube-events-1.2.1 201 | app.kubernetes.io/instance: nri-bundle 202 | app.kubernetes.io/version: "1.3.0" 203 | type: Opaque 204 | stringData: 205 | licenseKey: {{ newrelic_license_key }} 206 | 207 | --- 208 | # Source: nri-bundle/charts/newrelic-logging/templates/configmap.yaml 209 | apiVersion: v1 210 | kind: ConfigMap 211 | metadata: 212 | labels: 213 | app: newrelic-logging 214 | chart: newrelic-logging-1.1.4 215 | release: nri-bundle 216 | app.kubernetes.io/name: newrelic-logging 217 | name: nri-bundle-newrelic-logging-fluent-bit-config 218 | data: 219 | # Configuration files: server, input, filters and output 220 | # ====================================================== 221 | fluent-bit.conf: | 222 | [SERVICE] 223 | Flush 1 224 | Log_Level ${LOG_LEVEL} 225 | Daemon off 226 | Parsers_File parsers.conf 227 | HTTP_Server On 228 | HTTP_Listen 0.0.0.0 229 | HTTP_Port 2020 230 | 231 | @INCLUDE input-kubernetes.conf 232 | @INCLUDE output-newrelic.conf 233 | @INCLUDE filter-kubernetes.conf 234 | 235 | input-kubernetes.conf: | 236 | [INPUT] 237 | Name tail 238 | Tag kube.* 239 | Path ${PATH} 240 | Parser docker 241 | DB /var/log/flb_kube.db 242 | Mem_Buf_Limit 7MB 243 | Skip_Long_Lines On 244 | Refresh_Interval 10 245 | 246 | filter-kubernetes.conf: | 247 | [FILTER] 248 | Name record_modifier 249 | Match * 250 | Record cluster_name ${CLUSTER_NAME} 251 | 252 | [FILTER] 253 | Name kubernetes 254 | 
Match kube.* 255 | Kube_URL https://kubernetes.default.svc.cluster.local:443 256 | Merge_JSON_Log Off 257 | 258 | output-newrelic.conf: | 259 | [OUTPUT] 260 | Name newrelic 261 | Match * 262 | licenseKey ${LICENSE_KEY} 263 | endpoint ${ENDPOINT} 264 | 265 | parsers.conf: | 266 | [PARSER] 267 | Name json 268 | Format json 269 | Time_Key time 270 | Time_Format %d/%b/%Y:%H:%M:%S %z 271 | 272 | [PARSER] 273 | Name docker 274 | Format json 275 | Time_Key time 276 | Time_Format %Y-%m-%dT%H:%M:%S.%L 277 | Time_Keep On 278 | # Command | Decoder | Field | Optional Action 279 | # =============|==================|================= 280 | Decode_Field_As escaped log 281 | 282 | --- 283 | # Source: nri-bundle/charts/newrelic-logging/templates/clusterrolebinding.yaml 284 | 285 | apiVersion: rbac.authorization.k8s.io/v1 286 | kind: ClusterRoleBinding 287 | metadata: 288 | labels: 289 | app: newrelic-logging 290 | chart: newrelic-logging-1.1.4 291 | release: nri-bundle 292 | app.kubernetes.io/name: newrelic-logging 293 | name: nri-bundle-newrelic-logging 294 | roleRef: 295 | apiGroup: rbac.authorization.k8s.io 296 | kind: ClusterRole 297 | name: nri-bundle-newrelic-logging 298 | subjects: 299 | - kind: ServiceAccount 300 | name: nri-bundle-newrelic-logging 301 | namespace: default 302 | --- 303 | # Source: nri-bundle/charts/newrelic-infrastructure/templates/secret.yaml 304 | 305 | apiVersion: v1 306 | kind: Secret 307 | metadata: 308 | namespace: default 309 | labels: 310 | app: newrelic-infrastructure 311 | chart: newrelic-infrastructure-1.3.0 312 | release: nri-bundle 313 | mode: privileged 314 | name: nri-bundle-newrelic-infrastructure-config 315 | type: Opaque 316 | stringData: 317 | license: {{ newrelic_license_key }} 318 | 319 | --- 320 | # Source: nri-bundle/charts/nri-metadata-injection/templates/mutationwebhookconfiguration.yaml 321 | apiVersion: admissionregistration.k8s.io/v1beta1 322 | kind: MutatingWebhookConfiguration 323 | metadata: 324 | name: 
nri-bundle-nri-metadata-injection 325 | labels: 326 | app.kubernetes.io/name: nri-metadata-injection 327 | helm.sh/chart: nri-metadata-injection-1.1.0 328 | app.kubernetes.io/instance: nri-bundle 329 | app.kubernetes.io/version: "1.3.0" 330 | webhooks: 331 | - name: metadata-injection.newrelic.com 332 | clientConfig: 333 | service: 334 | name: nri-bundle-nri-metadata-injection 335 | namespace: default 336 | path: "/mutate" 337 | caBundle: "" 338 | rules: 339 | - operations: [ "CREATE" ] 340 | apiGroups: [""] 341 | apiVersions: ["v1"] 342 | resources: ["pods"] 343 | failurePolicy: Ignore 344 | 345 | --- 346 | # Source: nri-bundle/charts/nri-metadata-injection/templates/job.yaml 347 | 348 | apiVersion: batch/v1 349 | kind: Job 350 | metadata: 351 | name: nri-bundle-nri-metadata-injection-job 352 | namespace: default 353 | labels: 354 | app.kubernetes.io/name: nri-metadata-injection 355 | helm.sh/chart: nri-metadata-injection-1.1.0 356 | app.kubernetes.io/instance: nri-bundle 357 | app.kubernetes.io/version: "1.3.0" 358 | spec: 359 | template: 360 | metadata: 361 | labels: 362 | app.kubernetes.io/name: nri-metadata-injection 363 | helm.sh/chart: nri-metadata-injection-1.1.0 364 | app.kubernetes.io/instance: nri-bundle 365 | app.kubernetes.io/version: "1.3.0" 366 | spec: 367 | serviceAccountName: nri-bundle-nri-metadata-injection 368 | containers: 369 | - name: nri-metadata-injection-job 370 | # This is a minimal kubectl image based on Alpine Linux that signs certificates using the k8s extension api server 371 | image: "newrelic/k8s-webhook-cert-manager:1.3.0" 372 | imagePullPolicy: "IfNotPresent" 373 | command: ["./generate_certificate.sh"] 374 | args: 375 | - "--service" 376 | - nri-bundle-nri-metadata-injection 377 | - "--webhook" 378 | - nri-bundle-nri-metadata-injection 379 | - "--secret" 380 | - nri-bundle-nri-metadata-injection 381 | - "--namespace" 382 | - default 383 | restartPolicy: Never 384 | backoffLimit: 1 385 | 386 | --- 387 | # Source:
nri-bundle/charts/nri-kube-events/templates/serviceaccount.yaml 388 | 389 | apiVersion: v1 390 | kind: ServiceAccount 391 | metadata: 392 | labels: 393 | app: nri-kube-events 394 | app.kubernetes.io/name: nri-kube-events 395 | helm.sh/chart: nri-kube-events-1.2.1 396 | app.kubernetes.io/instance: nri-bundle 397 | app.kubernetes.io/version: "1.3.0" 398 | name: nri-bundle-nri-kube-events 399 | namespace: default 400 | --- 401 | # Source: nri-bundle/charts/newrelic-infrastructure/templates/daemonset-windows.yaml 402 | 403 | 404 | --- 405 | # Source: nri-bundle/charts/nri-metadata-injection/templates/clusterrole.yaml 406 | 407 | apiVersion: rbac.authorization.k8s.io/v1 408 | kind: ClusterRole 409 | metadata: 410 | name: nri-bundle-nri-metadata-injection 411 | labels: 412 | app.kubernetes.io/name: nri-metadata-injection 413 | helm.sh/chart: nri-metadata-injection-1.1.0 414 | app.kubernetes.io/instance: nri-bundle 415 | app.kubernetes.io/version: "1.3.0" 416 | rules: 417 | - apiGroups: ["admissionregistration.k8s.io"] 418 | resources: ["mutatingwebhookconfigurations"] 419 | verbs: ["get", "create", "patch"] 420 | - apiGroups: ["certificates.k8s.io"] 421 | resources: ["certificatesigningrequests"] 422 | verbs: ["create", "get", "delete"] 423 | - apiGroups: ["certificates.k8s.io"] 424 | resources: ["certificatesigningrequests/approval"] 425 | verbs: ["update"] 426 | - apiGroups: [""] 427 | resources: ["secrets"] 428 | verbs: ["create", "get", "patch"] 429 | - apiGroups: [""] 430 | resources: ["configmaps"] 431 | verbs: ["get"] 432 | - apiGroups: ["certificates.k8s.io"] 433 | resources: 434 | - "signers" 435 | resourceNames: 436 | - "kubernetes.io/legacy-unknown" 437 | verbs: ["approve"] 438 | 439 | --- 440 | # Source: nri-bundle/charts/newrelic-logging/templates/daemonset.yaml 441 | 442 | apiVersion: apps/v1 443 | #apiVersion: apps/v1 444 | kind: DaemonSet 445 | metadata: 446 | labels: 447 | app: newrelic-logging 448 | chart: newrelic-logging-1.1.4 449 | release: 
nri-bundle 450 | app.kubernetes.io/name: newrelic-logging 451 | name: nri-bundle-newrelic-logging 452 | spec: 453 | updateStrategy: 454 | type: RollingUpdate 455 | selector: 456 | matchLabels: 457 | app: newrelic-logging 458 | release: nri-bundle 459 | template: 460 | metadata: 461 | labels: 462 | app: newrelic-logging 463 | release: nri-bundle 464 | app.kubernetes.io/name: newrelic-logging 465 | spec: 466 | serviceAccountName: nri-bundle-newrelic-logging 467 | hostNetwork: true # This option is a requirement for the Infrastructure Agent to report the proper hostname in New Relic. 468 | dnsPolicy: ClusterFirstWithHostNet 469 | terminationGracePeriodSeconds: 10 470 | containers: 471 | - name: newrelic-logging 472 | image: "newrelic/newrelic-fluentbit-output:1.3.0" 473 | imagePullPolicy: "IfNotPresent" 474 | env: 475 | - name: ENDPOINT 476 | value: "https://log-api.eu.newrelic.com/log/v1" 477 | - name: SOURCE 478 | value: "kubernetes" 479 | - name: LICENSE_KEY 480 | valueFrom: 481 | secretKeyRef: 482 | name: nri-bundle-newrelic-logging-config 483 | key: license 484 | - name: CLUSTER_NAME 485 | value: 486 | - name: LOG_LEVEL 487 | value: "info" 488 | - name: PATH 489 | value: "/var/log/containers/*.log" 490 | command: 491 | - /fluent-bit/bin/fluent-bit 492 | - -c 493 | - /fluent-bit/etc/fluent-bit.conf 494 | - -e 495 | - /fluent-bit/bin/out_newrelic.so 496 | volumeMounts: 497 | - name: fluent-bit-config 498 | mountPath: /fluent-bit/etc 499 | - name: var 500 | mountPath: /var 501 | resources: 502 | limits: 503 | cpu: 500m 504 | memory: 128Mi 505 | requests: 506 | cpu: 250m 507 | memory: 64Mi 508 | 509 | volumes: 510 | - name: fluent-bit-config 511 | configMap: 512 | name: nri-bundle-newrelic-logging-fluent-bit-config 513 | - name: var 514 | hostPath: 515 | path: /var 516 | tolerations: 517 | - effect: NoSchedule 518 | operator: Exists 519 | - effect: NoExecute 520 | operator: Exists 521 | 522 | 523 | --- 524 | # Source: 
nri-bundle/charts/newrelic-logging/templates/clusterrole.yaml 525 | 526 | apiVersion: rbac.authorization.k8s.io/v1 527 | kind: ClusterRole 528 | metadata: 529 | labels: 530 | app: newrelic-logging 531 | chart: newrelic-logging-1.1.4 532 | release: nri-bundle 533 | app.kubernetes.io/name: newrelic-logging 534 | name: nri-bundle-newrelic-logging 535 | rules: 536 | - apiGroups: [""] 537 | resources: 538 | - namespaces 539 | - pods 540 | verbs: ["get", "list", "watch"] 541 | --- 542 | # Source: nri-bundle/charts/kube-state-metrics/templates/servicemonitor.yaml 543 | 544 | 545 | --- 546 | # Source: nri-bundle/charts/kube-state-metrics/templates/psp-clusterrole.yaml 547 | 548 | 549 | --- 550 | # Source: nri-bundle/charts/kube-state-metrics/templates/clusterrolebinding.yaml 551 | apiVersion: rbac.authorization.k8s.io/v1beta1 552 | kind: ClusterRoleBinding 553 | metadata: 554 | labels: 555 | app.kubernetes.io/name: kube-state-metrics 556 | helm.sh/chart: kube-state-metrics-2.8.1 557 | app.kubernetes.io/instance: nri-bundle 558 | name: nri-bundle-kube-state-metrics 559 | roleRef: 560 | apiGroup: rbac.authorization.k8s.io 561 | kind: ClusterRole 562 | name: nri-bundle-kube-state-metrics 563 | subjects: 564 | - kind: ServiceAccount 565 | name: nri-bundle-kube-state-metrics 566 | namespace: default 567 | --- 568 | # Source: nri-bundle/charts/nri-kube-events/templates/clusterrolebinding.yaml 569 | 570 | apiVersion: rbac.authorization.k8s.io/v1 571 | kind: ClusterRoleBinding 572 | metadata: 573 | labels: 574 | app: nri-kube-events 575 | app.kubernetes.io/name: nri-kube-events 576 | helm.sh/chart: nri-kube-events-1.2.1 577 | app.kubernetes.io/instance: nri-bundle 578 | app.kubernetes.io/version: "1.3.0" 579 | name: nri-bundle-nri-kube-events 580 | roleRef: 581 | apiGroup: rbac.authorization.k8s.io 582 | kind: ClusterRole 583 | name: nri-bundle-nri-kube-events 584 | subjects: 585 | - kind: ServiceAccount 586 | name: nri-bundle-nri-kube-events 587 | namespace: default 588 | --- 589 
| # Source: nri-bundle/charts/nri-kube-events/templates/configmap.yaml 590 | apiVersion: v1 591 | kind: ConfigMap 592 | data: 593 | config.yaml: |- 594 | sinks: 595 | - name: newRelicInfra 596 | config: 597 | agentEndpoint: http://localhost:8001/v1/data 598 | clusterName: platform 599 | agentHTTPTimeout: 30s 600 | metadata: 601 | name: nri-bundle-nri-kube-events-config 602 | namespace: default 603 | labels: 604 | app: nri-kube-events 605 | app.kubernetes.io/name: nri-kube-events 606 | helm.sh/chart: nri-kube-events-1.2.1 607 | app.kubernetes.io/instance: nri-bundle 608 | app.kubernetes.io/version: "1.3.0" 609 | 610 | --- 611 | # Source: nri-bundle/charts/newrelic-infrastructure/templates/clusterrole.yaml 612 | 613 | apiVersion: rbac.authorization.k8s.io/v1 614 | kind: ClusterRole 615 | metadata: 616 | labels: 617 | app: newrelic-infrastructure 618 | chart: newrelic-infrastructure-1.3.0 619 | release: nri-bundle 620 | mode: privileged 621 | name: nri-bundle-newrelic-infrastructure 622 | rules: 623 | - apiGroups: [""] 624 | resources: 625 | - "nodes" 626 | - "nodes/metrics" 627 | - "nodes/stats" 628 | - "nodes/proxy" 629 | - "pods" 630 | - "services" 631 | - "secrets" 632 | verbs: ["get", "list"] 633 | - nonResourceURLs: ["/metrics"] 634 | verbs: ["get"] 635 | --- 636 | # Source: nri-bundle/charts/kube-state-metrics/templates/stsdiscovery-role.yaml 637 | 638 | 639 | --- 640 | # Source: nri-bundle/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml 641 | 642 | 643 | --- 644 | # Source: nri-bundle/charts/nri-prometheus/templates/clusterrole.yaml 645 | 646 | apiVersion: rbac.authorization.k8s.io/v1 647 | kind: ClusterRole 648 | metadata: 649 | name: nri-bundle-nri-prometheus 650 | labels: 651 | app.kubernetes.io/name: nri-prometheus 652 | helm.sh/chart: nri-prometheus-1.2.0 653 | app.kubernetes.io/instance: nri-bundle 654 | app.kubernetes.io/version: "2.0.0" 655 | rules: 656 | - apiGroups: [""] 657 | resources: 658 | - "nodes" 659 | - "nodes/metrics" 660 | - 
"nodes/stats" 661 | - "nodes/proxy" 662 | - "pods" 663 | - "services" 664 | verbs: ["get", "list", "watch"] 665 | - nonResourceURLs: 666 | - /metrics 667 | verbs: 668 | - get 669 | --- 670 | # Source: nri-bundle/charts/nri-metadata-injection/templates/service.yaml 671 | apiVersion: v1 672 | kind: Service 673 | metadata: 674 | name: nri-bundle-nri-metadata-injection 675 | namespace: default 676 | labels: 677 | app.kubernetes.io/name: nri-metadata-injection 678 | helm.sh/chart: nri-metadata-injection-1.1.0 679 | app.kubernetes.io/instance: nri-bundle 680 | app.kubernetes.io/version: "1.3.0" 681 | spec: 682 | ports: 683 | - port: 443 684 | targetPort: 8443 685 | selector: 686 | app.kubernetes.io/name: nri-metadata-injection 687 | 688 | --- 689 | # Source: nri-bundle/charts/newrelic-infrastructure/templates/podsecuritypolicy.yaml 690 | 691 | 692 | --- 693 | # Source: nri-bundle/charts/kube-state-metrics/templates/podsecuritypolicy.yaml 694 | 695 | 696 | --- 697 | # Source: nri-bundle/charts/nri-prometheus/templates/deployment.yaml 698 | 699 | apiVersion: apps/v1 700 | kind: Deployment 701 | metadata: 702 | name: nri-bundle-nri-prometheus 703 | namespace: default 704 | labels: 705 | app.kubernetes.io/name: nri-prometheus 706 | helm.sh/chart: nri-prometheus-1.2.0 707 | app.kubernetes.io/instance: nri-bundle 708 | app.kubernetes.io/version: "2.0.0" 709 | spec: 710 | replicas: 1 711 | selector: 712 | matchLabels: 713 | app.kubernetes.io/name: nri-prometheus 714 | template: 715 | metadata: 716 | labels: 717 | app.kubernetes.io/name: nri-prometheus 718 | helm.sh/chart: nri-prometheus-1.2.0 719 | app.kubernetes.io/instance: nri-bundle 720 | app.kubernetes.io/version: "2.0.0" 721 | spec: 722 | serviceAccountName: nri-prometheus 723 | containers: 724 | - name: nri-prometheus 725 | image: newrelic/nri-prometheus:2.0.0 726 | args: 727 | - "--configfile=/etc/nri-prometheus/config.yaml" 728 | ports: 729 | - containerPort: 8080 730 | volumeMounts: 731 | - name: config-volume 732 | 
mountPath: /etc/nri-prometheus/ 733 | env: 734 | - name: "LICENSE_KEY" 735 | valueFrom: 736 | secretKeyRef: 737 | name: nri-bundle-nri-prometheus-config 738 | key: licenseKey 739 | - name: "BEARER_TOKEN_FILE" 740 | value: "/var/run/secrets/kubernetes.io/serviceaccount/token" 741 | - name: "CA_FILE" 742 | value: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" 743 | volumes: 744 | - name: config-volume 745 | configMap: 746 | name: nri-bundle-nri-prometheus-config 747 | 748 | --- 749 | # Source: nri-bundle/charts/kube-state-metrics/templates/clusterrole.yaml 750 | apiVersion: rbac.authorization.k8s.io/v1beta1 751 | kind: ClusterRole 752 | metadata: 753 | labels: 754 | app.kubernetes.io/name: kube-state-metrics 755 | helm.sh/chart: kube-state-metrics-2.8.1 756 | app.kubernetes.io/instance: nri-bundle 757 | name: nri-bundle-kube-state-metrics 758 | rules: 759 | 760 | - apiGroups: ["certificates.k8s.io"] 761 | resources: 762 | - certificatesigningrequests 763 | verbs: ["list", "watch"] 764 | 765 | - apiGroups: [""] 766 | resources: 767 | - configmaps 768 | verbs: ["list", "watch"] 769 | 770 | - apiGroups: ["batch"] 771 | resources: 772 | - cronjobs 773 | verbs: ["list", "watch"] 774 | 775 | - apiGroups: ["extensions", "apps"] 776 | resources: 777 | - daemonsets 778 | verbs: ["list", "watch"] 779 | 780 | - apiGroups: ["extensions", "apps"] 781 | resources: 782 | - deployments 783 | verbs: ["list", "watch"] 784 | 785 | - apiGroups: [""] 786 | resources: 787 | - endpoints 788 | verbs: ["list", "watch"] 789 | 790 | - apiGroups: ["autoscaling"] 791 | resources: 792 | - horizontalpodautoscalers 793 | verbs: ["list", "watch"] 794 | 795 | - apiGroups: ["extensions", "networking.k8s.io"] 796 | resources: 797 | - ingresses 798 | verbs: ["list", "watch"] 799 | 800 | - apiGroups: ["batch"] 801 | resources: 802 | - jobs 803 | verbs: ["list", "watch"] 804 | 805 | - apiGroups: [""] 806 | resources: 807 | - limitranges 808 | verbs: ["list", "watch"] 809 | 810 | - apiGroups: 
["admissionregistration.k8s.io"] 811 | resources: 812 | - mutatingwebhookconfigurations 813 | verbs: ["list", "watch"] 814 | 815 | - apiGroups: [""] 816 | resources: 817 | - namespaces 818 | verbs: ["list", "watch"] 819 | 820 | - apiGroups: ["networking.k8s.io"] 821 | resources: 822 | - networkpolicies 823 | verbs: ["list", "watch"] 824 | 825 | - apiGroups: [""] 826 | resources: 827 | - nodes 828 | verbs: ["list", "watch"] 829 | 830 | - apiGroups: [""] 831 | resources: 832 | - persistentvolumeclaims 833 | verbs: ["list", "watch"] 834 | 835 | - apiGroups: [""] 836 | resources: 837 | - persistentvolumes 838 | verbs: ["list", "watch"] 839 | 840 | - apiGroups: ["policy"] 841 | resources: 842 | - poddisruptionbudgets 843 | verbs: ["list", "watch"] 844 | 845 | - apiGroups: [""] 846 | resources: 847 | - pods 848 | verbs: ["list", "watch"] 849 | 850 | - apiGroups: ["extensions", "apps"] 851 | resources: 852 | - replicasets 853 | verbs: ["list", "watch"] 854 | 855 | - apiGroups: [""] 856 | resources: 857 | - replicationcontrollers 858 | verbs: ["list", "watch"] 859 | 860 | - apiGroups: [""] 861 | resources: 862 | - resourcequotas 863 | verbs: ["list", "watch"] 864 | 865 | - apiGroups: [""] 866 | resources: 867 | - secrets 868 | verbs: ["list", "watch"] 869 | 870 | - apiGroups: [""] 871 | resources: 872 | - services 873 | verbs: ["list", "watch"] 874 | 875 | - apiGroups: ["apps"] 876 | resources: 877 | - statefulsets 878 | verbs: ["list", "watch"] 879 | 880 | - apiGroups: ["storage.k8s.io"] 881 | resources: 882 | - storageclasses 883 | verbs: ["list", "watch"] 884 | 885 | - apiGroups: ["admissionregistration.k8s.io"] 886 | resources: 887 | - validatingwebhookconfigurations 888 | verbs: ["list", "watch"] 889 | 890 | - apiGroups: ["storage.k8s.io"] 891 | resources: 892 | - volumeattachments 893 | verbs: ["list", "watch"] 894 | 895 | - apiGroups: ["autoscaling.k8s.io"] 896 | resources: 897 | - verticalpodautoscalers 898 | verbs: ["list", "watch"] 899 | 900 | --- 901 | # Source: 
nri-bundle/charts/newrelic-infrastructure/templates/clusterrolebinding.yaml 902 | 903 | apiVersion: rbac.authorization.k8s.io/v1 904 | kind: ClusterRoleBinding 905 | metadata: 906 | labels: 907 | app: newrelic-infrastructure 908 | chart: newrelic-infrastructure-1.3.0 909 | release: nri-bundle 910 | mode: privileged 911 | name: nri-bundle-newrelic-infrastructure 912 | roleRef: 913 | apiGroup: rbac.authorization.k8s.io 914 | kind: ClusterRole 915 | name: nri-bundle-newrelic-infrastructure 916 | subjects: 917 | - kind: ServiceAccount 918 | name: nri-bundle-newrelic-infrastructure 919 | namespace: default 920 | --- 921 | # Source: nri-bundle/charts/nri-kube-events/templates/deployment.yaml 922 | 923 | apiVersion: apps/v1 924 | kind: Deployment 925 | metadata: 926 | name: nri-bundle-nri-kube-events 927 | namespace: default 928 | labels: 929 | app: nri-kube-events 930 | app.kubernetes.io/name: nri-kube-events 931 | helm.sh/chart: nri-kube-events-1.2.1 932 | app.kubernetes.io/instance: nri-bundle 933 | app.kubernetes.io/version: "1.3.0" 934 | spec: 935 | replicas: 1 936 | selector: 937 | matchLabels: 938 | app.kubernetes.io/name: nri-kube-events 939 | template: 940 | metadata: 941 | labels: 942 | app: nri-kube-events 943 | app.kubernetes.io/name: nri-kube-events 944 | helm.sh/chart: nri-kube-events-1.2.1 945 | app.kubernetes.io/instance: nri-bundle 946 | app.kubernetes.io/version: "1.3.0" 947 | spec: 948 | containers: 949 | - name: kube-events 950 | image: newrelic/nri-kube-events:1.3.0 951 | imagePullPolicy: IfNotPresent 952 | args: ["-config", "/app/config/config.yaml", "-loglevel", "debug"] 953 | volumeMounts: 954 | - name: config-volume 955 | mountPath: /app/config 956 | - name: infra-agent 957 | image: newrelic/k8s-events-forwarder:1.12.0 958 | securityContext: 959 | privileged: false 960 | runAsUser: 1000 # nri-kube-events 961 | runAsNonRoot: false 962 | allowPrivilegeEscalation: false 963 | readOnlyRootFilesystem: true 964 | ports: 965 | - containerPort: 8001 966 | 
env: 967 | - name: NRIA_LICENSE_KEY 968 | valueFrom: 969 | secretKeyRef: 970 | name: nri-bundle-nri-kube-events-config 971 | key: licenseKey 972 | volumeMounts: 973 | - mountPath: /var/db/newrelic-infra/data 974 | name: tmpfs-data 975 | - mountPath: /var/db/newrelic-infra/user_data 976 | name: tmpfs-user-data 977 | - mountPath: /tmp 978 | name: tmpfs-tmp 979 | serviceAccountName: nri-bundle-nri-kube-events 980 | volumes: 981 | - name: config-volume 982 | configMap: 983 | name: nri-bundle-nri-kube-events-config 984 | - name: tmpfs-data 985 | emptyDir: {} 986 | - name: tmpfs-user-data 987 | emptyDir: {} 988 | - name: tmpfs-tmp 989 | emptyDir: {} 990 | 991 | --- 992 | # Source: nri-bundle/charts/newrelic-logging/templates/secret.yaml 993 | 994 | apiVersion: v1 995 | kind: Secret 996 | metadata: 997 | labels: 998 | app: newrelic-logging 999 | chart: newrelic-logging-1.1.4 1000 | release: nri-bundle 1001 | app.kubernetes.io/name: newrelic-logging 1002 | name: nri-bundle-newrelic-logging-config 1003 | type: Opaque 1004 | stringData: 1005 | license: {{ newrelic_license_key }} 1006 | 1007 | --- 1008 | # Source: nri-bundle/charts/kube-state-metrics/templates/service.yaml 1009 | apiVersion: v1 1010 | kind: Service 1011 | metadata: 1012 | name: nri-bundle-kube-state-metrics 1013 | namespace: default 1014 | labels: 1015 | app.kubernetes.io/name: kube-state-metrics 1016 | helm.sh/chart: "kube-state-metrics-2.8.1" 1017 | app.kubernetes.io/instance: "nri-bundle" 1018 | annotations: 1019 | prometheus.io/scrape: 'true' 1020 | spec: 1021 | type: "ClusterIP" 1022 | ports: 1023 | - name: "http" 1024 | protocol: TCP 1025 | port: 8080 1026 | targetPort: 8080 1027 | selector: 1028 | app.kubernetes.io/name: kube-state-metrics 1029 | app.kubernetes.io/instance: nri-bundle 1030 | 1031 | --- 1032 | # Source: nri-bundle/charts/nri-prometheus/templates/secret.yaml 1033 | 1034 | apiVersion: v1 1035 | kind: Secret 1036 | metadata: 1037 | name: nri-bundle-nri-prometheus-config 1038 | namespace: 
default 1039 | labels: 1040 | app.kubernetes.io/name: nri-prometheus 1041 | helm.sh/chart: nri-prometheus-1.2.0 1042 | app.kubernetes.io/instance: nri-bundle 1043 | app.kubernetes.io/version: "2.0.0" 1044 | type: Opaque 1045 | stringData: 1046 | licenseKey: {{ newrelic_license_key }} 1047 | 1048 | --- 1049 | # Source: nri-bundle/charts/newrelic-logging/templates/serviceaccount.yaml 1050 | 1051 | apiVersion: v1 1052 | kind: ServiceAccount 1053 | metadata: 1054 | labels: 1055 | app: newrelic-logging 1056 | chart: newrelic-logging-1.1.4 1057 | release: "nri-bundle" 1058 | name: nri-bundle-newrelic-logging 1059 | --- 1060 | # Source: nri-bundle/charts/nri-kube-events/templates/clusterrole.yaml 1061 | 1062 | apiVersion: rbac.authorization.k8s.io/v1 1063 | kind: ClusterRole 1064 | metadata: 1065 | labels: 1066 | app: nri-kube-events 1067 | app.kubernetes.io/name: nri-kube-events 1068 | helm.sh/chart: nri-kube-events-1.2.1 1069 | app.kubernetes.io/instance: nri-bundle 1070 | app.kubernetes.io/version: "1.3.0" 1071 | name: nri-bundle-nri-kube-events 1072 | rules: 1073 | - apiGroups: [""] 1074 | resources: ["events"] 1075 | verbs: ["get", "watch", "list"] 1076 | --- 1077 | # Source: nri-bundle/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml 1078 | 1079 | 1080 | --- 1081 | # Source: nri-bundle/charts/nri-metadata-injection/templates/serviceaccount.yaml 1082 | 1083 | apiVersion: v1 1084 | kind: ServiceAccount 1085 | metadata: 1086 | name: nri-bundle-nri-metadata-injection 1087 | namespace: default 1088 | labels: 1089 | app.kubernetes.io/name: nri-metadata-injection 1090 | helm.sh/chart: nri-metadata-injection-1.1.0 1091 | app.kubernetes.io/instance: nri-bundle 1092 | app.kubernetes.io/version: "1.3.0" 1093 | --- 1094 | # Source: nri-bundle/charts/nri-prometheus/templates/configmap.yaml 1095 | kind: ConfigMap 1096 | metadata: 1097 | name: nri-bundle-nri-prometheus-config 1098 | namespace: default 1099 | labels: 1100 | app.kubernetes.io/name: nri-prometheus 
1101 | helm.sh/chart: nri-prometheus-1.2.0 1102 | app.kubernetes.io/instance: nri-bundle 1103 | app.kubernetes.io/version: "2.0.0" 1104 | apiVersion: v1 1105 | data: 1106 | config.yaml: | 1107 | cluster_name: platform 1108 | require_scrape_enabled_label_for_nodes: true 1109 | 1110 | 1111 | --- 1112 | # Source: nri-bundle/charts/nri-metadata-injection/templates/deployment.yaml 1113 | 1114 | apiVersion: apps/v1 1115 | kind: Deployment 1116 | metadata: 1117 | name: nri-bundle-nri-metadata-injection 1118 | namespace: default 1119 | labels: 1120 | app.kubernetes.io/name: nri-metadata-injection 1121 | helm.sh/chart: nri-metadata-injection-1.1.0 1122 | app.kubernetes.io/instance: nri-bundle 1123 | app.kubernetes.io/version: "1.3.0" 1124 | spec: 1125 | replicas: 1 1126 | selector: 1127 | matchLabels: 1128 | app.kubernetes.io/name: nri-metadata-injection 1129 | template: 1130 | metadata: 1131 | labels: 1132 | app.kubernetes.io/name: nri-metadata-injection 1133 | helm.sh/chart: nri-metadata-injection-1.1.0 1134 | app.kubernetes.io/instance: nri-bundle 1135 | app.kubernetes.io/version: "1.3.0" 1136 | spec: 1137 | serviceAccountName: nri-bundle-nri-metadata-injection 1138 | containers: 1139 | - name: nri-metadata-injection 1140 | image: "newrelic/k8s-metadata-injection:1.3.0" 1141 | imagePullPolicy: "IfNotPresent" 1142 | env: 1143 | - name: clusterName 1144 | value: platform 1145 | volumeMounts: 1146 | - name: tls-key-cert-pair 1147 | mountPath: /etc/tls-key-cert-pair 1148 | readinessProbe: 1149 | httpGet: 1150 | path: /health 1151 | port: 8080 1152 | initialDelaySeconds: 1 1153 | periodSeconds: 1 1154 | resources: 1155 | limits: 1156 | memory: 80M 1157 | requests: 1158 | cpu: 100m 1159 | memory: 30M 1160 | 1161 | volumes: 1162 | - name: tls-key-cert-pair 1163 | secret: 1164 | secretName: nri-bundle-nri-metadata-injection 1165 | 1166 | --- 1167 | # Source: nri-bundle/charts/newrelic-infrastructure/templates/serviceaccount.yaml 1168 | 1169 | apiVersion: v1 1170 | kind: 
ServiceAccount 1171 | metadata: 1172 | namespace: default 1173 | labels: 1174 | app: newrelic-infrastructure 1175 | chart: newrelic-infrastructure-1.3.0 1176 | release: "nri-bundle" 1177 | name: nri-bundle-newrelic-infrastructure 1178 | --- 1179 | # Source: nri-bundle/charts/nri-prometheus/templates/serviceaccount.yaml 1180 | 1181 | apiVersion: v1 1182 | kind: ServiceAccount 1183 | metadata: 1184 | name: nri-prometheus 1185 | namespace: default 1186 | labels: 1187 | app.kubernetes.io/name: nri-prometheus 1188 | helm.sh/chart: nri-prometheus-1.2.0 1189 | app.kubernetes.io/instance: nri-bundle 1190 | app.kubernetes.io/version: "2.0.0" 1191 | --- 1192 | # Source: nri-bundle/charts/newrelic-infrastructure/templates/daemonset.yaml 1193 | 1194 | apiVersion: apps/v1 1195 | kind: DaemonSet 1196 | metadata: 1197 | namespace: default 1198 | labels: 1199 | app: newrelic-infrastructure 1200 | chart: newrelic-infrastructure-1.3.0 1201 | release: nri-bundle 1202 | mode: privileged 1203 | name: nri-bundle-newrelic-infrastructure 1204 | spec: 1205 | updateStrategy: 1206 | type: RollingUpdate 1207 | selector: 1208 | matchLabels: 1209 | app: newrelic-infrastructure 1210 | release: nri-bundle 1211 | template: 1212 | metadata: 1213 | labels: 1214 | app: newrelic-infrastructure 1215 | release: nri-bundle 1216 | mode: privileged 1217 | spec: 1218 | serviceAccountName: nri-bundle-newrelic-infrastructure 1219 | hostNetwork: true 1220 | dnsPolicy: ClusterFirstWithHostNet 1221 | containers: 1222 | - name: newrelic-infrastructure 1223 | image: "newrelic/infrastructure-k8s:1.26.0" 1224 | imagePullPolicy: "IfNotPresent" 1225 | securityContext: 1226 | privileged: true 1227 | env: 1228 | - name: NRIA_LICENSE_KEY 1229 | valueFrom: 1230 | secretKeyRef: 1231 | name: nri-bundle-newrelic-infrastructure-config 1232 | key: license 1233 | - name: "CLUSTER_NAME" 1234 | value: platform 1235 | - name: ETCD_TLS_SECRET_NAMESPACE 1236 | value: "default" 1237 | - name: "NRIA_DISPLAY_NAME" 1238 | valueFrom: 
1239 | fieldRef: 1240 | apiVersion: "v1" 1241 | fieldPath: "spec.nodeName" 1242 | - name: "NRK8S_NODE_NAME" 1243 | valueFrom: 1244 | fieldRef: 1245 | apiVersion: "v1" 1246 | fieldPath: "spec.nodeName" 1247 | - name: "NRIA_CUSTOM_ATTRIBUTES" 1248 | value: '{"clusterName":"$(CLUSTER_NAME)"}' 1249 | - name: "NRIA_PASSTHROUGH_ENVIRONMENT" 1250 | value: "KUBERNETES_SERVICE_HOST,KUBERNETES_SERVICE_PORT,CLUSTER_NAME,CADVISOR_PORT,NRK8S_NODE_NAME,KUBE_STATE_METRICS_URL,KUBE_STATE_METRICS_POD_LABEL,TIMEOUT,ETCD_TLS_SECRET_NAME,ETCD_TLS_SECRET_NAMESPACE,API_SERVER_SECURE_PORT,KUBE_STATE_METRICS_SCHEME,KUBE_STATE_METRICS_PORT,SCHEDULER_ENDPOINT_URL,ETCD_ENDPOINT_URL,CONTROLLER_MANAGER_ENDPOINT_URL,API_SERVER_ENDPOINT_URL,DISABLE_KUBE_STATE_METRICS" 1251 | volumeMounts: 1252 | - name: dev 1253 | mountPath: /dev 1254 | - name: host-docker-socket 1255 | mountPath: /var/run/docker.sock 1256 | - name: log 1257 | mountPath: /var/log 1258 | - name: host-volume 1259 | mountPath: /host 1260 | readOnly: true 1261 | resources: 1262 | limits: 1263 | memory: 300M 1264 | requests: 1265 | cpu: 100m 1266 | memory: 150M 1267 | 1268 | volumes: 1269 | - name: dev 1270 | hostPath: 1271 | path: /dev 1272 | - name: host-docker-socket 1273 | hostPath: 1274 | path: /var/run/docker.sock 1275 | - name: log 1276 | hostPath: 1277 | path: /var/log 1278 | - name: host-volume 1279 | hostPath: 1280 | path: / 1281 | tolerations: 1282 | - effect: NoSchedule 1283 | operator: Exists 1284 | - effect: NoExecute 1285 | operator: Exists 1286 | 1287 | 1288 | --- 1289 | # Source: nri-bundle/charts/newrelic-infrastructure/templates/configmap.yaml 1290 | --------------------------------------------------------------------------------