├── README.md ├── calico ├── README.md ├── networkpolicy-allow-egress.yml ├── networkpolicy-isolation-egress.yml ├── networkpolicy-isolation.yml ├── networkpolicy-nginx.yml └── nginx.yml ├── cert-manager ├── README.md ├── certificate-prod.yml ├── certificate-staging.yml ├── issuer-prod.yml ├── issuer-staging.yml ├── myapp-ingress.yml └── myapp.yml ├── dex ├── README.md ├── configmap-ldap.yaml ├── dex-ns.yaml ├── dex.yaml ├── gencert-ldap.sh ├── gencert.sh ├── ldap │ ├── certinfo.ldif │ └── users.ldif └── user.yaml ├── helm ├── README.md └── rbac-config.yml ├── istio └── README.md ├── openshift └── README.md ├── rook ├── README.md ├── fs-demo.yaml ├── mysql-demo.yaml ├── rook-cluster.yaml ├── rook-storageclass-fs.yaml ├── rook-storageclass-objectstore.yaml ├── rook-storageclass.yaml └── rook-tools.yaml ├── scripts ├── create-user.sh ├── install-kubernetes.sh └── install-node.sh └── vault ├── README.md ├── etcd-operator-deploy.yaml ├── etcd-rbac.yaml ├── etcd_crds.yaml ├── example_vault.yaml ├── policy.hcl ├── vault-deployment.yaml ├── vault-rbac.yaml └── vault_crd.yaml /README.md: -------------------------------------------------------------------------------- 1 | # on-prem-or-cloud-agnostic-kubernetes 2 | Setting up and running an on-prem or cloud agnostic Kubernetes cluster 3 | 4 | This is the course material for the On-Prem or Cloud Agnostic Kubernetes course on Udemy (see https://www.udemy.com/learn-devops-on-prem-or-cloud-agnostic-kubernetes/?couponCode=KUBERNETES_GIT) 5 | -------------------------------------------------------------------------------- /calico/README.md: -------------------------------------------------------------------------------- 1 | # Network policy 2 | 3 | ## ingress 4 | 5 | ``` 6 | kubectl create -f nginx.yml 7 | kubectl create -f networkpolicy-isolation.yml 8 | kubectl create -f networkpolicy-nginx.yml 9 | ``` 10 | 11 | ``` 12 | kubectl run -it --rm -l app=access-nginx --image busybox busybox 13 | ``` 14 | 15 | ## egress 16 | ``` 17 | kubectl replace -f networkpolicy-isolation-egress.yml 18 | kubectl create -f networkpolicy-allow-egress.yml 19 | ``` 20 | -------------------------------------------------------------------------------- /calico/networkpolicy-allow-egress.yml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: allow-google 5 | spec: 6 | podSelector: 7 | matchLabels: 8 | app: egress 9 | policyTypes: 10 | - Egress 11 | egress: 12 | - to: 13 | - ipBlock: 14 | cidr: 8.8.8.8/32 15 | --- 16 | apiVersion: networking.k8s.io/v1 17 | kind: NetworkPolicy 18 | metadata: 19 | name: allow-dns 20 | spec: 21 | podSelector: 22 | matchLabels: 23 | app: egress 24 | policyTypes: 25 | - Egress 26 | egress: 27 | - to: 28 | # allow DNS resolution 29 | ports: 30 | - port: 53 31 | protocol: UDP 32 | - port: 53 33 | protocol: TCP 34 | -------------------------------------------------------------------------------- /calico/networkpolicy-isolation-egress.yml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: default-deny 5 | spec: 6 | podSelector: {} 7 | policyTypes: 8 | - Ingress 9 | - Egress 10 | -------------------------------------------------------------------------------- /calico/networkpolicy-isolation.yml: -------------------------------------------------------------------------------- 1 | kind: NetworkPolicy 2 | apiVersion: networking.k8s.io/v1 3 | 
metadata: 4 | name: default-deny 5 | namespace: default 6 | spec: 7 | podSelector: 8 | matchLabels: {} 9 | -------------------------------------------------------------------------------- /calico/networkpolicy-nginx.yml: -------------------------------------------------------------------------------- 1 | kind: NetworkPolicy 2 | apiVersion: networking.k8s.io/v1 3 | metadata: 4 | name: access-nginx 5 | namespace: default 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | app: nginx 10 | ingress: 11 | - from: 12 | #- ipBlock: 13 | # cidr: 172.17.0.0/16 14 | - podSelector: 15 | matchLabels: 16 | app: access-nginx 17 | ports: 18 | - protocol: TCP 19 | port: 80 20 | -------------------------------------------------------------------------------- /calico/nginx.yml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: nginx 5 | spec: 6 | replicas: 3 7 | template: 8 | metadata: 9 | labels: 10 | app: nginx 11 | spec: 12 | containers: 13 | - name: nginx 14 | image: nginx 15 | ports: 16 | - name: http-port 17 | containerPort: 80 18 | --- 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: nginx 23 | labels: 24 | app: nginx 25 | spec: 26 | ports: 27 | - port: 80 28 | nodePort: 31001 29 | targetPort: http-port 30 | protocol: TCP 31 | selector: 32 | app: nginx 33 | type: NodePort 34 | -------------------------------------------------------------------------------- /cert-manager/README.md: -------------------------------------------------------------------------------- 1 | # install nginx ingress 2 | 3 | ``` 4 | helm install --name my-ingress stable/nginx-ingress \ 5 | --set controller.kind=DaemonSet \ 6 | --set controller.service.type=NodePort \ 7 | --set controller.hostNetwork=true 8 | ``` 9 | 10 | # start myapp 11 | 12 | Create myapp and add an ingress rule: 13 | 14 | ``` 15 | kubectl create -f myapp.yml 16 | kubectl create -f myapp-ingress.yml 17 | ``` 18 | 19 | # install cert-manager 20 | 21 | ``` 22 | helm install \ 23 | --name cert-manager \ 24 | --namespace kube-system \ 25 | stable/cert-manager 26 | ``` 27 | -------------------------------------------------------------------------------- /cert-manager/certificate-prod.yml: -------------------------------------------------------------------------------- 1 | apiVersion: certmanager.k8s.io/v1alpha1 2 | kind: Certificate 3 | metadata: 4 | name: myapp 5 | namespace: default 6 | spec: 7 | secretName: myapp-tls-prod 8 | issuerRef: 9 | name: myapp-letsncrypt-prod 10 | commonName: myapp.newtech.academy 11 | #dnsNames: 12 | #- www.myapp.newtech.academy 13 | acme: 14 | config: 15 | - http01: 16 | ingress: myapp 17 | domains: 18 | - myapp.newtech.academy 19 | #- www.myapp.newtech.academy 20 | -------------------------------------------------------------------------------- /cert-manager/certificate-staging.yml: -------------------------------------------------------------------------------- 1 | apiVersion: certmanager.k8s.io/v1alpha1 2 | kind: Certificate 3 | metadata: 4 | name: myapp 5 | namespace: default 6 | spec: 7 | secretName: myapp-tls-staging 8 | issuerRef: 9 | name: myapp-letsncrypt-staging 10 | commonName: myapp.newtech.academy 11 | #dnsNames: 12 | #- www.myapp.newtech.academy 13 | acme: 14 | config: 15 | - http01: 16 | ingress: myapp 17 | domains: 18 | - myapp.newtech.academy 19 | #- www.myapp.newtech.academy 20 | -------------------------------------------------------------------------------- /cert-manager/issuer-prod.yml: 
-------------------------------------------------------------------------------- 1 | apiVersion: certmanager.k8s.io/v1alpha1 2 | kind: Issuer 3 | metadata: 4 | name: myapp-letsncrypt-prod 5 | spec: 6 | acme: 7 | # The ACME server URL 8 | server: https://acme-v02.api.letsencrypt.org/directory 9 | # Email address used for ACME registration 10 | email: your@email.inv 11 | # Name of a secret used to store the ACME account private key 12 | privateKeySecretRef: 13 | name: myapp-letsncrypt-prod 14 | # Enable HTTP01 validations 15 | http01: {} 16 | -------------------------------------------------------------------------------- /cert-manager/issuer-staging.yml: -------------------------------------------------------------------------------- 1 | apiVersion: certmanager.k8s.io/v1alpha1 2 | kind: Issuer 3 | metadata: 4 | name: myapp-letsncrypt-staging 5 | namespace: default 6 | spec: 7 | acme: 8 | # The ACME server URL 9 | server: https://acme-staging-v02.api.letsencrypt.org/directory 10 | # Email address used for ACME registration 11 | email: your@email.inv 12 | # Name of a secret used to store the ACME account private key 13 | privateKeySecretRef: 14 | name: myapp-letsncrypt-staging 15 | # Enable HTTP01 validations 16 | http01: {} 17 | -------------------------------------------------------------------------------- /cert-manager/myapp-ingress.yml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | kubernetes.io/ingress.class: nginx 6 | name: myapp 7 | namespace: default 8 | spec: 9 | #tls: 10 | #- secretName: myapp-tls-staging 11 | # hosts: 12 | # - myapp.newtech.academy 13 | rules: 14 | - host: myapp.newtech.academy 15 | http: 16 | paths: 17 | - path: / 18 | pathType: Prefix 19 | backend: 20 | service: 21 | name: myapp 22 | port: 23 | number: 3000 24 | -------------------------------------------------------------------------------- /cert-manager/myapp.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: myapp 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: myapp 10 | template: 11 | metadata: 12 | labels: 13 | app: myapp 14 | spec: 15 | containers: 16 | - name: k8s-demo 17 | image: wardviaene/k8s-demo 18 | ports: 19 | - containerPort: 3000 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | name: myapp 25 | spec: 26 | ports: 27 | - port: 3000 28 | targetPort: 3000 29 | protocol: TCP 30 | selector: 31 | app: myapp 32 | -------------------------------------------------------------------------------- /dex/README.md: -------------------------------------------------------------------------------- 1 | # Install dex 2 | 3 | Create certificate: 4 | ``` 5 | ./gencert.sh 6 | kubectl create -f dex-ns.yaml 7 | kubectl create secret tls dex.newtech.academy.tls -n dex --cert=ssl/cert.pem --key=ssl/key.pem 8 | sudo cp ssl/ca.pem /etc/kubernetes/pki/openid-ca.pem 9 | ``` 10 | 11 | Create secret: 12 | ``` 13 | kubectl create secret \ 14 | generic github-client \ 15 | -n dex \ 16 | --from-literal=client-id=$GITHUB_CLIENT_ID \ 17 | --from-literal=client-secret=$GITHUB_CLIENT_SECRET 18 | ``` 19 | 20 | kube-apiserver manifest file changes ( /etc/kubernetes/manifests/kube-apiserver.yaml): 21 | ``` 22 | - --oidc-issuer-url=https://dex.newtech.academy:32000 23 | - --oidc-client-id=example-app 24 | - --oidc-ca-file=/etc/kubernetes/pki/openid-ca.pem 25 | - 
--oidc-username-claim=email 26 | - --oidc-groups-claim=groups 27 | ``` 28 | 29 | deploy: 30 | ``` 31 | kubectl create -f dex.yaml 32 | ``` 33 | 34 | deploy example app: 35 | ``` 36 | sudo apt-get install make golang-1.9 37 | git clone https://github.com/coreos/dex.git 38 | cd dex 39 | git checkout v2.10.0 40 | export PATH=$PATH:/usr/lib/go-1.9/bin 41 | go get github.com/coreos/dex 42 | make bin/example-app 43 | export MY_IP=$(curl -s ifconfig.co) 44 | ./bin/example-app --issuer https://dex.newtech.academy:32000 --issuer-root-ca /etc/kubernetes/pki/openid-ca.pem --listen http://${MY_IP}:5555 --redirect-uri http://${MY_IP}:5555/callback 45 | ``` 46 | 47 | # Add user: 48 | ``` 49 | kubectl create -f user.yaml 50 | #kubectl config set-credentials developer --token ${TOKEN} 51 | kubectl config set-credentials developer --auth-provider=oidc --auth-provider-arg=idp-issuer-url=https://dex.newtech.academy:32000 --auth-provider-arg=client-id=example-app --auth-provider-arg=idp-certificate-authority=/etc/kubernetes/pki/openid-ca.pem --auth-provider-arg=id-token=${TOKEN} 52 | kubectl config set-context dev-default --cluster=kubernetes --namespace=default --user=developer 53 | kubectl config use-context dev-default 54 | ``` 55 | 56 | # Auto-renewal of token 57 | For autorenewal, you need to share the client secret with the end-user (not recommended) 58 | ``` 59 | kubectl config set-credentials developer --auth-provider=oidc --auth-provider-arg=idp-issuer-url=https://dex.newtech.academy:32000 --auth-provider-arg=client-id=example-app --auth-provider-arg=idp-certificate-authority=/etc/kubernetes/pki/openid-ca.pem --auth-provider-arg=id-token=${TOKEN} --auth-provider-arg=refresh-token=${REFRESH_TOKEN} --auth-provider-arg=client-secret=${CLIENT_SECRET} 60 | ``` 61 | 62 | # LDAP config 63 | 64 | ``` 65 | sudo apt-get -y install slapd ldap-utils gnutls-bin ssl-cert 66 | sudo dpkg-reconfigure slapd 67 | ./gencert-ldap.sh 68 | sudo ldapmodify -H ldapi:// -Y EXTERNAL -f ldap/certinfo.ldif 69 | ldapadd -x -D cn=admin,dc=example,dc=com -W -f ldap/users.ldif 70 | ``` 71 | 72 | Edit (with sudo) /etc/default/slapd 73 | ``` 74 | SLAPD_SERVICES="ldap:/// ldapi:/// ldaps:///" 75 | ``` 76 | and run: 77 | 78 | ``` 79 | sudo systemctl restart slapd.service 80 | ``` 81 | 82 | create LDAP CA secret and change configmap 83 | ``` 84 | cat /etc/ssl/certs/cacert.pem 85 | kubectl edit configmap ldap-tls -n dex 86 | kubectl apply -f configmap-ldap.yaml 87 | kubectl edit deploy dex -n dex # edit the ldap IP alias 88 | ``` 89 | 90 | 91 | -------------------------------------------------------------------------------- /dex/configmap-ldap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: dex 5 | namespace: dex 6 | data: 7 | config.yaml: | 8 | issuer: https://dex.newtech.academy:32000 9 | storage: 10 | type: kubernetes 11 | config: 12 | inCluster: true 13 | web: 14 | https: 0.0.0.0:5556 15 | tlsCert: /etc/dex/tls/tls.crt 16 | tlsKey: /etc/dex/tls/tls.key 17 | connectors: 18 | - type: github 19 | id: github 20 | name: GitHub 21 | config: 22 | clientID: $GITHUB_CLIENT_ID 23 | clientSecret: $GITHUB_CLIENT_SECRET 24 | redirectURI: https://dex.newtech.academy:32000/callback 25 | org: kubernetes 26 | - type: ldap 27 | id: ldap 28 | name: LDAP 29 | config: 30 | host: ldap01.example.com:636 31 | rootCA: /etc/dex/ldap-tls/cacert.pem 32 | 33 | # The DN and password for an application service account. 
The connector uses 34 | # these credentials to search for users and groups. Not required if the LDAP 35 | # server provides access for anonymous auth. 36 | # Please note that if the bind password contains a `$`, it has to be saved in an 37 | # environment variable which should be given as the value to `bindPW`. 38 | bindDN: uid=serviceaccount,ou=People,dc=example,dc=com 39 | bindPW: serviceaccountldap 40 | 41 | # The attribute to display in the provided password prompt. If unset, will 42 | # display "Username" 43 | usernamePrompt: SSO Username 44 | 45 | # User search maps a username and password entered by a user to a LDAP entry. 46 | userSearch: 47 | # BaseDN to start the search from. It will translate to the query 48 | # "(&(objectClass=person)(uid=))". 49 | baseDN: ou=People,dc=example,dc=com 50 | # Optional filter to apply when searching the directory. 51 | filter: "(objectClass=inetOrgPerson)" 52 | 53 | # username attribute used for comparing user entries. This will be translated 54 | # and combined with the other filter as "(=)". 55 | username: uid 56 | # The following three fields are direct mappings of attributes on the user entry. 57 | # String representation of the user. 58 | idAttr: uid 59 | # Required. Attribute to map to Email. 60 | emailAttr: mail 61 | # Maps to display name of users. No default value. 62 | nameAttr: cn 63 | 64 | # Group search queries for groups given a user entry. 65 | groupSearch: 66 | # BaseDN to start the search from. It will translate to the query 67 | # "(&(objectClass=group)(member=))". 68 | baseDN: ou=Groups,dc=example,dc=com 69 | # Optional filter to apply when searching the directory. 70 | filter: "(objectClass=group)" 71 | 72 | # Following two fields are used to match a user to a group. It adds an additional 73 | # requirement to the filter that an attribute in the group must match the user's 74 | # attribute value. 75 | userAttr: uid 76 | groupAttr: member 77 | 78 | # Represents group name. 79 | nameAttr: name 80 | oauth2: 81 | skipApprovalScreen: true 82 | staticClients: 83 | - id: example-app 84 | redirectURIs: 85 | - 'http://178.62.90.238:5555/callback' 86 | name: 'Example App' 87 | secret: ZXhhbXBsZS1hcHAtc2VjcmV0 88 | enablePasswordDB: false 89 | -------------------------------------------------------------------------------- /dex/dex-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: dex 5 | -------------------------------------------------------------------------------- /dex/dex.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: dex 5 | namespace: dex 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1beta1 8 | kind: ClusterRole 9 | metadata: 10 | name: dex 11 | namespace: dex 12 | rules: 13 | - apiGroups: ["dex.coreos.com"] # API group created by dex 14 | resources: ["*"] 15 | verbs: ["*"] 16 | - apiGroups: ["apiextensions.k8s.io"] 17 | resources: ["customresourcedefinitions"] 18 | verbs: ["create"] # To manage its own resources identity must be able to create customresourcedefinitions. 19 | --- 20 | apiVersion: rbac.authorization.k8s.io/v1beta1 21 | kind: ClusterRoleBinding 22 | metadata: 23 | name: dex 24 | roleRef: 25 | apiGroup: rbac.authorization.k8s.io 26 | kind: ClusterRole 27 | name: dex 28 | subjects: 29 | - kind: ServiceAccount 30 | name: dex # Service account assigned to the dex pod. 
31 | namespace: dex # The namespace dex is running in. 32 | --- 33 | apiVersion: apps/v1 34 | kind: Deployment 35 | metadata: 36 | labels: 37 | app: dex 38 | name: dex 39 | namespace: dex 40 | spec: 41 | replicas: 3 42 | selector: 43 | matchLabels: 44 | app: dex 45 | template: 46 | metadata: 47 | labels: 48 | app: dex 49 | spec: 50 | serviceAccountName: dex 51 | hostAliases: 52 | - ip: "127.1.2.3" 53 | hostnames: 54 | - "ldap01.example.com" 55 | containers: 56 | - image: ghcr.io/dexidp/dex:v2.30.0 57 | name: dex 58 | command: ["/usr/local/bin/dex", "serve", "/etc/dex/cfg/config.yaml"] 59 | 60 | ports: 61 | - name: https 62 | containerPort: 5556 63 | 64 | volumeMounts: 65 | - name: config 66 | mountPath: /etc/dex/cfg 67 | - name: tls 68 | mountPath: /etc/dex/tls 69 | - name: ldap-tls 70 | mountPath: /etc/dex/ldap-tls 71 | 72 | env: 73 | - name: GITHUB_CLIENT_ID 74 | valueFrom: 75 | secretKeyRef: 76 | name: github-client 77 | key: client-id 78 | - name: GITHUB_CLIENT_SECRET 79 | valueFrom: 80 | secretKeyRef: 81 | name: github-client 82 | key: client-secret 83 | volumes: 84 | - name: config 85 | configMap: 86 | name: dex 87 | items: 88 | - key: config.yaml 89 | path: config.yaml 90 | - name: tls 91 | secret: 92 | secretName: dex.newtech.academy.tls 93 | - name: ldap-tls 94 | configMap: 95 | name: ldap-tls 96 | items: 97 | - key: cacert.pem 98 | path: cacert.pem 99 | --- 100 | kind: ConfigMap 101 | apiVersion: v1 102 | metadata: 103 | name: ldap-tls 104 | namespace: dex 105 | data: 106 | cacert.pem: | 107 | empty 108 | --- 109 | kind: ConfigMap 110 | apiVersion: v1 111 | metadata: 112 | name: dex 113 | namespace: dex 114 | data: 115 | config.yaml: | 116 | issuer: https://dex.newtech.academy:32000 117 | storage: 118 | type: kubernetes 119 | config: 120 | inCluster: true 121 | web: 122 | https: 0.0.0.0:5556 123 | tlsCert: /etc/dex/tls/tls.crt 124 | tlsKey: /etc/dex/tls/tls.key 125 | connectors: 126 | - type: github 127 | id: github 128 | name: GitHub 129 | config: 130 | clientID: $GITHUB_CLIENT_ID 131 | clientSecret: $GITHUB_CLIENT_SECRET 132 | redirectURI: https://dex.newtech.academy:32000/callback 133 | org: kubernetes 134 | oauth2: 135 | skipApprovalScreen: true 136 | 137 | staticClients: 138 | - id: example-app 139 | redirectURIs: 140 | - 'https://dex.newtech.academy:32000/callback' 141 | - 'http://178.62.90.238:5555/callback' 142 | name: 'Example App' 143 | secret: ZXhhbXBsZS1hcHAtc2VjcmV0 144 | enablePasswordDB: false 145 | --- 146 | apiVersion: v1 147 | kind: Service 148 | metadata: 149 | name: dex 150 | namespace: dex 151 | spec: 152 | type: NodePort 153 | ports: 154 | - name: dex 155 | port: 5556 156 | protocol: TCP 157 | targetPort: 5556 158 | nodePort: 32000 159 | selector: 160 | app: dex 161 | -------------------------------------------------------------------------------- /dex/gencert-ldap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # from https://help.ubuntu.com/lts/serverguide/openldap-server.html 4 | 5 | set -x 6 | 7 | sudo sh -c "certtool --generate-privkey > /etc/ssl/private/cakey.pem" 8 | 9 | echo 'cn = Example Company 10 | ca 11 | cert_signing_key 12 | ' > /tmp/ca.info 13 | 14 | sudo mv /tmp/ca.info /etc/ssl/ca.info 15 | 16 | sudo certtool --generate-self-signed \ 17 | --load-privkey /etc/ssl/private/cakey.pem \ 18 | --template /etc/ssl/ca.info \ 19 | --outfile /etc/ssl/certs/cacert.pem 20 | 21 | sudo certtool --generate-privkey \ 22 | --bits 1024 \ 23 | --outfile /etc/ssl/private/ldap01_slapd_key.pem 24 | 25 | 
echo 'organization = Example Company 26 | cn = ldap01.example.com 27 | tls_www_server 28 | encryption_key 29 | signing_key 30 | expiration_days = 3650' > /tmp/ldap01.info 31 | 32 | sudo mv /tmp/ldap01.info /etc/ssl/ldap01.info 33 | 34 | sudo certtool --generate-certificate \ 35 | --load-privkey /etc/ssl/private/ldap01_slapd_key.pem \ 36 | --load-ca-certificate /etc/ssl/certs/cacert.pem \ 37 | --load-ca-privkey /etc/ssl/private/cakey.pem \ 38 | --template /etc/ssl/ldap01.info \ 39 | --outfile /etc/ssl/certs/ldap01_slapd_cert.pem 40 | 41 | sudo chgrp openldap /etc/ssl/private/ldap01_slapd_key.pem 42 | sudo chmod 0640 /etc/ssl/private/ldap01_slapd_key.pem 43 | sudo gpasswd -a openldap ssl-cert 44 | 45 | sudo sh -c "cat /etc/ssl/certs/cacert.pem >> /etc/ssl/certs/ca-certificates.crt" 46 | 47 | sudo systemctl restart slapd.service 48 | 49 | -------------------------------------------------------------------------------- /dex/gencert.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir -p ssl 4 | 5 | cat << EOF > ssl/req.cnf 6 | [req] 7 | req_extensions = v3_req 8 | distinguished_name = req_distinguished_name 9 | 10 | [req_distinguished_name] 11 | 12 | [ v3_req ] 13 | basicConstraints = CA:FALSE 14 | keyUsage = nonRepudiation, digitalSignature, keyEncipherment 15 | subjectAltName = @alt_names 16 | 17 | [alt_names] 18 | DNS.1 = dex.newtech.academy 19 | EOF 20 | 21 | openssl genrsa -out ssl/ca-key.pem 2048 22 | openssl req -x509 -new -nodes -key ssl/ca-key.pem -days 10 -out ssl/ca.pem -subj "/CN=kube-ca" 23 | 24 | openssl genrsa -out ssl/key.pem 2048 25 | openssl req -new -key ssl/key.pem -out ssl/csr.pem -subj "/CN=kube-ca" -config ssl/req.cnf 26 | openssl x509 -req -in ssl/csr.pem -CA ssl/ca.pem -CAkey ssl/ca-key.pem -CAcreateserial -out ssl/cert.pem -days 10 -extensions v3_req -extfile ssl/req.cnf 27 | 28 | -------------------------------------------------------------------------------- /dex/ldap/certinfo.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=config 2 | add: olcTLSCACertificateFile 3 | olcTLSCACertificateFile: /etc/ssl/certs/cacert.pem 4 | - 5 | add: olcTLSCertificateFile 6 | olcTLSCertificateFile: /etc/ssl/certs/ldap01_slapd_cert.pem 7 | - 8 | add: olcTLSCertificateKeyFile 9 | olcTLSCertificateKeyFile: /etc/ssl/private/ldap01_slapd_key.pem 10 | 11 | -------------------------------------------------------------------------------- /dex/ldap/users.ldif: -------------------------------------------------------------------------------- 1 | dn: ou=People,dc=example,dc=com 2 | objectClass: organizationalUnit 3 | ou: People 4 | 5 | dn: ou=Groups,dc=example,dc=com 6 | objectClass: organizationalUnit 7 | ou: Groups 8 | 9 | dn: cn=miners,ou=Groups,dc=example,dc=com 10 | objectClass: posixGroup 11 | cn: miners 12 | gidNumber: 5000 13 | 14 | dn: uid=john,ou=People,dc=example,dc=com 15 | objectClass: inetOrgPerson 16 | objectClass: posixAccount 17 | objectClass: shadowAccount 18 | uid: john 19 | sn: Doe 20 | givenName: John 21 | cn: John Doe 22 | displayName: John Doe 23 | uidNumber: 10000 24 | gidNumber: 5000 25 | userPassword: johnldap 26 | gecos: John Doe 27 | mail: john@doe.inv 28 | loginShell: /bin/bash 29 | homeDirectory: /home/john 30 | 31 | dn: uid=serviceaccount,ou=People,dc=example,dc=com 32 | objectClass: inetOrgPerson 33 | objectClass: posixAccount 34 | objectClass: shadowAccount 35 | uid: serviceaccount 36 | sn: serviceaccount 37 | givenName: serviceaccount 38 | cn: service 
account 39 | displayName: service account 40 | uidNumber: 99999 41 | gidNumber: 9999 42 | userPassword: serviceaccountldap 43 | gecos: Service Account 44 | loginShell: /bin/false 45 | homeDirectory: /home/serviceaccount 46 | -------------------------------------------------------------------------------- /dex/user.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: Role 3 | metadata: 4 | name: exampleUser 5 | namespace: default 6 | rules: 7 | - apiGroups: [""] # "" indicates the core API group 8 | resources: ["pods"] 9 | verbs: ["get", "watch", "list"] 10 | --- 11 | apiVersion: rbac.authorization.k8s.io/v1beta1 12 | kind: RoleBinding 13 | metadata: 14 | name: exampleUser 15 | roleRef: 16 | apiGroup: rbac.authorization.k8s.io 17 | kind: Role 18 | name: exampleUser 19 | subjects: 20 | - kind: User 21 | name: your@email.inv 22 | namespace: default 23 | -------------------------------------------------------------------------------- /helm/README.md: -------------------------------------------------------------------------------- 1 | # install helm 2 | 3 | You can download the latest release from https://github.com/kubernetes/helm/releases or enter the following command to install helm locally: 4 | 5 | ``` 6 | curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash 7 | ``` 8 | 9 | # init helm 10 | 11 | ``` 12 | helm init --service-account tiller 13 | ``` 14 | -------------------------------------------------------------------------------- /helm/rbac-config.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: tiller 5 | namespace: kube-system 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1beta1 8 | kind: ClusterRoleBinding 9 | metadata: 10 | name: tiller 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | name: cluster-admin 15 | subjects: 16 | - kind: ServiceAccount 17 | name: tiller 18 | namespace: kube-system 19 | -------------------------------------------------------------------------------- /istio/README.md: -------------------------------------------------------------------------------- 1 | # istio install 2 | 3 | download (0.7.1): 4 | ``` 5 | wget https://github.com/istio/istio/releases/download/0.7.1/istio-0.7.1-linux.tar.gz 6 | tar -xzvf istio-0.7.1-linux.tar.gz 7 | cd istio-0.7.1 8 | echo 'export PATH="$PATH:/home/ubuntu/istio-0.7.1/bin"' >> ~/.profile 9 | ``` 10 | 11 | Download (latest): 12 | ``` 13 | curl -L https://git.io/getLatestIstio | sh - 14 | echo 'export PATH="$PATH:/home/ubuntu/istio-0.7.1/bin"' >> ~/.profile # change 0.7.1 in your version 15 | cd istio-0.7.1 # change 0.7.1 in your version 16 | ``` 17 | 18 | with no mutual TLS authentication 19 | ``` 20 | kubectl apply -f install/kubernetes/istio.yaml 21 | ``` 22 | 23 | or with mutual TLS authentication 24 | ``` 25 | kubectl apply -f install/kubernetes/istio-auth.yaml 26 | ``` 27 | 28 | Example app (from istio) 29 | ``` 30 | kubectl edit svc istio-ingress -n istio-system # change loadbalancer to nodeport (or use hostport) 31 | export PATH="$PATH:/home/ubuntu/istio-0.7.1/bin" 32 | kubectl apply -f <(istioctl kube-inject --debug -f samples/bookinfo/kube/bookinfo.yaml) 33 | ``` 34 | 35 | 36 | # Traffic management 37 | 38 | Add default route to v1: 39 | ``` 40 | istioctl create -f samples/bookinfo/routing/route-rule-all-v1.yaml 41 | ``` 42 | 43 | Route traffic to v2 if rule 
matches 44 | ``` 45 | istioctl replace -f samples/bookinfo/routing/route-rule-reviews-test-v2.yaml 46 | ``` 47 | 48 | Route 50% of traffic between v1 and v3: 49 | ``` 50 | istioctl replace -f samples/bookinfo/routing/route-rule-reviews-50-v3.yaml 51 | ``` 52 | 53 | # Distributed tracing 54 | 55 | Enable zipkin: 56 | ``` 57 | kubectl apply -f install/kubernetes/addons/zipkin.yaml 58 | ``` 59 | 60 | Enable Jaeger: 61 | ``` 62 | kubectl delete -f install/kubernetes/addons/zipkin.yaml # if zipkin was installed, delete it first 63 | kubectl apply -n istio-system -f https://raw.githubusercontent.com/jaegertracing/jaeger-kubernetes/master/all-in-one/jaeger-all-in-one-template.yml 64 | 65 | ``` 66 | 67 | -------------------------------------------------------------------------------- /openshift/README.md: -------------------------------------------------------------------------------- 1 | # Install Docker 2 | ``` 3 | yum -y install docker 4 | systemctl enable docker 5 | systemctl start docker 6 | ``` 7 | 8 | # set insecure registry 9 | ``` 10 | echo '{ 11 | "insecure-registries": [ 12 | "172.30.0.0/16" 13 | ] 14 | }' > /etc/docker/daemon.json 15 | systemctl daemon-reload 16 | systemctl restart docker 17 | ``` 18 | 19 | 20 | # install oc, cluster up 21 | ``` 22 | curl -o ~/openshift-origin-client-tools-v3.9.0-191fece-linux-64bit.tar.gz -L https://github.com/openshift/origin/releases/download/v3.9.0/openshift-origin-client-tools-v3.9.0-191fece-linux-64bit.tar.gz 23 | cd ~ 24 | tar -xzvf openshift-origin-client-tools-v3.9.0-191fece-linux-64bit.tar.gz 25 | export PATH=$PATH:~/openshift-origin-client-tools-v3.9.0-191fece-linux-64bit 26 | echo 'export PATH=$PATH:~/openshift-origin-client-tools-v3.9.0-191fece-linux-64bit' >> .bash_profile 27 | oc cluster up --public-hostname=$(curl -s ifconfig.co) --host-data-dir=/data 28 | ``` 29 | -------------------------------------------------------------------------------- /rook/README.md: -------------------------------------------------------------------------------- 1 | # Rook 2 | 3 | Note: When creating kubernetes nodes, ensure you have one or more free devices to use. When using DigitalOcean, you can add an unformatted volume to each node droplet. 
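A quick way to confirm a node really has an unused raw device before deploying the cluster (a sketch, assuming you can SSH into the node) is to list the block devices:

```
lsblk -f
```

Any disk that shows no FSTYPE and no mountpoint (for example the extra volume attached to the droplet) is a candidate device for the Ceph OSDs.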
4 | 5 | Examples from https://github.com/rook/rook/tree/master/cluster/examples/kubernetes 6 | 7 | Install rook: 8 | ``` 9 | kubectl create -f https://raw.githubusercontent.com/rook/rook/v1.12.2/deploy/examples/crds.yaml 10 | kubectl create -f https://raw.githubusercontent.com/rook/rook/v1.12.2/deploy/examples/common.yaml 11 | kubectl create -f https://raw.githubusercontent.com/rook/rook/v1.12.2/deploy/examples/operator.yaml 12 | kubectl create -f rook-cluster.yaml 13 | ``` 14 | 15 | Storage: 16 | ``` 17 | kubectl create -f rook-storageclass.yaml 18 | ``` 19 | 20 | Rook tools: 21 | ``` 22 | kubectl create -f rook-tools.yaml 23 | ``` 24 | Note: use ceph status instead of rookctl status 25 | 26 | MySQL demo: 27 | ``` 28 | kubectl create -f mysql-demo.yaml 29 | ``` 30 | 31 | # object storage 32 | 33 | Create object storage: 34 | ``` 35 | kubectl create -f 36 | ``` 37 | 38 | Create user: 39 | ``` 40 | radosgw-admin user create --uid rook-user --display-name "A rook rgw User" --rgw-realm=my-store --rgw-zonegroup=my-store 41 | ``` 42 | 43 | Export variables 44 | ``` 45 | export AWS_HOST= 46 | export AWS_ENDPOINT= 47 | export AWS_ACCESS_KEY_ID= 48 | export AWS_SECRET_ACCESS_KEY= 49 | ``` 50 | -------------------------------------------------------------------------------- /rook/fs-demo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: ubuntu 5 | spec: 6 | containers: 7 | - image: ubuntu:latest 8 | name: ubuntu 9 | command: [ "/bin/bash", "-c", "--" ] 10 | args: [ "while true; do sleep 300; done;" ] 11 | volumeMounts: 12 | - name: fs-store 13 | mountPath: /data 14 | volumes: 15 | - name: fs-store 16 | flexVolume: 17 | driver: rook.io/rook 18 | fsType: ceph 19 | options: 20 | fsName: myfs 21 | clusterNamespace: rook 22 | clusterName: rook 23 | 24 | -------------------------------------------------------------------------------- /rook/mysql-demo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: demo-mysql 5 | labels: 6 | app: demo 7 | spec: 8 | ports: 9 | - port: 3306 10 | selector: 11 | app: demo 12 | tier: mysql 13 | clusterIP: None 14 | --- 15 | apiVersion: v1 16 | kind: PersistentVolumeClaim 17 | metadata: 18 | name: mysql-pv-claim 19 | labels: 20 | app: demo 21 | spec: 22 | storageClassName: rook-block 23 | accessModes: 24 | - ReadWriteOnce 25 | resources: 26 | requests: 27 | storage: 10Gi 28 | --- 29 | apiVersion: apps/v1 30 | kind: Deployment 31 | metadata: 32 | name: demo-mysql 33 | labels: 34 | app: demo 35 | spec: 36 | selector: 37 | matchLabels: 38 | app: demo 39 | strategy: 40 | type: Recreate 41 | template: 42 | metadata: 43 | labels: 44 | app: demo 45 | tier: mysql 46 | spec: 47 | containers: 48 | - image: mysql:5.6 49 | name: mysql 50 | env: 51 | - name: MYSQL_ROOT_PASSWORD 52 | value: changeme 53 | ports: 54 | - containerPort: 3306 55 | name: mysql 56 | volumeMounts: 57 | - name: mysql-persistent-storage 58 | mountPath: /var/lib/mysql 59 | volumes: 60 | - name: mysql-persistent-storage 61 | persistentVolumeClaim: 62 | claimName: mysql-pv-claim 63 | -------------------------------------------------------------------------------- /rook/rook-cluster.yaml: -------------------------------------------------------------------------------- 1 | ################################################################################################################# 2 | # Define the settings for the rook-ceph 
cluster with common settings for a production cluster. 3 | # All nodes with available raw devices will be used for the Ceph cluster. At least three nodes are required 4 | # in this example. See the documentation for more details on storage settings available. 5 | 6 | # For example, to create the cluster: 7 | # kubectl create -f crds.yaml -f common.yaml -f operator.yaml 8 | # kubectl create -f cluster.yaml 9 | ################################################################################################################# 10 | 11 | apiVersion: ceph.rook.io/v1 12 | kind: CephCluster 13 | metadata: 14 | name: rook-ceph 15 | namespace: rook-ceph # namespace:cluster 16 | spec: 17 | cephVersion: 18 | # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw). 19 | # v16 is Pacific, and v17 is Quincy. 20 | # RECOMMENDATION: In production, use a specific version tag instead of the general v17 flag, which pulls the latest release and could result in different 21 | # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/. 22 | # If you want to be more precise, you can always use a timestamp tag such quay.io/ceph/ceph:v17.2.3-20220805 23 | # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities 24 | image: quay.io/ceph/ceph:v17.2.5 25 | # Whether to allow unsupported versions of Ceph. Currently `pacific` and `quincy` are supported. 26 | # Future versions such as `reef` (v18) would require this to be set to `true`. 27 | # Do not set to true in production. 28 | allowUnsupported: false 29 | # The path on the host where configuration files will be persisted. Must be specified. 30 | # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster. 31 | # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment. 32 | dataDirHostPath: /var/lib/rook 33 | # Whether or not upgrade should continue even if a check fails 34 | # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise 35 | # Use at your OWN risk 36 | # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/latest/ceph-upgrade.html#ceph-version-upgrades 37 | skipUpgradeChecks: false 38 | # Whether or not continue if PGs are not clean during an upgrade 39 | continueUpgradeAfterChecksEvenIfNotHealthy: false 40 | # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart. 41 | # If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one 42 | # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then operator would 43 | # continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`. 44 | # The default wait timeout is 10 minutes. 45 | waitTimeoutForHealthyOSDInMinutes: 10 46 | mon: 47 | # Set the number of mons to be started. Generally recommended to be 3. 48 | # For highest availability, an odd number of mons should be specified. 49 | count: 3 50 | # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason. 
51 | # Mons should only be allowed on the same node for test environments where data loss is acceptable. 52 | allowMultiplePerNode: false 53 | mgr: 54 | # When higher availability of the mgr is needed, increase the count to 2. 55 | # In that case, one mgr will be active and one in standby. When Ceph updates which 56 | # mgr is active, Rook will update the mgr services to match the active mgr. 57 | count: 2 58 | allowMultiplePerNode: false 59 | modules: 60 | # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules 61 | # are already enabled by other settings in the cluster CR. 62 | - name: pg_autoscaler 63 | enabled: true 64 | # enable the ceph dashboard for viewing cluster status 65 | dashboard: 66 | enabled: true 67 | # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy) 68 | # urlPrefix: /ceph-dashboard 69 | # serve the dashboard at the given port. 70 | # port: 8443 71 | # serve the dashboard using SSL 72 | ssl: true 73 | # enable prometheus alerting for cluster 74 | monitoring: 75 | # requires Prometheus to be pre-installed 76 | enabled: false 77 | network: 78 | connections: 79 | # Whether to encrypt the data in transit across the wire to prevent eavesdropping the data on the network. 80 | # The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons will be encrypted. 81 | # When encryption is not enabled, clients still establish a strong initial authentication and data integrity is still validated with a crc check. 82 | # IMPORTANT: Encryption requires the 5.11 kernel for the latest nbd and cephfs drivers. Alternatively for testing only, 83 | # you can set the "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class. 84 | # The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes. 85 | encryption: 86 | enabled: false 87 | # Whether to compress the data in transit across the wire. The default is false. 88 | # Requires Ceph Quincy (v17) or newer. Also see the kernel requirements above for encryption. 89 | compression: 90 | enabled: false 91 | # Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled 92 | # and clients will be required to connect to the Ceph cluster with the v2 port (3300). 93 | # Requires a kernel that supports msgr v2 (kernel 5.11 or CentOS 8.4 or newer). 94 | requireMsgr2: false 95 | # enable host networking 96 | #provider: host 97 | # enable the Multus network provider 98 | #provider: multus 99 | #selectors: 100 | # The selector keys are required to be `public` and `cluster`. 101 | # Based on the configuration, the operator will do the following: 102 | # 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface 103 | # 2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network' 104 | # 105 | # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus 106 | # 107 | #public: public-conf --> NetworkAttachmentDefinition object name in Multus 108 | #cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus 109 | # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. 
Empty string would mean IPv4 110 | #ipFamily: "IPv6" 111 | # Ceph daemons to listen on both IPv4 and Ipv6 networks 112 | #dualStack: false 113 | # Enable multiClusterService to export the mon and OSD services to peer cluster. 114 | # This is useful to support RBD mirroring between two clusters having overlapping CIDRs. 115 | # Ensure that peer clusters are connected using an MCS API compatible application, like Globalnet Submariner. 116 | #multiClusterService: 117 | # enabled: false 118 | 119 | # enable the crash collector for ceph daemon crash collection 120 | crashCollector: 121 | disable: false 122 | # Uncomment daysToRetain to prune ceph crash entries older than the 123 | # specified number of days. 124 | #daysToRetain: 30 125 | # enable log collector, daemons will log on files and rotate 126 | logCollector: 127 | enabled: true 128 | periodicity: daily # one of: hourly, daily, weekly, monthly 129 | maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M. 130 | # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction. 131 | cleanupPolicy: 132 | # Since cluster cleanup is destructive to data, confirmation is required. 133 | # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data". 134 | # This value should only be set when the cluster is about to be deleted. After the confirmation is set, 135 | # Rook will immediately stop configuring the cluster and only wait for the delete command. 136 | # If the empty string is set, Rook will not destroy any data on hosts during uninstall. 137 | confirmation: "" 138 | # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion 139 | sanitizeDisks: 140 | # method indicates if the entire disk should be sanitized or simply ceph's metadata 141 | # in both case, re-install is possible 142 | # possible choices are 'complete' or 'quick' (default) 143 | method: quick 144 | # dataSource indicate where to get random bytes from to write on the disk 145 | # possible choices are 'zero' (default) or 'random' 146 | # using random sources will consume entropy from the system and will take much more time then the zero source 147 | dataSource: zero 148 | # iteration overwrite N times instead of the default (1) 149 | # takes an integer value 150 | iteration: 1 151 | # allowUninstallWithVolumes defines how the uninstall should be performed 152 | # If set to true, cephCluster deletion does not wait for the PVs to be deleted. 153 | allowUninstallWithVolumes: false 154 | # To control where various services will be scheduled by kubernetes, use the placement configuration sections below. 155 | # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and 156 | # tolerate taints with a key of 'storage-node'. 157 | # placement: 158 | # all: 159 | # nodeAffinity: 160 | # requiredDuringSchedulingIgnoredDuringExecution: 161 | # nodeSelectorTerms: 162 | # - matchExpressions: 163 | # - key: role 164 | # operator: In 165 | # values: 166 | # - storage-node 167 | # podAffinity: 168 | # podAntiAffinity: 169 | # topologySpreadConstraints: 170 | # tolerations: 171 | # - key: storage-node 172 | # operator: Exists 173 | # The above placement information can also be specified for mon, osd, and mgr components 174 | # mon: 175 | # Monitor deployments may contain an anti-affinity rule for avoiding monitor 176 | # collocation on the same node. 
This is a required rule when host network is used 177 | # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a 178 | # preferred rule with weight: 50. 179 | # osd: 180 | # prepareosd: 181 | # mgr: 182 | # cleanup: 183 | annotations: 184 | # all: 185 | # mon: 186 | # osd: 187 | # cleanup: 188 | # prepareosd: 189 | # clusterMetadata annotations will be applied to only `rook-ceph-mon-endpoints` configmap and the `rook-ceph-mon` and `rook-ceph-admin-keyring` secrets. 190 | # And clusterMetadata annotations will not be merged with `all` annotations. 191 | # clusterMetadata: 192 | # kubed.appscode.com/sync: "true" 193 | # If no mgr annotations are set, prometheus scrape annotations will be set by default. 194 | # mgr: 195 | labels: 196 | # all: 197 | # mon: 198 | # osd: 199 | # cleanup: 200 | # mgr: 201 | # prepareosd: 202 | # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator. 203 | # These labels can be passed as LabelSelector to Prometheus 204 | # monitoring: 205 | # crashcollector: 206 | resources: 207 | #The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory 208 | # mgr: 209 | # limits: 210 | # cpu: "500m" 211 | # memory: "1024Mi" 212 | # requests: 213 | # cpu: "500m" 214 | # memory: "1024Mi" 215 | # The above example requests/limits can also be added to the other components 216 | # mon: 217 | # osd: 218 | # For OSD it also is a possible to specify requests/limits based on device class 219 | # osd-hdd: 220 | # osd-ssd: 221 | # osd-nvme: 222 | # prepareosd: 223 | # mgr-sidecar: 224 | # crashcollector: 225 | # logcollector: 226 | # cleanup: 227 | # The option to automatically remove OSDs that are out and are safe to destroy. 228 | removeOSDsIfOutAndSafeToRemove: false 229 | priorityClassNames: 230 | #all: rook-ceph-default-priority-class 231 | mon: system-node-critical 232 | osd: system-node-critical 233 | mgr: system-cluster-critical 234 | #crashcollector: rook-ceph-crashcollector-priority-class 235 | storage: # cluster level storage configuration and selection 236 | useAllNodes: true 237 | useAllDevices: true 238 | #deviceFilter: 239 | config: 240 | # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map 241 | # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore. 242 | # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB 243 | # journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller 244 | # osdsPerDevice: "1" # this value can be overridden at the node or device level 245 | # encryptedDevice: "true" # the default value for this option is "false" 246 | # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named 247 | # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label. 
248 | # nodes: 249 | # - name: "172.17.4.201" 250 | # devices: # specific devices to use for storage can be specified for each node 251 | # - name: "sdb" 252 | # - name: "nvme01" # multiple osds can be created on high performance devices 253 | # config: 254 | # osdsPerDevice: "5" 255 | # - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths 256 | # config: # configuration can be specified at the node level which overrides the cluster level config 257 | # - name: "172.17.4.301" 258 | # deviceFilter: "^sd." 259 | # when onlyApplyOSDPlacement is false, will merge both placement.All() and placement.osd 260 | onlyApplyOSDPlacement: false 261 | # The section for configuring management of daemon disruptions during upgrade or fencing. 262 | disruptionManagement: 263 | # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically 264 | # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will 265 | # block eviction of OSDs by default and unblock them safely when drains are detected. 266 | managePodBudgets: true 267 | # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the 268 | # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes. 269 | osdMaintenanceTimeout: 30 270 | # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up. 271 | # Operator will continue with the next drain if the timeout exceeds. It only works if `managePodBudgets` is `true`. 272 | # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain. 273 | pgHealthCheckTimeout: 0 274 | 275 | # healthChecks 276 | # Valid values for daemons are 'mon', 'osd', 'status' 277 | healthCheck: 278 | daemonHealth: 279 | mon: 280 | disabled: false 281 | interval: 45s 282 | osd: 283 | disabled: false 284 | interval: 60s 285 | status: 286 | disabled: false 287 | interval: 60s 288 | # Change pod liveness probe timing or threshold values. Works for all mon,mgr,osd daemons. 289 | livenessProbe: 290 | mon: 291 | disabled: false 292 | mgr: 293 | disabled: false 294 | osd: 295 | disabled: false 296 | # Change pod startup probe timing or threshold values. Works for all mon,mgr,osd daemons. 
297 | startupProbe: 298 | mon: 299 | disabled: false 300 | mgr: 301 | disabled: false 302 | osd: 303 | disabled: false 304 | -------------------------------------------------------------------------------- /rook/rook-storageclass-fs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rook.io/v1alpha1 2 | kind: Filesystem 3 | metadata: 4 | name: myfs 5 | namespace: rook 6 | spec: 7 | metadataPool: 8 | replicated: 9 | size: 3 10 | dataPools: 11 | - erasureCoded: 12 | dataChunks: 2 13 | codingChunks: 1 14 | metadataServer: 15 | activeCount: 1 16 | activeStandby: true 17 | -------------------------------------------------------------------------------- /rook/rook-storageclass-objectstore.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rook.io/v1alpha1 2 | kind: ObjectStore 3 | metadata: 4 | name: my-store 5 | namespace: rook 6 | spec: 7 | metadataPool: 8 | replicated: 9 | size: 3 10 | dataPool: 11 | erasureCoded: 12 | dataChunks: 2 13 | codingChunks: 1 14 | gateway: 15 | type: s3 16 | sslCertificateRef: 17 | port: 80 18 | securePort: 19 | instances: 1 20 | allNodes: false 21 | -------------------------------------------------------------------------------- /rook/rook-storageclass.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: ceph.rook.io/v1 2 | kind: CephBlockPool 3 | metadata: 4 | name: replicapool 5 | namespace: rook-ceph 6 | spec: 7 | failureDomain: host 8 | replicated: 9 | size: 2 10 | # For an erasure-coded pool, comment out the replication size above and uncomment the following settings. 11 | # Make sure you have enough OSDs to support the replica size or erasure code chunks. 12 | #erasureCoded: 13 | # dataChunks: 2 14 | # codingChunks: 1 15 | --- 16 | apiVersion: storage.k8s.io/v1 17 | kind: StorageClass 18 | metadata: 19 | name: rook-block 20 | provisioner: rook-ceph.rbd.csi.ceph.com 21 | parameters: 22 | # Ceph pool into which the RBD image shall be created 23 | pool: replicapool 24 | # clusterID is the namespace where the rook cluster is running 25 | clusterID: rook-ceph 26 | # The secrets contain Ceph admin credentials. 27 | csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner 28 | csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph 29 | csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner 30 | csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph 31 | csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node 32 | csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph 33 | 34 | # Specify the filesystem type of the volume. If not specified, csi-provisioner 35 | # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock 36 | # in hyperconverged settings where the volume is mounted on the same node as the osds. 
37 | csi.storage.k8s.io/fstype: ext4 38 | 39 | -------------------------------------------------------------------------------- /rook/rook-tools.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: rook-ceph-tools 5 | namespace: rook-ceph # namespace:cluster 6 | labels: 7 | app: rook-ceph-tools 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: rook-ceph-tools 13 | template: 14 | metadata: 15 | labels: 16 | app: rook-ceph-tools 17 | spec: 18 | dnsPolicy: ClusterFirstWithHostNet 19 | containers: 20 | - name: rook-ceph-tools 21 | image: quay.io/ceph/ceph:v17.2.5 22 | command: 23 | - /bin/bash 24 | - -c 25 | - | 26 | # Replicate the script from toolbox.sh inline so the ceph image 27 | # can be run directly, instead of requiring the rook toolbox 28 | CEPH_CONFIG="/etc/ceph/ceph.conf" 29 | MON_CONFIG="/etc/rook/mon-endpoints" 30 | KEYRING_FILE="/etc/ceph/keyring" 31 | 32 | # create a ceph config file in its default location so ceph/rados tools can be used 33 | # without specifying any arguments 34 | write_endpoints() { 35 | endpoints=$(cat ${MON_CONFIG}) 36 | 37 | # filter out the mon names 38 | # external cluster can have numbers or hyphens in mon names, handling them in regex 39 | # shellcheck disable=SC2001 40 | mon_endpoints=$(echo "${endpoints}"| sed 's/[a-z0-9_-]\+=//g') 41 | 42 | DATE=$(date) 43 | echo "$DATE writing mon endpoints to ${CEPH_CONFIG}: ${endpoints}" 44 | cat << EOF > ${CEPH_CONFIG} 45 | [global] 46 | mon_host = ${mon_endpoints} 47 | 48 | [client.admin] 49 | keyring = ${KEYRING_FILE} 50 | EOF 51 | } 52 | 53 | # watch the endpoints config file and update if the mon endpoints ever change 54 | watch_endpoints() { 55 | # get the timestamp for the target of the soft link 56 | real_path=$(realpath ${MON_CONFIG}) 57 | initial_time=$(stat -c %Z "${real_path}") 58 | while true; do 59 | real_path=$(realpath ${MON_CONFIG}) 60 | latest_time=$(stat -c %Z "${real_path}") 61 | 62 | if [[ "${latest_time}" != "${initial_time}" ]]; then 63 | write_endpoints 64 | initial_time=${latest_time} 65 | fi 66 | 67 | sleep 10 68 | done 69 | } 70 | 71 | # read the secret from an env var (for backward compatibility), or from the secret file 72 | ceph_secret=${ROOK_CEPH_SECRET} 73 | if [[ "$ceph_secret" == "" ]]; then 74 | ceph_secret=$(cat /var/lib/rook-ceph-mon/secret.keyring) 75 | fi 76 | 77 | # create the keyring file 78 | cat << EOF > ${KEYRING_FILE} 79 | [${ROOK_CEPH_USERNAME}] 80 | key = ${ceph_secret} 81 | EOF 82 | 83 | # write the initial config file 84 | write_endpoints 85 | 86 | # continuously update the mon endpoints if they fail over 87 | watch_endpoints 88 | imagePullPolicy: IfNotPresent 89 | tty: true 90 | securityContext: 91 | runAsNonRoot: true 92 | runAsUser: 2016 93 | runAsGroup: 2016 94 | env: 95 | - name: ROOK_CEPH_USERNAME 96 | valueFrom: 97 | secretKeyRef: 98 | name: rook-ceph-mon 99 | key: ceph-username 100 | volumeMounts: 101 | - mountPath: /etc/ceph 102 | name: ceph-config 103 | - name: mon-endpoint-volume 104 | mountPath: /etc/rook 105 | - name: ceph-admin-secret 106 | mountPath: /var/lib/rook-ceph-mon 107 | readOnly: true 108 | volumes: 109 | - name: ceph-admin-secret 110 | secret: 111 | secretName: rook-ceph-mon 112 | optional: false 113 | items: 114 | - key: ceph-secret 115 | path: secret.keyring 116 | - name: mon-endpoint-volume 117 | configMap: 118 | name: rook-ceph-mon-endpoints 119 | items: 120 | - key: data 121 | path: mon-endpoints 122 | - name: ceph-config 123 | 
emptyDir: {} 124 | tolerations: 125 | - key: "node.kubernetes.io/unreachable" 126 | operator: "Exists" 127 | effect: "NoExecute" 128 | tolerationSeconds: 5 129 | -------------------------------------------------------------------------------- /scripts/create-user.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | groupadd ubuntu 3 | useradd -g ubuntu -G admin -s /bin/bash -d /home/ubuntu ubuntu 4 | mkdir -p /home/ubuntu 5 | cp -r /root/.ssh /home/ubuntu/.ssh 6 | chown -R ubuntu:ubuntu /home/ubuntu 7 | echo "ubuntu ALL=(ALL:ALL) NOPASSWD:ALL" >> /etc/sudoers 8 | 9 | # create .kube/config 10 | mkdir -p ~ubuntu/.kube 11 | cp -i /etc/kubernetes/admin.conf ~ubuntu/.kube/config 12 | chown ubuntu:ubuntu ~ubuntu/.kube/config 13 | -------------------------------------------------------------------------------- /scripts/install-kubernetes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "This script has been tested on Ubuntu 20.04.3 LTS (focal) and Ubuntu 22.04.1 LTS (jammy). If you are using another distribution, you most likely need to edit this script." 4 | sleep 3 5 | 6 | echo "installing docker" 7 | apt-get update 8 | apt-get install -y \ 9 | apt-transport-https \ 10 | ca-certificates \ 11 | curl \ 12 | software-properties-common 13 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - 14 | add-apt-repository \ 15 | "deb https://download.docker.com/linux/$(. /etc/os-release; echo "$ID") \ 16 | $(lsb_release -cs) \ 17 | stable" 18 | 19 | apt-get update && apt-get install docker-ce docker-ce-cli containerd.io -y 20 | 21 | 22 | echo "installing kubernetes" 23 | apt-get update && apt-get install -y apt-transport-https 24 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - 25 | cat <<EOF >/etc/apt/sources.list.d/kubernetes.list 26 | deb http://apt.kubernetes.io/ kubernetes-xenial main 27 | EOF 28 | apt-get update 29 | apt-get install -y kubelet kubeadm kubectl 30 | 31 | # DigitalOcean without firewall (IP-in-IP allowed) - or any other cloud / on-prem that supports IP-in-IP traffic 32 | # echo "deploying kubernetes (with calico)..." 33 | # kubeadm init --pod-network-cidr=192.168.0.0/16 # add --apiserver-advertise-address="ip" if you want to use a different IP address than the main server IP 34 | # export KUBECONFIG=/etc/kubernetes/admin.conf 35 | # kubectl apply -f https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml 36 | # kubectl apply -f https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml 37 | 38 | 39 | # kubeadm configuration 40 | echo '# kubeadm-config.yaml 41 | kind: ClusterConfiguration 42 | apiVersion: kubeadm.k8s.io/v1beta3 43 | #kubernetesVersion: v1.21.0 44 | networking: 45 | podSubnet: 10.244.0.0/16 46 | --- 47 | kind: KubeletConfiguration 48 | apiVersion: kubelet.config.k8s.io/v1beta1 49 | cgroupDriver: cgroupfs' > kubeadm-config.yaml 50 | 51 | # containerd config to work with Kubernetes >=1.26 52 | echo "SystemdCgroup = true" > /etc/containerd/config.toml 53 | systemctl restart containerd 54 | 55 | # DigitalOcean with firewall (VxLAN with Flannel) - could be resolved in the future by allowing IP-in-IP in the firewall settings 56 | echo "deploying kubernetes (with canal)..." 
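# NOTE: the podSubnet in kubeadm-config.yaml above (10.244.0.0/16) matches the default
# Flannel network used by canal.yaml, so no extra --pod-network-cidr flag is needed here.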
57 | kubeadm init --config kubeadm-config.yaml # add --apiserver-advertise-address="ip" if you want to use a different IP address than the main server IP 58 | export KUBECONFIG=/etc/kubernetes/admin.conf 59 | curl https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/canal.yaml -O 60 | kubectl apply -f canal.yaml 61 | echo "HINT: " 62 | echo "run the command: export KUBECONFIG=/etc/kubernetes/admin.conf if kubectl doesn't work" 63 | 64 | -------------------------------------------------------------------------------- /scripts/install-node.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "This script has been tested on ubuntu 20.04.3 LTS (focal) and ubuntu 22.04.1 LTS (jammy). If you are using another distribution, you most likely need to edit this script." 3 | sleep 3 4 | 5 | echo "installing docker" 6 | apt-get update 7 | apt-get install -y \ 8 | apt-transport-https \ 9 | ca-certificates \ 10 | curl \ 11 | software-properties-common 12 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - 13 | add-apt-repository \ 14 | "deb https://download.docker.com/linux/$(. /etc/os-release; echo "$ID") \ 15 | $(lsb_release -cs) \ 16 | stable" 17 | 18 | apt-get update && apt-get install docker-ce docker-ce-cli containerd.io -y 19 | 20 | echo "installing kubeadm and kubectl" 21 | apt-get update && apt-get install -y apt-transport-https 22 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - 23 | cat <<EOF >/etc/apt/sources.list.d/kubernetes.list 24 | deb http://apt.kubernetes.io/ kubernetes-xenial main 25 | EOF 26 | apt-get update 27 | apt-get install -y kubelet kubeadm kubectl 28 | 29 | # containerd config to work with Kubernetes >=1.26 30 | echo "SystemdCgroup = true" > /etc/containerd/config.toml 31 | systemctl restart containerd 32 | 33 | echo "You can now execute the kubeadm join command (the command is shown during kubeadm init on the master node)" 34 | -------------------------------------------------------------------------------- /vault/README.md: -------------------------------------------------------------------------------- 1 | # Vault 2 | 3 | ## deploy etcd operator 4 | ``` 5 | kubectl create -f etcd-rbac.yaml 6 | kubectl create -f etcd_crds.yaml 7 | kubectl create -f etcd-operator-deploy.yaml 8 | ``` 9 | 10 | ## deploy vault operator 11 | ``` 12 | kubectl create -f vault-rbac.yaml 13 | kubectl create -f vault_crd.yaml 14 | kubectl create -f vault-deployment.yaml 15 | ``` 16 | 17 | ## deploy vault + etcd cluster 18 | ``` 19 | kubectl create -f example_vault.yaml 20 | ``` 21 | 22 | ## install vault cli 23 | ``` 24 | wget https://releases.hashicorp.com/vault/0.10.1/vault_0.10.1_linux_amd64.zip 25 | sudo apt-get -y install unzip 26 | unzip vault_0.10.1_linux_amd64.zip 27 | chmod +x vault 28 | sudo mv vault /usr/local/bin 29 | ``` 30 | ## Initialize Vault cluster 31 | ``` 32 | kubectl get vault example -o jsonpath='{.status.vaultStatus.sealed[0]}' | xargs -0 -I {} kubectl -n default port-forward {} 8200 33 | export VAULT_ADDR='https://localhost:8200' 34 | export VAULT_SKIP_VERIFY="true" 35 | vault status 36 | vault operator init 37 | vault operator unseal 38 | vault operator unseal 39 | vault operator unseal 40 | vault login 41 | ``` 42 | 43 | ## Write a secret 44 | ``` 45 | kubectl -n default get vault example -o jsonpath='{.status.vaultStatus.active}' | xargs -0 -I {} kubectl -n default port-forward {} 8200 46 | vault write secret/myapp/mypassword value=pass123 47 | vault
write sys/policy/my-policy policy=@policy.hcl 48 | vault token create -policy=my-policy 49 | 50 | ``` 51 | 52 | ## read a secret in a pod 53 | ``` 54 | kubectl run --image ubuntu -it --rm ubuntu 55 | apt-get update && apt-get -y install curl 56 | curl -k -H 'X-Vault-Token: ' https://example:8200/v1/secret/myapp/mypassword 57 | ``` 58 | -------------------------------------------------------------------------------- /vault/etcd-operator-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: etcd-operator 5 | labels: 6 | name: etcd-operator 7 | spec: 8 | replicas: 1 9 | template: 10 | metadata: 11 | labels: 12 | name: etcd-operator 13 | spec: 14 | serviceAccountName: etcd 15 | containers: 16 | - name: etcd-operator 17 | image: quay.io/coreos/etcd-operator:v0.8.3 18 | command: 19 | - etcd-operator 20 | - "--create-crd=false" 21 | env: 22 | - name: MY_POD_NAMESPACE 23 | valueFrom: 24 | fieldRef: 25 | fieldPath: metadata.namespace 26 | - name: MY_POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | fieldPath: metadata.name 30 | - name: etcd-backup-operator 31 | image: quay.io/coreos/etcd-operator:v0.8.3 32 | command: 33 | - etcd-backup-operator 34 | - "--create-crd=false" 35 | env: 36 | - name: MY_POD_NAMESPACE 37 | valueFrom: 38 | fieldRef: 39 | fieldPath: metadata.namespace 40 | - name: MY_POD_NAME 41 | valueFrom: 42 | fieldRef: 43 | fieldPath: metadata.name 44 | - name: etcd-restore-operator 45 | image: quay.io/coreos/etcd-operator:v0.8.3 46 | command: 47 | - etcd-restore-operator 48 | - "--create-crd=false" 49 | env: 50 | - name: MY_POD_NAMESPACE 51 | valueFrom: 52 | fieldRef: 53 | fieldPath: metadata.namespace 54 | - name: MY_POD_NAME 55 | valueFrom: 56 | fieldRef: 57 | fieldPath: metadata.name 58 | 59 | -------------------------------------------------------------------------------- /vault/etcd-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: etcd 5 | namespace: default 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1beta1 8 | kind: Role 9 | metadata: 10 | name: etcd 11 | namespace: default 12 | rules: 13 | - apiGroups: 14 | - etcd.database.coreos.com 15 | resources: 16 | - etcdclusters 17 | - etcdbackups 18 | - etcdrestores 19 | verbs: 20 | - "*" 21 | - apiGroups: 22 | - "" 23 | resources: 24 | - pods 25 | - services 26 | - endpoints 27 | - persistentvolumeclaims 28 | - events 29 | verbs: 30 | - "*" 31 | - apiGroups: 32 | - apps 33 | resources: 34 | - deployments 35 | verbs: 36 | - "*" 37 | # The following permissions can be removed if not using S3 backup and TLS 38 | - apiGroups: 39 | - "" 40 | resources: 41 | - secrets 42 | verbs: 43 | - get 44 | --- 45 | apiVersion: rbac.authorization.k8s.io/v1beta1 46 | kind: RoleBinding 47 | metadata: 48 | name: etcd 49 | namespace: default 50 | roleRef: 51 | apiGroup: rbac.authorization.k8s.io 52 | kind: Role 53 | name: etcd 54 | subjects: 55 | - kind: ServiceAccount 56 | name: etcd 57 | namespace: default 58 | -------------------------------------------------------------------------------- /vault/etcd_crds.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: etcdclusters.etcd.database.coreos.com 5 | spec: 6 | group: etcd.database.coreos.com 7 | names: 8 | kind: EtcdCluster 9 | listKind: 
EtcdClusterList 10 | plural: etcdclusters 11 | shortNames: 12 | - etcd 13 | singular: etcdcluster 14 | scope: Namespaced 15 | version: v1beta2 16 | --- 17 | apiVersion: apiextensions.k8s.io/v1beta1 18 | kind: CustomResourceDefinition 19 | metadata: 20 | name: etcdbackups.etcd.database.coreos.com 21 | spec: 22 | group: etcd.database.coreos.com 23 | names: 24 | kind: EtcdBackup 25 | listKind: EtcdBackupList 26 | plural: etcdbackups 27 | singular: etcdbackup 28 | scope: Namespaced 29 | version: v1beta2 30 | --- 31 | apiVersion: apiextensions.k8s.io/v1beta1 32 | kind: CustomResourceDefinition 33 | metadata: 34 | name: etcdrestores.etcd.database.coreos.com 35 | spec: 36 | group: etcd.database.coreos.com 37 | names: 38 | kind: EtcdRestore 39 | listKind: EtcdRestoreList 40 | plural: etcdrestores 41 | singular: etcdrestore 42 | scope: Namespaced 43 | version: v1beta2 44 | -------------------------------------------------------------------------------- /vault/example_vault.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "vault.security.coreos.com/v1alpha1" 2 | kind: "VaultService" 3 | metadata: 4 | name: "example" 5 | spec: 6 | nodes: 2 7 | version: "0.9.1-0" 8 | -------------------------------------------------------------------------------- /vault/policy.hcl: -------------------------------------------------------------------------------- 1 | path "secret/myapp/*" { 2 | capabilities = ["read"] 3 | } 4 | -------------------------------------------------------------------------------- /vault/vault-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: vault-operator 5 | spec: 6 | replicas: 1 7 | template: 8 | metadata: 9 | labels: 10 | name: vault-operator 11 | spec: 12 | serviceAccountName: vault 13 | containers: 14 | - name: vault-operator 15 | image: quay.io/coreos/vault-operator:latest 16 | env: 17 | - name: MY_POD_NAMESPACE 18 | valueFrom: 19 | fieldRef: 20 | fieldPath: metadata.namespace 21 | - name: MY_POD_NAME 22 | valueFrom: 23 | fieldRef: 24 | fieldPath: metadata.name 25 | -------------------------------------------------------------------------------- /vault/vault-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: vault 5 | namespace: default 6 | --- 7 | kind: Role 8 | apiVersion: rbac.authorization.k8s.io/v1beta1 9 | metadata: 10 | name: vault-operator-role 11 | rules: 12 | - apiGroups: 13 | - etcd.database.coreos.com 14 | resources: 15 | - etcdclusters 16 | - etcdbackups 17 | - etcdrestores 18 | verbs: 19 | - "*" 20 | - apiGroups: 21 | - vault.security.coreos.com 22 | resources: 23 | - vaultservices 24 | verbs: 25 | - "*" 26 | - apiGroups: 27 | - storage.k8s.io 28 | resources: 29 | - storageclasses 30 | verbs: 31 | - "*" 32 | - apiGroups: 33 | - "" # "" indicates the core API group 34 | resources: 35 | - pods 36 | - services 37 | - endpoints 38 | - persistentvolumeclaims 39 | - events 40 | - configmaps 41 | - secrets 42 | verbs: 43 | - "*" 44 | - apiGroups: 45 | - apps 46 | resources: 47 | - deployments 48 | verbs: 49 | - "*" 50 | 51 | --- 52 | 53 | kind: RoleBinding 54 | apiVersion: rbac.authorization.k8s.io/v1beta1 55 | metadata: 56 | name: vault-operator-rolebinding 57 | subjects: 58 | - kind: ServiceAccount 59 | name: vault 60 | namespace: default 61 | roleRef: 62 | kind: Role 63 | name: 
vault-operator-role 64 | apiGroup: rbac.authorization.k8s.io 65 | -------------------------------------------------------------------------------- /vault/vault_crd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: vaultservices.vault.security.coreos.com 5 | spec: 6 | group: vault.security.coreos.com 7 | names: 8 | kind: VaultService 9 | listKind: VaultServiceList 10 | plural: vaultservices 11 | shortNames: 12 | - vault 13 | singular: vaultservice 14 | scope: Namespaced 15 | version: v1alpha1 16 | --------------------------------------------------------------------------------
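A quick sanity check after the Vault manifests above have been applied (a minimal sketch; it assumes the CRDs and the `example` VaultService were created in the `default` namespace, as in the README steps):

```
kubectl get crd etcdclusters.etcd.database.coreos.com vaultservices.vault.security.coreos.com
kubectl -n default get vault example -o jsonpath='{.status.vaultStatus.active}'
```

If the second command prints nothing, the cluster is most likely still starting, or it has not been initialized and unsealed yet.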