├── .gitignore ├── misc ├── screenshots.png ├── architecture.png └── screenshots-with-kubelogin.png ├── ldap ├── ldif │ ├── 0-ous.ldif │ ├── 2-groups.ldif │ └── 1-users.ldif └── ldap.yaml ├── example-app ├── build.sh ├── go.mod ├── templates.go ├── main.go └── go.sum ├── tls-setup ├── ca-config.json ├── req-csr-k8s.json ├── req-csr-dex.json ├── ca-csr.json └── Makefile ├── manifests ├── authorization.yaml └── kube-apiserver.yaml ├── kind └── kind.yaml ├── setup.sh ├── dex └── dex.yaml └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | _* 2 | **/.DS_Store -------------------------------------------------------------------------------- /misc/screenshots.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightzheng100/kubernetes-dex-ldap-integration/HEAD/misc/screenshots.png -------------------------------------------------------------------------------- /misc/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightzheng100/kubernetes-dex-ldap-integration/HEAD/misc/architecture.png -------------------------------------------------------------------------------- /misc/screenshots-with-kubelogin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightzheng100/kubernetes-dex-ldap-integration/HEAD/misc/screenshots-with-kubelogin.png -------------------------------------------------------------------------------- /ldap/ldif/0-ous.ldif: -------------------------------------------------------------------------------- 1 | dn: ou=people,dc=example,dc=org 2 | ou: people 3 | description: All people in organisation 4 | objectclass: organizationalunit 5 | 6 | dn: ou=groups,dc=example,dc=org 7 | objectClass: organizationalUnit 8 | ou: groups 
-------------------------------------------------------------------------------- /example-app/build.sh: -------------------------------------------------------------------------------- 1 | GOOS=darwin GOARCH=arm64 go build -o example-app-darwin-arm64 2 | GOOS=darwin GOARCH=amd64 go build -o example-app-darwin-amd64 3 | GOOS=linux GOARCH=arm64 go build -o example-app-linux-arm64 4 | GOOS=linux GOARCH=amd64 go build -o example-app-linux-amd64 -------------------------------------------------------------------------------- /tls-setup/ca-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "signing": { 3 | "default": { 4 | "usages": [ 5 | "signing", 6 | "key encipherment", 7 | "server auth", 8 | "client auth" 9 | ], 10 | "expiry": "8760h" 11 | } 12 | } 13 | } -------------------------------------------------------------------------------- /tls-setup/req-csr-k8s.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "kube-apiserver", 3 | "hosts": [ 4 | "localhost", 5 | "127.0.0.1" 6 | ], 7 | "key": { 8 | "algo": "ecdsa", 9 | "size": 384 10 | }, 11 | "names": [ 12 | { 13 | "O": "autogenerated", 14 | "OU": "dex server", 15 | "L": "the internet" 16 | } 17 | ] 18 | } -------------------------------------------------------------------------------- /tls-setup/req-csr-dex.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "dex", 3 | "hosts": [ 4 | "localhost", 5 | "127.0.0.1", 6 | "dex.dex.svc" 7 | ], 8 | "key": { 9 | "algo": "ecdsa", 10 | "size": 384 11 | }, 12 | "names": [ 13 | { 14 | "O": "autogenerated", 15 | "OU": "dex server", 16 | "L": "the internet" 17 | } 18 | ] 19 | } -------------------------------------------------------------------------------- /tls-setup/ca-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "Autogenerated CA", 3 | "key": { 4 | "algo": "ecdsa", 
5 | "size": 384 6 | }, 7 | "names": [ 8 | { 9 | "O": "Honest Achmed's Used Certificates", 10 | "OU": "Hastily-Generated Values Divison", 11 | "L": "San Francisco", 12 | "ST": "California", 13 | "C": "US" 14 | } 15 | ] 16 | } -------------------------------------------------------------------------------- /example-app/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/brightzheng100/kubernetes-dex-ldap-integration 2 | 3 | go 1.15 4 | 5 | require ( 6 | github.com/coreos/go-oidc v2.2.1+incompatible 7 | github.com/pquerna/cachecontrol v0.0.0-20201205024021-ac21108117ac // indirect 8 | github.com/spf13/cobra v1.1.1 9 | golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5 10 | gopkg.in/square/go-jose.v2 v2.5.1 // indirect 11 | ) 12 | -------------------------------------------------------------------------------- /ldap/ldif/2-groups.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=admins,ou=groups,dc=example,dc=org 2 | objectClass: groupOfNames 3 | cn: admins 4 | member: cn=admin1,ou=people,dc=example,dc=org 5 | member: cn=admin2,ou=people,dc=example,dc=org 6 | 7 | dn: cn=developers,ou=groups,dc=example,dc=org 8 | objectClass: groupOfNames 9 | cn: developers 10 | member: cn=developer1,ou=people,dc=example,dc=org 11 | member: cn=developer2,ou=people,dc=example,dc=org -------------------------------------------------------------------------------- /manifests/authorization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | namespace: dex 5 | name: read-pods 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["pods"] 9 | verbs: ["get", "watch", "list"] 10 | --- 11 | apiVersion: rbac.authorization.k8s.io/v1 12 | kind: RoleBinding 13 | metadata: 14 | name: read-pods 15 | namespace: dex 16 | subjects: 17 | - kind: User 18 | name: admin1@example.org 19 | 
apiGroup: rbac.authorization.k8s.io 20 | roleRef: 21 | kind: Role 22 | name: read-pods 23 | apiGroup: rbac.authorization.k8s.io -------------------------------------------------------------------------------- /tls-setup/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: cfssl cfssljson ca req-dex req-k8s clean 2 | 3 | all: cfssl cfssljson ca req-dex req-k8s 4 | 5 | cfssl: 6 | go install github.com/cloudflare/cfssl/cmd/cfssl@v1.6.4 7 | 8 | cfssljson: 9 | go install github.com/cloudflare/cfssl/cmd/cfssljson@v1.6.4 10 | 11 | ca: cfssl cfssljson 12 | mkdir -p _certs 13 | cfssl gencert -initca ca-csr.json | cfssljson -bare _certs/ca 14 | 15 | req-dex: cfssl cfssljson 16 | cfssl gencert \ 17 | -ca _certs/ca.pem \ 18 | -ca-key _certs/ca-key.pem \ 19 | -config ca-config.json \ 20 | req-csr-dex.json | cfssljson -bare _certs/dex 21 | 22 | req-k8s: cfssl cfssljson 23 | cfssl gencert \ 24 | -ca _certs/ca.pem \ 25 | -ca-key _certs/ca-key.pem \ 26 | -config ca-config.json \ 27 | req-csr-k8s.json | cfssljson -bare _certs/k8s 28 | 29 | clean: 30 | rm -rf _certs -------------------------------------------------------------------------------- /kind/kind.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | # patch the generated kubeadm config with some extra settings 4 | kubeadmConfigPatches: 5 | - | 6 | apiVersion: kubeadm.k8s.io/v1beta2 7 | kind: ClusterConfiguration 8 | metadata: 9 | name: config 10 | apiServer: 11 | extraArgs: 12 | # dex will be deployed in `dex` namespace, exposed by `dex` svc 13 | oidc-issuer-url: https://dex.dex.svc:32000 14 | # the client-id that is inbuilt in the example-app 15 | oidc-client-id: example-app 16 | # the CA that we generated for Dex 17 | oidc-ca-file: /etc/ssl/certs/dex/ca.pem 18 | # email will be used as the claim 19 | oidc-username-claim: email 20 | oidc-groups-claim: groups 21 | nodes: 22 | - 
role: control-plane 23 | extraMounts: 24 | - hostPath: "${PROJECT_ROOT}/tls-setup/_certs" 25 | containerPath: /etc/ssl/certs/dex 26 | - role: worker 27 | -------------------------------------------------------------------------------- /ldap/ldif/1-users.ldif: -------------------------------------------------------------------------------- 1 | # admin1 2 | dn: cn=admin1,ou=people,dc=example,dc=org 3 | objectClass: inetOrgPerson 4 | sn: admin1 5 | cn: admin1 6 | uid: admin1 7 | mail: admin1@example.org 8 | # secret, by: slappasswd -h {SSHA} -s secret 9 | userPassword: {SSHA}RRN6AM9u0tpTEOn6oBcIt9X3BbFPKVk5 10 | 11 | # admin2 12 | dn: cn=admin2,ou=people,dc=example,dc=org 13 | objectClass: inetOrgPerson 14 | sn: admin2 15 | cn: admin2 16 | uid: admin2 17 | mail: admin2@example.org 18 | # secret 19 | userPassword: {SSHA}RRN6AM9u0tpTEOn6oBcIt9X3BbFPKVk5 20 | 21 | # developer1 22 | dn: cn=developer1,ou=people,dc=example,dc=org 23 | objectClass: inetOrgPerson 24 | sn: developer1 25 | cn: developer1 26 | uid: developer1 27 | mail: developer1@example.org 28 | userPassword: {SSHA}RRN6AM9u0tpTEOn6oBcIt9X3BbFPKVk5 29 | 30 | # developer2 31 | dn: cn=developer2,ou=people,dc=example,dc=org 32 | objectClass: inetOrgPerson 33 | sn: developer2 34 | cn: developer2 35 | uid: developer2 36 | mail: developer2@example.org 37 | userPassword: {SSHA}RRN6AM9u0tpTEOn6oBcIt9X3BbFPKVk5 -------------------------------------------------------------------------------- /ldap/ldap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: openldap 5 | labels: 6 | app.kubernetes.io/name: openldap 7 | spec: 8 | type: ClusterIP 9 | ports: 10 | - name: tcp-ldap 11 | port: 389 12 | targetPort: tcp-ldap 13 | selector: 14 | app.kubernetes.io/name: openldap 15 | --- 16 | apiVersion: apps/v1 17 | kind: Deployment 18 | metadata: 19 | name: openldap 20 | labels: 21 | app.kubernetes.io/name: openldap 22 | spec: 23 | 
selector: 24 | matchLabels: 25 | app.kubernetes.io/name: openldap 26 | replicas: 1 27 | template: 28 | metadata: 29 | labels: 30 | app.kubernetes.io/name: openldap 31 | spec: 32 | containers: 33 | - name: openldap 34 | image: osixia/openldap:1.5.0 35 | imagePullPolicy: "Always" 36 | env: 37 | - name: LDAP_ROOT 38 | value: "dc=example,dc=org" 39 | - name: LDAP_ADMIN_USERNAME 40 | value: "admin" 41 | - name: LDAP_ADMIN_PASSWORD 42 | valueFrom: 43 | secretKeyRef: 44 | key: adminpassword 45 | name: openldap 46 | # - name: LDAP_USERS 47 | # valueFrom: 48 | # secretKeyRef: 49 | # key: users 50 | # name: openldap 51 | # - name: LDAP_PASSWORDS 52 | # valueFrom: 53 | # secretKeyRef: 54 | # key: passwords 55 | # name: openldap 56 | - name: LDAP_CUSTOM_LDIF_DIR 57 | value: "/ldifs" 58 | ports: 59 | - name: tcp-ldap 60 | containerPort: 389 61 | volumeMounts: 62 | - name: ldap 63 | mountPath: /ldifs 64 | volumes: 65 | - name: ldap 66 | configMap: 67 | name: ldap -------------------------------------------------------------------------------- /example-app/templates.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "html/template" 5 | "log" 6 | "net/http" 7 | ) 8 | 9 | var indexTmpl = template.Must(template.New("index.html").Parse(` 10 |
11 | 26 | 27 | `)) 28 | 29 | func renderIndex(w http.ResponseWriter) { 30 | renderTemplate(w, indexTmpl, nil) 31 | } 32 | 33 | type tokenTmplData struct { 34 | IDToken string 35 | AccessToken string 36 | RefreshToken string 37 | RedirectURL string 38 | Claims string 39 | } 40 | 41 | var tokenTmpl = template.Must(template.New("token.html").Parse(` 42 | 43 | 53 | 54 | 55 |ID Token:
{{ .IDToken }}
56 | Access Token:
{{ .AccessToken }}
57 | Claims:
{{ .Claims }}
58 | {{ if .RefreshToken }}
59 | Refresh Token:
{{ .RefreshToken }}
60 |
64 | {{ end }}
65 |
66 |
67 | `))
68 |
69 | func renderToken(w http.ResponseWriter, redirectURL, idToken, accessToken, refreshToken, claims string) {
70 | renderTemplate(w, tokenTmpl, tokenTmplData{
71 | IDToken: idToken,
72 | AccessToken: accessToken,
73 | RefreshToken: refreshToken,
74 | RedirectURL: redirectURL,
75 | Claims: claims,
76 | })
77 | }
78 |
79 | func renderTemplate(w http.ResponseWriter, tmpl *template.Template, data interface{}) {
80 | err := tmpl.Execute(w, data)
81 | if err == nil {
82 | return
83 | }
84 |
85 | switch err := err.(type) {
86 | case *template.Error:
87 | // An ExecError guarantees that Execute has not written to the underlying reader.
88 | log.Printf("Error rendering template %s: %s", tmpl.Name(), err)
89 |
90 | // TODO(ericchiang): replace with better internal server error.
91 | http.Error(w, "Internal server error", http.StatusInternalServerError)
92 | default:
93 | // An error with the underlying write, such as the connection being
94 | // dropped. Ignore for now.
95 | }
96 | }
97 |
--------------------------------------------------------------------------------
/setup.sh:
--------------------------------------------------------------------------------
1 |
# Print a timestamped, newline-terminated log message.
log() {
  local ts
  ts="$(date +"%Y-%m-%d %H:%M:%S %Z")"
  echo "${ts}: $*"
}
5 |
# Print a timestamped log message WITHOUT a trailing newline
# (used so a status word can be appended on the same line).
logn() {
  local ts
  ts="$(date +"%Y-%m-%d %H:%M:%S %Z")"
  echo -n "${ts}: $*"
}
9 |
# Check whether a required tool is available on PATH.
# Prints ".. installed" / ".. NOT installed" and returns
# 0 (true) when the tool is MISSING, 1 (false) when it is present.
# Note: "$1" is quoted and `command -v` is used directly, so names
# containing spaces or glob characters cannot break the test.
is_required_tool_missed() {
  logn "--> Checking required tool: $1 ... "
  if command -v "$1" >/dev/null 2>&1; then
    echo "installed"
    false
  else
    echo "NOT installed"
    true
  fi
}
20 |

# Firstly, let's do a quick check for required tools
missed_tools=0
log "Firstly, let's do a quick check for required tools ..."
# check docker
if is_required_tool_missed "docker"; then missed_tools=$((missed_tools+1)); fi
# check git
if is_required_tool_missed "git"; then missed_tools=$((missed_tools+1)); fi
# check cfssl
if is_required_tool_missed "cfssl"; then missed_tools=$((missed_tools+1)); fi
# check cfssljson
if is_required_tool_missed "cfssljson"; then missed_tools=$((missed_tools+1)); fi
# check kind
if is_required_tool_missed "kind"; then missed_tools=$((missed_tools+1)); fi
# check kubectl
if is_required_tool_missed "kubectl"; then missed_tools=$((missed_tools+1)); fi
# check jq -- used below to extract the Dex NodePort
if is_required_tool_missed "jq"; then missed_tools=$((missed_tools+1)); fi
# check envsubst -- used below to render kind/kind.yaml
if is_required_tool_missed "envsubst"; then missed_tools=$((missed_tools+1)); fi
# final check. Note: inside [[ ]], `>` compares strings lexicographically,
# so the numeric operator -gt must be used for the counter.
if [[ $missed_tools -gt 0 ]]; then
  log "Abort! There are some required tools missing, please have a check."
  exit 98
fi


# Generating TLS for both Kubernetes and Dex
log "Generating TLS for both Kubernetes and Dex ..."
pushd tls-setup
make ca req-dex req-k8s
popd


# Creating Kubernetes cluster with API Server configured
log "Creating Kubernetes cluster with API Server configured ..."
# kind/kind.yaml references ${PROJECT_ROOT} for the cert hostPath mount
PROJECT_ROOT="$(pwd)" envsubst < kind/kind.yaml | kind create cluster --name dex-ldap-cluster --config -


# Deploying OpenLDAP in namespace 'ldap' as the LDAP Server
log "Deploying OpenLDAP in namespace 'ldap' as the LDAP Server ..."
kubectl create ns ldap
kubectl create secret generic openldap \
  --namespace ldap \
  --from-literal=adminpassword=adminpassword
kubectl create configmap ldap \
  --namespace ldap \
  --from-file=ldap/ldif
kubectl apply --namespace ldap -f ldap/ldap.yaml
kubectl wait --namespace ldap --for=condition=ready pod -l app.kubernetes.io/name=openldap


# Initializing some dummy LDAP entities
log "Initializing some dummy LDAP entities ..."
sleep 5
LDAP_POD=$(kubectl -n ldap get pod -l "app.kubernetes.io/name=openldap" -o jsonpath="{.items[0].metadata.name}")
kubectl -n ldap exec "$LDAP_POD" -- ldapadd -x -D "cn=admin,dc=example,dc=org" -w adminpassword -H ldap://localhost:389 -f /ldifs/0-ous.ldif
kubectl -n ldap exec "$LDAP_POD" -- ldapadd -x -D "cn=admin,dc=example,dc=org" -w adminpassword -H ldap://localhost:389 -f /ldifs/1-users.ldif
kubectl -n ldap exec "$LDAP_POD" -- ldapadd -x -D "cn=admin,dc=example,dc=org" -w adminpassword -H ldap://localhost:389 -f /ldifs/2-groups.ldif
# List down the entities loaded
kubectl -n ldap exec "$LDAP_POD" -- \
  ldapsearch -LLL -x -H ldap://localhost:389 -D "cn=admin,dc=example,dc=org" -w adminpassword -b "ou=people,dc=example,dc=org" dn


# Deploying Dex in namespace 'dex'
log "Deploying Dex in namespace 'dex' ..."
kubectl create ns dex
kubectl create secret tls dex-tls \
  --namespace dex \
  --cert=tls-setup/_certs/dex.pem \
  --key=tls-setup/_certs/dex-key.pem
kubectl apply --namespace dex -f dex/dex.yaml
kubectl wait --namespace dex --for=condition=ready pod -l app=dex


# Creating a proxy to access Dex directly from laptop
log "Creating a proxy to access Dex directly from laptop ..."
SVC_PORT="$(kubectl get -n dex svc/dex -o json | jq '.spec.ports[0].nodePort')"
docker run -d --restart always \
  --name "dex-kind-proxy-$SVC_PORT" \
  --publish "127.0.0.1:$SVC_PORT:$SVC_PORT" \
  --link dex-ldap-cluster-control-plane:target \
  --network kind \
  alpine/socat -dd \
  "tcp-listen:$SVC_PORT,fork,reuseaddr" "tcp-connect:target:$SVC_PORT"
--------------------------------------------------------------------------------
/dex/dex.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: dex
5 | spec:
6 | type: NodePort
7 | ports:
8 | - name: dex
9 | port: 5556
10 | protocol: TCP
11 | targetPort: 5556
12 | nodePort: 32000
13 | selector:
14 | app: dex
15 | ---
16 | apiVersion: v1
17 | kind: ServiceAccount
18 | metadata:
19 | labels:
20 | app: dex
21 | name: dex
22 | ---
23 | apiVersion: apps/v1
24 | kind: Deployment
25 | metadata:
26 | labels:
27 | app: dex
28 | name: dex
29 | spec:
30 | replicas: 1
31 | selector:
32 | matchLabels:
33 | app: dex
34 | template:
35 | metadata:
36 | labels:
37 | app: dex
38 | spec:
39 | serviceAccountName: dex
40 | containers:
41 | - image: ghcr.io/dexidp/dex:v2.30.0
42 | name: dex
43 | command: ["/usr/local/bin/dex", "serve", "/etc/dex/cfg/config.yaml"]
44 |
45 | ports:
46 | - name: https
47 | containerPort: 5556
48 |
49 | volumeMounts:
50 | - name: dex-config
51 | mountPath: /etc/dex/cfg
52 | - name: dex-tls
53 | mountPath: /etc/dex/tls
54 | volumes:
55 | - name: dex-config
56 | configMap:
57 | name: dex-config
58 | items:
59 | - key: config.yaml
60 | path: config.yaml
61 | - name: dex-tls
62 | secret:
63 | secretName: dex-tls
64 | ---
65 | kind: ConfigMap
66 | apiVersion: v1
67 | metadata:
68 | name: dex-config
69 | data:
70 | config.yaml: |
71 | issuer: https://dex.dex.svc:32000
72 | storage:
73 | type: kubernetes
74 | config:
75 | inCluster: true
76 | web:
77 | https: 0.0.0.0:5556
78 | tlsCert: /etc/dex/tls/tls.crt
79 | tlsKey: /etc/dex/tls/tls.key
80 | connectors:
81 | - type: ldap
82 | name: OpenLDAP
83 | id: ldap
84 | config:
85 | host: openldap.ldap.svc:389
86 |
87 | # No TLS for this setup.
88 | insecureNoSSL: true
89 |
90 | # This would normally be a read-only user.
91 | bindDN: cn=admin,dc=example,dc=org
92 | bindPW: adminpassword
93 |
94 | usernamePrompt: Email Address
95 |
96 | userSearch:
97 | baseDN: ou=people,dc=example,dc=org
98 | filter: "(objectclass=inetOrgPerson)"
99 | username: mail
100 | # "DN" (case sensitive) is a special attribute name. It indicates that
101 | # this value should be taken from the entity's DN not an attribute on
102 | # the entity.
103 | idAttr: DN
104 | emailAttr: mail
105 | nameAttr: cn
106 |
107 | groupSearch:
108 | baseDN: ou=groups,dc=example,dc=org
109 | filter: "(objectClass=groupOfNames)"
110 |
111 | userMatchers:
112 | # A user is a member of a group when their DN matches
113 | # the value of a "member" attribute on the group entity.
114 | - userAttr: DN
115 | groupAttr: member
116 |
117 | # The group name should be the "cn" value.
118 | nameAttr: cn
119 | oauth2:
120 | skipApprovalScreen: true
121 |
122 | staticClients:
123 | - id: example-app
124 | redirectURIs:
125 | - 'http://127.0.0.1:5555/callback'
126 | name: 'Example App'
127 | secret: ZXhhbXBsZS1hcHAtc2VjcmV0
128 | ---
129 | apiVersion: rbac.authorization.k8s.io/v1
130 | kind: ClusterRole
131 | metadata:
132 | name: dex
133 | rules:
134 | - apiGroups: ["dex.coreos.com"] # API group created by dex
135 | resources: ["*"]
136 | verbs: ["*"]
137 | - apiGroups: ["apiextensions.k8s.io"]
138 | resources: ["customresourcedefinitions"]
139 | verbs: ["create"] # To manage its own resources, dex must be able to create customresourcedefinitions
140 | ---
141 | apiVersion: rbac.authorization.k8s.io/v1
142 | kind: ClusterRoleBinding
143 | metadata:
144 | name: dex
145 | roleRef:
146 | apiGroup: rbac.authorization.k8s.io
147 | kind: ClusterRole
148 | name: dex
149 | subjects:
150 | - kind: ServiceAccount
151 | name: dex # Service account assigned to the dex pod, created above
152 | namespace: dex # The namespace dex is running in
153 |
--------------------------------------------------------------------------------
/manifests/kube-apiserver.yaml:
--------------------------------------------------------------------------------
1 | # a sample kube-apiserver.yaml manifest when external oidc is configured
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | annotations:
6 | kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 172.18.0.3:6443
7 | creationTimestamp: null
8 | labels:
9 | component: kube-apiserver
10 | tier: control-plane
11 | name: kube-apiserver
12 | namespace: kube-system
13 | spec:
14 | containers:
15 | - command:
16 | - kube-apiserver
17 | - --advertise-address=172.18.0.3
18 | - --allow-privileged=true
19 | - --authorization-mode=Node,RBAC
20 | - --client-ca-file=/etc/kubernetes/pki/ca.crt
21 | - --enable-admission-plugins=NodeRestriction
22 | - --enable-bootstrap-token-auth=true
23 | - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
24 | - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
25 | - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
26 | - --etcd-servers=https://127.0.0.1:2379
27 | - --insecure-port=0
28 | - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
29 | - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
30 | - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
31 | - --oidc-ca-file=/etc/ssl/certs/dex/ca.pem
32 | - --oidc-client-id=example-app
33 | - --oidc-groups-claim=groups
34 | - --oidc-issuer-url=https://dex.dex.svc:32000
35 | - --oidc-username-claim=email
36 | - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
37 | - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
38 | - --requestheader-allowed-names=front-proxy-client
39 | - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
40 | - --requestheader-extra-headers-prefix=X-Remote-Extra-
41 | - --requestheader-group-headers=X-Remote-Group
42 | - --requestheader-username-headers=X-Remote-User
43 | - --runtime-config=
44 | - --secure-port=6443
45 | - --service-account-key-file=/etc/kubernetes/pki/sa.pub
46 | - --service-cluster-ip-range=10.96.0.0/16
47 | - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
48 | - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
49 | image: k8s.gcr.io/kube-apiserver:v1.19.1
50 | imagePullPolicy: IfNotPresent
51 | livenessProbe:
52 | failureThreshold: 8
53 | httpGet:
54 | host: 172.18.0.3
55 | path: /livez
56 | port: 6443
57 | scheme: HTTPS
58 | initialDelaySeconds: 10
59 | periodSeconds: 10
60 | timeoutSeconds: 15
61 | name: kube-apiserver
62 | readinessProbe:
63 | failureThreshold: 3
64 | httpGet:
65 | host: 172.18.0.3
66 | path: /readyz
67 | port: 6443
68 | scheme: HTTPS
69 | periodSeconds: 1
70 | timeoutSeconds: 15
71 | resources:
72 | requests:
73 | cpu: 250m
74 | startupProbe:
75 | failureThreshold: 24
76 | httpGet:
77 | host: 172.18.0.3
78 | path: /livez
79 | port: 6443
80 | scheme: HTTPS
81 | initialDelaySeconds: 10
82 | periodSeconds: 10
83 | timeoutSeconds: 15
84 | volumeMounts:
85 | - mountPath: /etc/ssl/certs
86 | name: ca-certs
87 | readOnly: true
88 | - mountPath: /etc/ca-certificates
89 | name: etc-ca-certificates
90 | readOnly: true
91 | - mountPath: /etc/kubernetes/pki
92 | name: k8s-certs
93 | readOnly: true
94 | - mountPath: /usr/local/share/ca-certificates
95 | name: usr-local-share-ca-certificates
96 | readOnly: true
97 | - mountPath: /usr/share/ca-certificates
98 | name: usr-share-ca-certificates
99 | readOnly: true
100 | hostNetwork: true
101 | priorityClassName: system-node-critical
102 | volumes:
103 | - hostPath:
104 | path: /etc/ssl/certs
105 | type: DirectoryOrCreate
106 | name: ca-certs
107 | - hostPath:
108 | path: /etc/ca-certificates
109 | type: DirectoryOrCreate
110 | name: etc-ca-certificates
111 | - hostPath:
112 | path: /etc/kubernetes/pki
113 | type: DirectoryOrCreate
114 | name: k8s-certs
115 | - hostPath:
116 | path: /usr/local/share/ca-certificates
117 | type: DirectoryOrCreate
118 | name: usr-local-share-ca-certificates
119 | - hostPath:
120 | path: /usr/share/ca-certificates
121 | type: DirectoryOrCreate
122 | name: usr-share-ca-certificates
123 | status: {}
--------------------------------------------------------------------------------
/example-app/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "crypto/tls"
7 | "crypto/x509"
8 | "encoding/json"
9 | "errors"
10 | "fmt"
11 | "io/ioutil"
12 | "log"
13 | "net"
14 | "net/http"
15 | "net/http/httputil"
16 | "net/url"
17 | "os"
18 | "strings"
19 | "time"
20 |
21 | "github.com/coreos/go-oidc"
22 | "github.com/spf13/cobra"
23 | "golang.org/x/oauth2"
24 | )
25 |
26 | const exampleAppState = "I wish to wash my irish wristwatch"
27 |
28 | type app struct {
29 | clientID string
30 | clientSecret string
31 | redirectURI string
32 |
33 | verifier *oidc.IDTokenVerifier
34 | provider *oidc.Provider
35 |
36 | // Does the provider use "offline_access" scope to request a refresh token
37 | // or does it use "access_type=offline" (e.g. Google)?
38 | offlineAsScope bool
39 |
40 | client *http.Client
41 | }
42 |
43 | // return an HTTP client which trusts the provided root CAs.
// httpClientForRootCAs returns an HTTP client which trusts only the root CAs
// found in the PEM file at path rootCAs.
//
// It returns an error if the file cannot be read or contains no parseable
// certificates.
func httpClientForRootCAs(rootCAs string) (*http.Client, error) {
	pool := x509.NewCertPool()
	rootCABytes, err := ioutil.ReadFile(rootCAs)
	if err != nil {
		return nil, fmt.Errorf("failed to read root-ca: %v", err)
	}
	if !pool.AppendCertsFromPEM(rootCABytes) {
		return nil, fmt.Errorf("no certs found in root CA file %q", rootCAs)
	}
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{RootCAs: pool},
			Proxy:           http.ProxyFromEnvironment,
			// DialContext supersedes the deprecated Transport.Dial field and
			// allows dials to be cancelled with the request's context.
			DialContext: (&net.Dialer{
				Timeout:   30 * time.Second,
				KeepAlive: 30 * time.Second,
			}).DialContext,
			TLSHandshakeTimeout:   10 * time.Second,
			ExpectContinueTimeout: 1 * time.Second,
		},
	}, nil
}
66 |
67 | type debugTransport struct {
68 | t http.RoundTripper
69 | }
70 |
71 | func (d debugTransport) RoundTrip(req *http.Request) (*http.Response, error) {
72 | reqDump, err := httputil.DumpRequest(req, true)
73 | if err != nil {
74 | return nil, err
75 | }
76 | log.Printf("%s", reqDump)
77 |
78 | resp, err := d.t.RoundTrip(req)
79 | if err != nil {
80 | return nil, err
81 | }
82 |
83 | respDump, err := httputil.DumpResponse(resp, true)
84 | if err != nil {
85 | resp.Body.Close()
86 | return nil, err
87 | }
88 | log.Printf("%s", respDump)
89 | return resp, nil
90 | }
91 |
// cmd builds the example-app command: an OpenID Connect client that serves a
// small web UI for walking through the authorization-code flow against an
// OIDC issuer (Dex in this project). The returned command blocks in RunE,
// serving HTTP(S) until the listener fails.
func cmd() *cobra.Command {
	var (
		a         app
		issuerURL string
		listen    string
		tlsCert   string
		tlsKey    string
		rootCAs   string
		debug     bool
	)
	c := cobra.Command{
		Use:   "example-app",
		Short: "An example OpenID Connect client",
		Long:  "",
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) != 0 {
				return errors.New("surplus arguments provided")
			}

			// The redirect URI's path becomes the callback handler's route;
			// the listen URL decides http vs https serving below.
			u, err := url.Parse(a.redirectURI)
			if err != nil {
				return fmt.Errorf("parse redirect-uri: %v", err)
			}
			listenURL, err := url.Parse(listen)
			if err != nil {
				return fmt.Errorf("parse listen address: %v", err)
			}

			// Optional custom trust root for the issuer's TLS certificate.
			if rootCAs != "" {
				client, err := httpClientForRootCAs(rootCAs)
				if err != nil {
					return err
				}
				a.client = client
			}

			// Debug mode wraps whichever transport is in effect so every
			// request/response to the issuer is dumped to the log.
			if debug {
				if a.client == nil {
					a.client = &http.Client{
						Transport: debugTransport{http.DefaultTransport},
					}
				} else {
					a.client.Transport = debugTransport{a.client.Transport}
				}
			}

			if a.client == nil {
				a.client = http.DefaultClient
			}

			// TODO(ericchiang): Retry with backoff
			ctx := oidc.ClientContext(context.Background(), a.client)
			provider, err := oidc.NewProvider(ctx, issuerURL)
			if err != nil {
				return fmt.Errorf("failed to query provider %q: %v", issuerURL, err)
			}

			var s struct {
				// What scopes does a provider support?
				//
				// See: https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
				ScopesSupported []string `json:"scopes_supported"`
			}
			if err := provider.Claims(&s); err != nil {
				return fmt.Errorf("failed to parse provider scopes_supported: %v", err)
			}

			if len(s.ScopesSupported) == 0 {
				// scopes_supported is a "RECOMMENDED" discovery claim, not a required
				// one. If missing, assume that the provider follows the spec and has
				// an "offline_access" scope.
				a.offlineAsScope = true
			} else {
				// See if scopes_supported has the "offline_access" scope.
				a.offlineAsScope = func() bool {
					for _, scope := range s.ScopesSupported {
						if scope == oidc.ScopeOfflineAccess {
							return true
						}
					}
					return false
				}()
			}

			a.provider = provider
			a.verifier = provider.Verifier(&oidc.Config{ClientID: a.clientID})

			// Routes: index page, login kick-off, and the OAuth2 callback
			// (at whatever path the redirect URI specifies).
			http.HandleFunc("/", a.handleIndex)
			http.HandleFunc("/login", a.handleLogin)
			http.HandleFunc(u.Path, a.handleCallback)

			switch listenURL.Scheme {
			case "http":
				log.Printf("listening on %s", listen)
				return http.ListenAndServe(listenURL.Host, nil)
			case "https":
				log.Printf("listening on %s", listen)
				return http.ListenAndServeTLS(listenURL.Host, tlsCert, tlsKey, nil)
			default:
				return fmt.Errorf("listen address %q is not using http or https", listen)
			}
		},
	}
	c.Flags().StringVar(&a.clientID, "client-id", "example-app", "OAuth2 client ID of this application.")
	c.Flags().StringVar(&a.clientSecret, "client-secret", "ZXhhbXBsZS1hcHAtc2VjcmV0", "OAuth2 client secret of this application.")
	c.Flags().StringVar(&a.redirectURI, "redirect-uri", "http://127.0.0.1:5555/callback", "Callback URL for OAuth2 responses.")
	c.Flags().StringVar(&issuerURL, "issuer", "http://127.0.0.1:5556/dex", "URL of the OpenID Connect issuer.")
	c.Flags().StringVar(&listen, "listen", "http://127.0.0.1:5555", "HTTP(S) address to listen at.")
	c.Flags().StringVar(&tlsCert, "tls-cert", "", "X509 cert file to present when serving HTTPS.")
	c.Flags().StringVar(&tlsKey, "tls-key", "", "Private key for the HTTPS cert.")
	c.Flags().StringVar(&rootCAs, "issuer-root-ca", "", "Root certificate authorities for the issuer. Defaults to host certs.")
	c.Flags().BoolVar(&debug, "debug", false, "Print all request and responses from the OpenID Connect issuer.")
	return &c
}
206 |
207 | func main() {
208 | if err := cmd().Execute(); err != nil {
209 | fmt.Fprintf(os.Stderr, "error: %v\n", err)
210 | os.Exit(2)
211 | }
212 | }
213 |
// handleIndex serves the index page containing the login form.
func (a *app) handleIndex(w http.ResponseWriter, r *http.Request) {
	renderIndex(w)
}
217 |
218 | func (a *app) oauth2Config(scopes []string) *oauth2.Config {
219 | return &oauth2.Config{
220 | ClientID: a.clientID,
221 | ClientSecret: a.clientSecret,
222 | Endpoint: a.provider.Endpoint(),
223 | Scopes: scopes,
224 | RedirectURL: a.redirectURI,
225 | }
226 | }
227 |
228 | func (a *app) handleLogin(w http.ResponseWriter, r *http.Request) {
229 | var scopes []string
230 | if extraScopes := r.FormValue("extra_scopes"); extraScopes != "" {
231 | scopes = strings.Split(extraScopes, " ")
232 | }
233 | var clients []string
234 | if crossClients := r.FormValue("cross_client"); crossClients != "" {
235 | clients = strings.Split(crossClients, " ")
236 | }
237 | for _, client := range clients {
238 | scopes = append(scopes, "audience:server:client_id:"+client)
239 | }
240 | connectorID := ""
241 | if id := r.FormValue("connector_id"); id != "" {
242 | connectorID = id
243 | }
244 |
245 | authCodeURL := ""
246 | scopes = append(scopes, "openid", "profile", "email")
247 | if r.FormValue("offline_access") != "yes" {
248 | authCodeURL = a.oauth2Config(scopes).AuthCodeURL(exampleAppState)
249 | } else if a.offlineAsScope {
250 | scopes = append(scopes, "offline_access")
251 | authCodeURL = a.oauth2Config(scopes).AuthCodeURL(exampleAppState)
252 | } else {
253 | authCodeURL = a.oauth2Config(scopes).AuthCodeURL(exampleAppState, oauth2.AccessTypeOffline)
254 | }
255 | if connectorID != "" {
256 | authCodeURL = authCodeURL + "&connector_id=" + connectorID
257 | }
258 |
259 | http.Redirect(w, r, authCodeURL, http.StatusSeeOther)
260 | }
261 |
262 | func (a *app) handleCallback(w http.ResponseWriter, r *http.Request) {
263 | var (
264 | err error
265 | token *oauth2.Token
266 | )
267 |
268 | ctx := oidc.ClientContext(r.Context(), a.client)
269 | oauth2Config := a.oauth2Config(nil)
270 | switch r.Method {
271 | case http.MethodGet:
272 | // Authorization redirect callback from OAuth2 auth flow.
273 | if errMsg := r.FormValue("error"); errMsg != "" {
274 | http.Error(w, errMsg+": "+r.FormValue("error_description"), http.StatusBadRequest)
275 | return
276 | }
277 | code := r.FormValue("code")
278 | if code == "" {
279 | http.Error(w, fmt.Sprintf("no code in request: %q", r.Form), http.StatusBadRequest)
280 | return
281 | }
282 | if state := r.FormValue("state"); state != exampleAppState {
283 | http.Error(w, fmt.Sprintf("expected state %q got %q", exampleAppState, state), http.StatusBadRequest)
284 | return
285 | }
286 | token, err = oauth2Config.Exchange(ctx, code)
287 | case http.MethodPost:
288 | // Form request from frontend to refresh a token.
289 | refresh := r.FormValue("refresh_token")
290 | if refresh == "" {
291 | http.Error(w, fmt.Sprintf("no refresh_token in request: %q", r.Form), http.StatusBadRequest)
292 | return
293 | }
294 | t := &oauth2.Token{
295 | RefreshToken: refresh,
296 | Expiry: time.Now().Add(-time.Hour),
297 | }
298 | token, err = oauth2Config.TokenSource(ctx, t).Token()
299 | default:
300 | http.Error(w, fmt.Sprintf("method not implemented: %s", r.Method), http.StatusBadRequest)
301 | return
302 | }
303 |
304 | if err != nil {
305 | http.Error(w, fmt.Sprintf("failed to get token: %v", err), http.StatusInternalServerError)
306 | return
307 | }
308 |
309 | rawIDToken, ok := token.Extra("id_token").(string)
310 | if !ok {
311 | http.Error(w, "no id_token in token response", http.StatusInternalServerError)
312 | return
313 | }
314 |
315 | idToken, err := a.verifier.Verify(r.Context(), rawIDToken)
316 | if err != nil {
317 | http.Error(w, fmt.Sprintf("failed to verify ID token: %v", err), http.StatusInternalServerError)
318 | return
319 | }
320 |
321 | accessToken, ok := token.Extra("access_token").(string)
322 | if !ok {
323 | http.Error(w, "no access_token in token response", http.StatusInternalServerError)
324 | return
325 | }
326 |
327 | var claims json.RawMessage
328 | if err := idToken.Claims(&claims); err != nil {
329 | http.Error(w, fmt.Sprintf("error decoding ID token claims: %v", err), http.StatusInternalServerError)
330 | return
331 | }
332 |
333 | buff := new(bytes.Buffer)
334 | if err := json.Indent(buff, []byte(claims), "", " "); err != nil {
335 | http.Error(w, fmt.Sprintf("error indenting ID token claims: %v", err), http.StatusInternalServerError)
336 | return
337 | }
338 |
339 | renderToken(w, a.redirectURI, rawIDToken, accessToken, token.RefreshToken, buff.String())
340 | }
341 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Kubernetes + Dex + LDAP Integration
2 |
3 | A simple walk-through guide for how to integrate `Kubernetes` with `Dex` + `LDAP`.
4 |
5 | In this experiment, we're going to use these major components:
6 |
7 | - Kubernetes v1.21.x, powered by [`kind` v0.11.1](https://kind.sigs.k8s.io/);
8 | - [Dex](https://github.com/dexidp/dex) v2.30.x;
9 | - [OpenLDAP](https://www.openldap.org/) with [osixia/openldap:1.5.x](https://github.com/osixia/docker-openldap)
10 |
11 | A Medium article was posted too, here: https://brightzheng100.medium.com/kubernetes-dex-ldap-integration-f305292a16b9
12 |
13 | The overall idea can be illustrated as below:
14 |
15 | 
16 |
17 | ## Get Started
18 |
19 | ```sh
20 | git clone https://github.com/brightzheng100/kubernetes-dex-ldap-integration.git
21 | cd kubernetes-dex-ldap-integration
22 | ```
23 |
24 | ## The TL;DR Guide
25 |
26 | ### Setup
27 |
The TL;DR guide uses the script here: [setup.sh](setup.sh), which will:
1. check that the required tools are installed, namely: `docker`, `git`, `cfssl`, `cfssljson`, `kind`, `kubectl`;
30 | 2. generate the necessary TLS certs/keys for both Kubernetes and Dex;
31 | 3. create `kind`-powered Kubernetes with OIDC configured with Dex;
32 | 4. deploy OpenLDAP in namespace `ldap` as the LDAP Server with some dummy entities;
33 | 5. deploy Dex in namespace `dex`;
34 | 6. create a proxy so that we can access Dex from our laptop (e.g. my MBP)
35 |
36 | ```sh
37 | ./setup.sh
38 | ```
39 |
40 | > Note: the populated dummy LDAP entities, all with password `secret`, include:
41 | > - `admin1@example.org`
42 | > - `admin2@example.org`
43 | > - `developer1@example.org`
44 | > - `developer2@example.org`
45 |
46 | ### Use
47 |
48 | It's common to set up the kube config, e.g. `~/.kube/config`, for daily use.
49 |
50 | For that, we may simply follow these steps:
51 |
52 | 1. Bind some users, say **"admin1@example.org"**, as the **"cluster-admin"**
53 |
54 | ```sh
55 | $ kubectl create clusterrolebinding oidc-cluster-admin \
56 | --clusterrole=cluster-admin \
57 | --user="admin1@example.org"
58 | ```
59 |
60 | 2. Use [`kubelogin`](https://github.com/int128/kubelogin) plugin to simplify the integration
61 |
62 | ```sh
63 | $ echo "127.0.0.1 dex.dex.svc" | sudo tee -a /etc/hosts
64 |
65 | $ SVC_PORT="$(kubectl get -n dex svc/dex -o json | jq '.spec.ports[0].nodePort')"
66 | $ kubectl config set-credentials oidc \
67 | --exec-api-version=client.authentication.k8s.io/v1beta1 \
68 | --exec-command=kubectl \
69 | --exec-arg=oidc-login \
70 | --exec-arg=get-token \
71 | --exec-arg=--oidc-issuer-url=https://dex.dex.svc:$SVC_PORT \
72 | --exec-arg=--oidc-redirect-url-hostname=dex.dex.svc \
73 | --exec-arg=--oidc-client-id=example-app \
74 | --exec-arg=--oidc-client-secret=ZXhhbXBsZS1hcHAtc2VjcmV0 \
75 | --exec-arg=--oidc-extra-scope=email \
76 | --exec-arg=--certificate-authority=`pwd`/tls-setup/_certs/ca.pem
77 | ```
78 |
79 | 3. Use the user to access Kubernetes
80 |
81 | ```sh
82 | $ kubectl --user=oidc get nodes
83 | ```
84 |
This will open an authentication UI in our default browser; key in the credentials of the above-mentioned LDAP user:
86 | - Email Address: `admin1@example.org`
87 | - Password: `secret`
88 |
89 | 
90 |
The login will be authenticated by Dex + LDAP, and once the authentication is done we can see output like:
92 |
93 | ```
94 | $ kubectl --user=oidc get nodes
95 | NAME STATUS ROLES AGE VERSION
96 | dex-ldap-cluster-control-plane Ready control-plane,master 8m55s v1.21.1
97 | dex-ldap-cluster-worker Ready