├── tsi-version.txt ├── examples ├── spire-sidecar │ ├── version.txt │ ├── nodejs │ │ ├── .dockerignore │ │ ├── Dockerfile │ │ ├── package.json │ │ ├── .gitignore │ │ └── index.js │ ├── sidecar │ │ ├── requirements.txt │ │ ├── .env.test │ │ ├── Dockerfile │ │ ├── run-sidecar-bash.sh │ │ └── run-sidecar-python.py │ ├── config │ │ ├── config.ini │ │ ├── config.json │ │ ├── app-python.yaml │ │ ├── app-node.yaml │ │ ├── sidecar.yaml │ │ ├── docker-compose.yml │ │ ├── db-node.yaml │ │ ├── apps.bash.yaml │ │ ├── apps.python.yaml │ │ └── vault-oidc.sh │ ├── python │ │ ├── requirements.txt │ │ ├── Dockerfile │ │ ├── .gitignore │ │ └── main.py │ ├── db-scripts │ │ ├── data.sql │ │ └── schema.sql │ └── Makefile ├── .gitignore ├── spire │ ├── mars-spaceX1.yaml │ ├── mars.txt │ ├── Makefile │ ├── mars.yaml │ ├── mars-spaceX.yaml │ ├── sidecar.yaml │ ├── demo.mars-s3.sh │ ├── mars-demo.yaml │ ├── Dockerfile.busybox.mars │ ├── demo.mars-vault.sh │ ├── Dockerfile.spire │ ├── Dockerfile.mars │ └── vault-oidc.sh ├── vault │ ├── README.md │ ├── ingress.IKS.template.yaml │ └── vault.yaml ├── keycloak │ ├── ingress.IKS.template.yaml │ ├── ingress.template.yaml │ ├── ingress.keycloak.yaml │ ├── myubuntu.id.yaml │ └── keycloak.yaml └── vault-client │ ├── Dockerfile.custom │ ├── makefile │ ├── Dockerfile │ ├── setup-vault-cli.sh │ └── vault-cli.template.yaml ├── .dockerignore ├── docs ├── imgs │ ├── CNCF.stack.jpg │ ├── Multi-cloud.jpg │ ├── multi_cluster.jpg │ ├── single_cluster_local.jpg │ ├── CNCF.stack.description.jpg │ └── single_cluster_openshift.jpg ├── ppt │ └── Secure Supply Chain.SPIRE.pptx ├── README.md ├── x509-agent.md ├── spire-keylime-attestion.md ├── vault.md ├── keycloak.md ├── spire-oidc-tutorial.md └── bootstrap.md ├── charts ├── tornjak │ ├── Chart.yaml │ └── templates │ │ ├── account.tpl │ │ ├── spire-secret.tpl │ │ ├── server-oidc-service.tpl │ │ ├── _helpers.tpl │ │ ├── spire-roles.tpl │ │ ├── tornjak-config.tpl │ │ ├── oidc-dp-configmap.tpl │ │ ├── 
server-service.tpl │ │ ├── ingress.tpl │ │ ├── NOTES.txt │ │ └── server-configmap.tpl └── spire │ ├── Chart.yaml │ ├── templates │ ├── k8s-workload-registrar-account.tpl │ ├── account.tpl │ ├── crd_role_binding.yaml │ ├── agent-cluster-role.tpl │ ├── crd_role.yaml │ ├── k8s-workload-registrar-configmap.tpl │ ├── agent-configmap.tpl │ ├── k8s-workload-registrar-deploy.tpl │ ├── k8s-workload-registrar-roles.tpl │ ├── NOTES.txt │ ├── agent-daemonset.tpl │ └── spiffeid.spiffe.io_spiffeids.tpl │ ├── values.yaml │ └── ext │ └── spiffeid.spiffe.io_spiffeids.yaml ├── components └── tsi-util │ ├── vault-tpl │ ├── tsi-policy.r.hcl.tpl │ ├── tsi-policy.ri.hcl.tpl │ ├── tsi-policy.rcn.hcl.tpl │ └── tsi-policy.rcni.hcl.tpl │ ├── Makefile │ ├── Dockerfile │ ├── getClusterInfo.sh │ ├── load-sample-policies.sh │ ├── register-JSS.sh │ └── vault-setup.sh ├── sample-x509 ├── root.cert.pem ├── intermediate.cert.pem ├── node1.cert.pem ├── node2.cert.pem ├── node3.cert.pem ├── node1.key.pem ├── node2.key.pem ├── root.key.pem ├── node3.key.pem ├── intermediate.key.pem ├── node1-bundle.cert.pem ├── node2-bundle.cert.pem ├── node3-bundle.cert.pem └── generate.go ├── .goreleaser.yml ├── .gitignore ├── utils ├── get-cluster-info.sh ├── spire.db.clean.yaml ├── createKeys.sh ├── deployKeys_keylime.sh └── x509-conf │ ├── createNodeScript.sh │ └── intermediate-config.txt ├── sample-keys ├── openssl.cnf ├── cert.pem └── key.pem ├── MAINTAINERS.md └── README.md /tsi-version.txt: -------------------------------------------------------------------------------- 1 | v1.0.2 2 | -------------------------------------------------------------------------------- /examples/spire-sidecar/version.txt: -------------------------------------------------------------------------------- 1 | v0.1 2 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # all but the binary 2 | **/* 3 | 
!ti-webhook 4 | -------------------------------------------------------------------------------- /examples/spire-sidecar/nodejs/.dockerignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | npm-debug.log -------------------------------------------------------------------------------- /examples/spire-sidecar/sidecar/requirements.txt: -------------------------------------------------------------------------------- 1 | python-decouple==3.5 2 | requests==2.26 -------------------------------------------------------------------------------- /examples/.gitignore: -------------------------------------------------------------------------------- 1 | # Do not sync vendor 2 | */vendor/ 3 | */bin/ 4 | */pkg/ 5 | *.hcl 6 | -------------------------------------------------------------------------------- /docs/imgs/CNCF.stack.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/trusted-service-identity/HEAD/docs/imgs/CNCF.stack.jpg -------------------------------------------------------------------------------- /docs/imgs/Multi-cloud.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/trusted-service-identity/HEAD/docs/imgs/Multi-cloud.jpg -------------------------------------------------------------------------------- /docs/imgs/multi_cluster.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/trusted-service-identity/HEAD/docs/imgs/multi_cluster.jpg -------------------------------------------------------------------------------- /examples/spire-sidecar/config/config.ini: -------------------------------------------------------------------------------- 1 | [mysql] 2 | host=db 3 | port=3306 4 | db=testdb 5 | user=root 6 | passwd=testroot 7 | 
-------------------------------------------------------------------------------- /docs/imgs/single_cluster_local.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/trusted-service-identity/HEAD/docs/imgs/single_cluster_local.jpg -------------------------------------------------------------------------------- /docs/imgs/CNCF.stack.description.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/trusted-service-identity/HEAD/docs/imgs/CNCF.stack.description.jpg -------------------------------------------------------------------------------- /docs/imgs/single_cluster_openshift.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/trusted-service-identity/HEAD/docs/imgs/single_cluster_openshift.jpg -------------------------------------------------------------------------------- /docs/ppt/Secure Supply Chain.SPIRE.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IBM/trusted-service-identity/HEAD/docs/ppt/Secure Supply Chain.SPIRE.pptx -------------------------------------------------------------------------------- /charts/tornjak/Chart.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | appVersion: "0.1" 4 | description: SPIRE Server for TSI Example 5 | name: tornjak 6 | version: 1.0.2 7 | -------------------------------------------------------------------------------- /charts/spire/Chart.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | appVersion: "0.1" 4 | description: SPIRE Agent and Workload Registrar Example 5 | name: spire 6 | version: 1.0.2 7 | -------------------------------------------------------------------------------- 
/charts/spire/templates/k8s-workload-registrar-account.tpl: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: spire-k8s-registrar 5 | namespace: {{ .Values.namespace }} 6 | -------------------------------------------------------------------------------- /examples/spire-sidecar/sidecar/.env.test: -------------------------------------------------------------------------------- 1 | SOCKETFILE=/run/spire/sockets/agent.sock 2 | CFGDIR=/run/db 3 | ROLE=dbrole 4 | VAULT_ADDR=http://tsi-vault.my-cluster-0123456789-0000.eu-de.containers.appdomain.cloud -------------------------------------------------------------------------------- /examples/spire-sidecar/python/requirements.txt: -------------------------------------------------------------------------------- 1 | Flask==2.0.1 2 | Flask-SQLAlchemy==2.5.1 3 | SQLAlchemy==1.4.25 4 | Flask-Migrate==3.1.0 5 | Flask-Script==2.0.6 6 | Flask-Cors==3.0.10 7 | requests==2.26.0 8 | mysqlclient==2.0.3 -------------------------------------------------------------------------------- /examples/spire-sidecar/python/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9 2 | ENV PYTHONUNBUFFERED 1 3 | WORKDIR /app 4 | COPY ./python/requirements.txt /app/requirements.txt 5 | RUN pip install -r requirements.txt 6 | COPY ./python /app 7 | 8 | CMD python main.py -------------------------------------------------------------------------------- /examples/spire-sidecar/db-scripts/data.sql: -------------------------------------------------------------------------------- 1 | USE testdb; 2 | 3 | insert into MOVIE(id, name, year, director, genre) values 4 | (1, "Bruce Almighty", 2003, "Tom Shaydac", "Comedy"), 5 | (2, "The Godfather", 1972, "Francis Ford Coppola", "Crime") 6 | ; -------------------------------------------------------------------------------- 
/components/tsi-util/vault-tpl/tsi-policy.r.hcl.tpl: -------------------------------------------------------------------------------- 1 | # This policy template controls the path using: 2 | # * region 3 | 4 | path "secret/data/tsi-r/{{identity.entity.aliases.<%MOUNT_ACCESSOR%>.metadata.region}}/*" { 5 | capabilities = ["read"] 6 | } 7 | -------------------------------------------------------------------------------- /examples/spire-sidecar/config/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "host" : "db", 3 | "port" : "3306", 4 | "user" : "root", 5 | "password" : "testroot", 6 | "database" : "testdb", 7 | "multipleStatements": true, 8 | "debug": false 9 | } -------------------------------------------------------------------------------- /charts/spire/templates/account.tpl: -------------------------------------------------------------------------------- 1 | {{- if not .Values.openShift }} 2 | # apiVersion: v1 3 | # kind: Namespace 4 | #metadata: 5 | # name: {{ .Values.namespace }} 6 | #--- 7 | apiVersion: v1 8 | kind: ServiceAccount 9 | metadata: 10 | name: spire-agent 11 | namespace: {{ .Values.namespace }} 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /charts/tornjak/templates/account.tpl: -------------------------------------------------------------------------------- 1 | {{- if not .Values.openShift }} 2 | # apiVersion: v1 3 | # kind: Namespace 4 | #metadata: 5 | # name: {{ .Values.namespace }} 6 | #--- 7 | apiVersion: v1 8 | kind: ServiceAccount 9 | metadata: 10 | name: spire-server 11 | namespace: {{ .Values.namespace }} 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /examples/spire-sidecar/nodejs/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:stretch-slim 2 | WORKDIR /usr/src/app 3 | COPY ./nodejs/package*.json ./ 4 | 5 | RUN npm 
install 6 | # If you are building your code for production 7 | # RUN npm ci --only=production 8 | 9 | COPY ./nodejs . 10 | 11 | EXPOSE 8080 12 | 13 | CMD [ "node", "index.js" ] -------------------------------------------------------------------------------- /components/tsi-util/vault-tpl/tsi-policy.ri.hcl.tpl: -------------------------------------------------------------------------------- 1 | # This policy template controls the path using: 2 | # * region 3 | # * images 4 | 5 | path "secret/data/tsi-ri/{{identity.entity.aliases.<%MOUNT_ACCESSOR%>.metadata.region}}/{{identity.entity.aliases.<%MOUNT_ACCESSOR%>.metadata.images}}/*" { 6 | capabilities = ["read"] 7 | } 8 | -------------------------------------------------------------------------------- /charts/tornjak/templates/spire-secret.tpl: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: spire-secret 5 | namespace: {{ .Values.namespace }} 6 | type: Opaque 7 | data: 8 | bootstrap.key: |- 9 | {{ .Files.Get "key.pem" | b64enc | indent 4 }} 10 | bootstrap.crt: |- 11 | {{ .Files.Get "cert.pem" | b64enc | indent 4 }} 12 | -------------------------------------------------------------------------------- /examples/spire-sidecar/db-scripts/schema.sql: -------------------------------------------------------------------------------- 1 | CREATE DATABASE testdb; 2 | USE testdb; 3 | 4 | 5 | DROP TABLE IF EXISTS MOVIE; 6 | 7 | CREATE TABLE MOVIE( 8 | id int(11) NOT NULL AUTO_INCREMENT, 9 | name varchar(20), 10 | year int(11), 11 | director varchar(20), 12 | genre varchar(20), 13 | PRIMARY KEY (id)); -------------------------------------------------------------------------------- /charts/spire/templates/crd_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: spiffe-crd-rolebinding 5 | roleRef: 6 | 
apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: spiffe-crd-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: spire-k8s-registrar 12 | namespace: {{ .Values.namespace }} 13 | -------------------------------------------------------------------------------- /charts/tornjak/templates/server-oidc-service.tpl: -------------------------------------------------------------------------------- 1 | {{- if .Values.oidc.enable }} 2 | # Service definition for the admission webhook 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: spire-oidc 7 | namespace: {{ .Values.namespace }} 8 | spec: 9 | type: LoadBalancer 10 | selector: 11 | app: spire-server 12 | ports: 13 | - name: https 14 | port: 443 15 | targetPort: nginx-oidc-port 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /examples/spire-sidecar/nodejs/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nodetest", 3 | "version": "1.0.0", 4 | "description": "nodejs example", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1", 8 | "start": "node index.js" 9 | }, 10 | "author": "", 11 | "license": "ISC", 12 | "dependencies": { 13 | "body-parser": "^1.20.1", 14 | "express": "^4.18.2", 15 | "mysql": "^2.18.1", 16 | "mysql2": "^2.3.0" 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /components/tsi-util/vault-tpl/tsi-policy.rcn.hcl.tpl: -------------------------------------------------------------------------------- 1 | # This policy template controls the path using: 2 | # * region 3 | # * cluster-name 4 | # * namespace 5 | 6 | path "secret/data/tsi-rcn/{{identity.entity.aliases.<%MOUNT_ACCESSOR%>.metadata.region}}/{{identity.entity.aliases.<%MOUNT_ACCESSOR%>.metadata.cluster-name}}/{{identity.entity.aliases.<%MOUNT_ACCESSOR%>.metadata.namespace}}/*" { 7 | # list allows listing the 
secrets: 8 | # capabilities = ["read", "list"] 9 | capabilities = ["read"] 10 | } 11 | -------------------------------------------------------------------------------- /components/tsi-util/vault-tpl/tsi-policy.rcni.hcl.tpl: -------------------------------------------------------------------------------- 1 | # This policy template controls the path using: 2 | # * region 3 | # * cluster-name 4 | # * namespace 5 | # * images 6 | 7 | path "secret/data/tsi-rcni/{{identity.entity.aliases.<%MOUNT_ACCESSOR%>.metadata.region}}/{{identity.entity.aliases.<%MOUNT_ACCESSOR%>.metadata.cluster-name}}/{{identity.entity.aliases.<%MOUNT_ACCESSOR%>.metadata.namespace}}/{{identity.entity.aliases.<%MOUNT_ACCESSOR%>.metadata.images}}/*" { 8 | capabilities = ["read"] 9 | } 10 | -------------------------------------------------------------------------------- /examples/spire/mars-spaceX1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: mars-mission1 5 | labels: 6 | app: mars-mission1 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: mars-mission1 12 | template: 13 | metadata: 14 | labels: 15 | identity_template: "true" 16 | app: mars-mission1 17 | spec: 18 | containers: 19 | - name: mars-mission1-main 20 | image: us.gcr.io/scytale-registry/aws-cli:latest 21 | command: ["sleep"] 22 | args: ["1000000000"] 23 | -------------------------------------------------------------------------------- /sample-x509/root.cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIBZjCB8aADAgECAgEBMA0GCSqGSIb3DQEBCwUAMAAwIhgPMDAwMTAxMDEwMDAw 3 | MDBaGA85OTk5MTIzMTIzNTk1OVowADB8MA0GCSqGSIb3DQEBAQUAA2sAMGgCYQDd 4 | //BGAUkGgLOzuNfAQC+hdMB8DCmttKgPxVzU29UDd/KL/5jMoGe73zc+57SuKhNZ 5 | Oynnuig1gDLxrGqkAa7Nk6TciIOOdn1EgoMDDko/hxYDX577h4AGdbmHRFkeLg0C 6 | AwEAAaMyMDAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUd1K2rS7hSd1qdMop 7 
| GvXcSVRsaYMwDQYJKoZIhvcNAQELBQADYQC4wTzT1fqQxGugUfgqEWUO5kcLlZtn 8 | Mc2CgIL0wvRy7K4r+qVOcQy4XauTK7gYm+O2HSJsdN9bHD7LqwD0dC+B4/AM4Lvs 9 | +n0wiyGQ0zjmxXmrfiOCJdF2ApHEu4V4Rjo= 10 | -----END CERTIFICATE----- 11 | -------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | # .goreleaser.yml 2 | release: 3 | github: 4 | owner: kompass 5 | name: TI-KeyRelease 6 | 7 | # Github enterprise 8 | github_urls: 9 | api: https://github.ibm.com/api/v3/ 10 | upload: https://github.ibm.com/api/uploads/ 11 | download: https://github.ibm.com/ 12 | 13 | # Build customization 14 | builds: 15 | - main: main.go 16 | binary: ti-webhook 17 | env: 18 | - CGO_ENABLED=0 19 | goos: 20 | - linux 21 | goarch: 22 | - amd64 23 | dockers: 24 | - image: kompass/TI-KeyRelease 25 | # Archive customization 26 | archive: 27 | format: tar.gz 28 | -------------------------------------------------------------------------------- /sample-x509/intermediate.cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIBZjCB8aADAgECAgEBMA0GCSqGSIb3DQEBCwUAMAAwIhgPMDAwMTAxMDEwMDAw 3 | MDBaGA85OTk5MTIzMTIzNTk1OVowADB8MA0GCSqGSIb3DQEBAQUAA2sAMGgCYQC1 4 | yLxOtIjaKFp5KDH4fBYl2l4pCrHJ7z32Hd+vd1LbuPM7FCCmZup8F8WMS9VyfWWX 5 | pjIT9uf5v3tlPqFZl0gpVdwkoFid2572tKFc7amoXiIJjzPhceLW7U0zneBwcV8C 6 | AwEAAaMyMDAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU3HH4tTA6cIcbq/KJ 7 | pLpfJCzQ4pgwDQYJKoZIhvcNAQELBQADYQBw68JTMAr0iqmbjq72JLGPEQj2Y2On 8 | 1jelBhsi5YddjRCXvToueO65w4BMrfJ86CGyTIgZ+Ne2jxC4ojfvNRBDl4mDivN5 9 | udGTxbUWToNmXtKGw/pXglhYQi9H5vN4Se4= 10 | -----END CERTIFICATE----- 11 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.dll 4 | *.so 5 | *.dylib 6 | 
*.pyc 7 | 8 | # Test binary, build with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 15 | .glide/ 16 | 17 | # Do not sync vendor 18 | vendor/ 19 | 20 | charts/ti-key-release-2/requirements.lock 21 | charts/ti-key-release-2/charts/* 22 | 23 | # remove mac files 24 | *.DS_Store 25 | charts/ti-key-release-1/Chart.yaml 26 | charts/ti-key-release-2/Chart.yaml 27 | /db/**/* 28 | -------------------------------------------------------------------------------- /examples/vault/README.md: -------------------------------------------------------------------------------- 1 | # Vault Experiments Setup 2 | To use Vault examples, please refer to [Vault Setup documentation](/docs/vault.md) 3 | 4 | Demos and Examples: 5 | * [OIDC Tutorial with Vault](/docs/spire-oidc-vault.md) 6 | * [Sidecar with Vault example](/examples/spire-sidecar) 7 | 8 | ## Secrets 9 | Before secrets can be delivered to the application container, they need to be first injected 10 | into Vault. 
Please follow the steps as outline in the 11 | [vault tutorial](/docs/spire-oidc-vault.md#configure-a-vault-instance) 12 | or in the 13 | [sidecar tutorial](/examples/spire-sidecar#pushing-the-db-credentials-to-vault) 14 | -------------------------------------------------------------------------------- /sample-x509/node1.cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIBhDCCAQ6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAAMCIYDzAwMDEwMTAxMDAw 3 | MDAwWhgPOTk5OTEyMzEyMzU5NTlaMBwxGjAYBgNVBAMTEXNvbWUgY29tbW9uIG5h 4 | bWUxMHwwDQYJKoZIhvcNAQEBBQADawAwaAJhANk1T2Yu8aLUYlqx6k/HVBCAMFSr 5 | rAQwHgBIADLKdT81jb+AGSFNzXNaPL/yPb172243qjNS1jCiqZhLUBYF1eDzRTGy 6 | 2DS1FdbiS16AxNbQbpSRcggJLf6tyagoWjuIzQIDAQABozMwMTAOBgNVHQ8BAf8E 7 | BAMCB4AwHwYDVR0jBBgwFoAU3HH4tTA6cIcbq/KJpLpfJCzQ4pgwDQYJKoZIhvcN 8 | AQELBQADYQCw8TbWkO0KOCcrltMMH1IPsHhCY0qC5cFwUbj3mCepoA7Qkrv79RLQ 9 | BtC7HxLg31sqw1CF/oH/2w5to0jveT2FGwV4pBoeCVKdeBEe2zPiSb5nvURuJmv/ 10 | 2resFWVSydo= 11 | -----END CERTIFICATE----- 12 | -------------------------------------------------------------------------------- /sample-x509/node2.cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIBhDCCAQ6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAAMCIYDzAwMDEwMTAxMDAw 3 | MDAwWhgPOTk5OTEyMzEyMzU5NTlaMBwxGjAYBgNVBAMTEXNvbWUgY29tbW9uIG5h 4 | bWUyMHwwDQYJKoZIhvcNAQEBBQADawAwaAJhALRajNdmaf57BA8a+bfCzbzjdVMd 5 | dvFPuGx+7r7RZYAIHw7XJm+iOb50N19FNCnabC3P5NcieYLWrEH4tHJV7qUY6dNr 6 | M1RbTAUp8hlnPXl1C8aiJwuaQVfa4Uc82RGyLQIDAQABozMwMTAOBgNVHQ8BAf8E 7 | BAMCB4AwHwYDVR0jBBgwFoAU3HH4tTA6cIcbq/KJpLpfJCzQ4pgwDQYJKoZIhvcN 8 | AQELBQADYQAEPn6rFL+JliOE3PPQzLRIJeFvtaATPQSskINm67gZGj0BScBYckoH 9 | 7GLjueGOV1+C7uiRtXTV4U+olaqteLFD2b+4OPNmNXV1cwrQ8p+6a4zhKYGOUNwF 10 | lDAXtLSCIqM= 11 | -----END CERTIFICATE----- 12 | -------------------------------------------------------------------------------- /sample-x509/node3.cert.pem: 
-------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIBhDCCAQ6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAAMCIYDzAwMDEwMTAxMDAw 3 | MDAwWhgPOTk5OTEyMzEyMzU5NTlaMBwxGjAYBgNVBAMTEXNvbWUgY29tbW9uIG5h 4 | bWUzMHwwDQYJKoZIhvcNAQEBBQADawAwaAJhAN8CEtDhU7MTxWeqE3bw/EF15rTq 5 | AABD3/BhgUpvowebqfdiofsUwi2s9azPknbyBSQ9cB4kIUQFOYymOCiEPprMIfna 6 | okyv8qXhX6py7tScOjP345YcSszXhnvgAoxpOQIDAQABozMwMTAOBgNVHQ8BAf8E 7 | BAMCB4AwHwYDVR0jBBgwFoAU3HH4tTA6cIcbq/KJpLpfJCzQ4pgwDQYJKoZIhvcN 8 | AQELBQADYQACZp6kJUjmEv1qq2Yipw5F5yYwffHM1YezZHpaxlJBZ4K4uKhBeGTS 9 | LvGx9Bi+NDpt5FA5RABuaqcAJ7LxrQVGIwseqRZhLrsB5QkK7yD1RztuslWX7pA3 10 | cvFpzejwtFk= 11 | -----END CERTIFICATE----- 12 | -------------------------------------------------------------------------------- /utils/get-cluster-info.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | SCRIPT_PATH=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P ) 3 | TSI_VERSION=$(cat ${SCRIPT_PATH}/../tsi-version.txt) 4 | 5 | docker_cmd="docker --version" 6 | if [[ ! 
$(eval ${docker_cmd}) ]]; then 7 | echo "docker installation required to proceed" 8 | echo "(https://docs.docker.com/get-docker/)" 9 | exit 1 10 | fi 11 | 12 | TEMPDIR=$(mktemp -d /tmp/tsi.XXX) 13 | 14 | CLUSTERINFO="${TEMPDIR}/clusterinfo.$$" 15 | kubectl get cm -n kube-system cluster-info -o yaml > ${CLUSTERINFO} 16 | 17 | docker run --rm -v ${CLUSTERINFO}:/tmp/clusterinfo \ 18 | docker.io/tsidentity/tsi-util:${TSI_VERSION} /usr/local/bin/getClusterInfo.sh /tmp/clusterinfo 19 | 20 | rm ${CLUSTERINFO} 21 | -------------------------------------------------------------------------------- /examples/keycloak/ingress.IKS.template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: keycloak-ingress 5 | spec: 6 | rules: 7 | # provide the actual Ingress for `host` value: 8 | # use the following command to get the subdomain: 9 | # ibmcloud ks cluster get --cluster | grep Ingress 10 | # any prefix can be added (e.g.): 11 | # host: tsi-keycloak.tsi-fra-8abee0d19746a818fd9d58aa25c34ecfe-0000.eu-de.containers.appdomain.cloud 12 | - host: 13 | http: 14 | paths: 15 | - pathType: Prefix 16 | path: "/" 17 | backend: 18 | service: 19 | name: tsi-keycloak 20 | port: 21 | number: 9090 22 | -------------------------------------------------------------------------------- /examples/vault/ingress.IKS.template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: vault-ingress 5 | namespace: tsi-vault 6 | spec: 7 | rules: 8 | # provide the actual Ingress for `host` value: 9 | # use the following command to get the subdomain: 10 | # ibmcloud ks cluster get --cluster | grep Ingress 11 | # any prefix can be added (e.g.): 12 | # host: tsi-vault.my-tsi-cluster-xxxxxxxxxxx-0000.eu-de.containers.appdomain.cloud 13 | - host: 14 | http: 15 | paths: 16 | - pathType: Prefix 17 
| path: "/" 18 | backend: 19 | service: 20 | name: tsi-vault 21 | port: 22 | number: 8200 23 | -------------------------------------------------------------------------------- /charts/tornjak/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Create URL for accessing Tornjak Backend 3 | */}} 4 | {{- define "tornjak.apiURL" -}} 5 | {{- if .Values.tornjak.config.backend.ingress }} 6 | {{- $url := print "http://" .Values.tornjak.config.backend.ingress }} 7 | {{- $url }} 8 | {{- else }} 9 | {{- default .Values.tornjak.config.frontend.apiServerURL }} 10 | {{- end }} 11 | {{- end }} 12 | 13 | {{/* 14 | Create URL for accessing Tornjak Frontend 15 | */}} 16 | {{- define "tornjak.FrontendURL" -}} 17 | {{- if .Values.tornjak.config.frontend.ingress }} 18 | {{- $feurl := print "http://" .Values.tornjak.config.frontend.ingress }} 19 | {{- $feurl }} 20 | {{- else }} 21 | {{- $feurl := print "http://localhost:3000" }} 22 | {{- $feurl }} 23 | {{- end }} 24 | {{- end }} -------------------------------------------------------------------------------- /examples/keycloak/ingress.template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: keycloak-ingress 5 | spec: 6 | rules: 7 | # provide the actual Ingress for `host` value: 8 | # use the following command to get the subdomain: 9 | # ibmcloud ks cluster get --cluster | grep Ingress 10 | # any prefix can be added (e.g.): 11 | # host: tsi-keycloak.tsi-fra-8abee0d19746a818fd9d58aa25c34ecfe-0000.eu-de.containers.appdomain.cloud 12 | - host: 13 | http: 14 | paths: 15 | - pathType: Prefix 16 | path: "/" 17 | backend: 18 | service: 19 | name: tsi-keycloak 20 | port: 21 | # number: 9090 22 | number: 8080 23 | -------------------------------------------------------------------------------- /sample-x509/node1.key.pem: 
-------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIB5AIBADANBgkqhkiG9w0BAQEFAASCAc4wggHKAgEAAmEA2TVPZi7xotRiWrHq 3 | T8dUEIAwVKusBDAeAEgAMsp1PzWNv4AZIU3Nc1o8v/I9vXvbbjeqM1LWMKKpmEtQ 4 | FgXV4PNFMbLYNLUV1uJLXoDE1tBulJFyCAkt/q3JqChaO4jNAgMBAAECYQC4s6V1 5 | 3ftqHDIar2nxNy3b7jWq+mSHFyeb054fkpvLCU4lBtfXwyIushAwpNwtcdCfyNO9 6 | gLOTgXq+BkOOLy8HsODkBt6CHQk6Wg7kCgQm6KblHVmgoBIjrE628WNnch0CMQD3 7 | 5OlJ727L53q7oDx2flgER0KCu2kdFR45VDU6LP4F0PSI8dtw2gNCc6OIdg9rOa8C 8 | MQDgT4fvXNR6Z2ANm+ySBm1fEDbAxc02Otv4El50b73v7GG9zkGj4L69UYGCR84X 9 | kEMCMCfE77BO20WXI1eKg2i3KEO3cvIqQtjJlIm5+rgUk8Q3G6QEuSHHuBInHys7 10 | VYnsPQIwb8DP3b4nEfCs1zrmyv2uwdznGnWxvqHmc5pbOrFtNLpVG+ZFp6rR/Ko9 11 | 1rWFhRHXAjBrSZZva+lyjQVg+ZNvyPbrMx8CtCsLtYfPVmaK4OHgyj20V950AHVy 12 | fLmeD2WQTuI= 13 | -----END PRIVATE KEY----- 14 | -------------------------------------------------------------------------------- /sample-x509/node2.key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIB5QIBADANBgkqhkiG9w0BAQEFAASCAc8wggHLAgEAAmEAtFqM12Zp/nsEDxr5 3 | t8LNvON1Ux128U+4bH7uvtFlgAgfDtcmb6I5vnQ3X0U0KdpsLc/k1yJ5gtasQfi0 4 | clXupRjp02szVFtMBSnyGWc9eXULxqInC5pBV9rhRzzZEbItAgMBAAECYB/B4hL7 5 | waZB2xEd2uL0tm2QT9R2177eHsdcAoYvMQQ+admfIPF/S1Ox+x4XfXvxLdc2nyMf 6 | uGWFk0auZrhJJXJV0Ctqylc7fVcduxzGRZtE9n9R33TL2TXo8QhKIt1gsQIxAMDu 7 | g62tC6vqVEf/wIS6HA3HUH+LlgPqhiQY2fh2ESDbD9gQTX68qLD3T0XNOnzb9wIx 8 | AO9PcoPnpDTZg6DYcd88klvLSZ3QDAKrDx3IMYo9V/gtsDwdomL/iYcpsb7QiWJx 9 | +wIwfP8K2Q7wLOWYzTzpohRh8JtdkAUDoG3lkqV3Mv5rgnnq/fUyABaCxIS2G2cn 10 | qZVpAjEAxJZiP0NyHEw3GlHUMYuju66i2fZHd4WoVFHZiHNeKaz2olvBw0uiYZt5 11 | vYO36FwzAjEAtE7UL8x4XolVnHpL6jfos/1zcmP3pqaU4s3hpWgbipM6nihk24Tc 12 | 1Tp+C2MSx5DJ 13 | -----END PRIVATE KEY----- 14 | -------------------------------------------------------------------------------- /sample-x509/root.key.pem: 
-------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIB5AIBADANBgkqhkiG9w0BAQEFAASCAc4wggHKAgEAAmEA3f/wRgFJBoCzs7jX 3 | wEAvoXTAfAwprbSoD8Vc1NvVA3fyi/+YzKBnu983Pue0rioTWTsp57ooNYAy8axq 4 | pAGuzZOk3IiDjnZ9RIKDAw5KP4cWA1+e+4eABnW5h0RZHi4NAgMBAAECYCx/MFnX 5 | yBtVkK9lTVlrhCCIlkxG49O9cjI3DPRdW8TmNed5w9bAEArIIvqo1r5SEE8oo33F 6 | UUQTI76vPSkzx84NOTdZJFdxWttlWK5YCmefNPtEanZA5Ya/zXnagWLewQIxAPTB 7 | 33+69wwzcoqZqKli+it2KKBaTpXVL2gH+uNA9jROOR3OLuVEzg0i72/ZLzKxywIx 8 | AOgydP/9ptgkYAQreNmMmcgPAAst+qaDA2ab0QRZKaTCcH4snCgf2qwzho0hC2nE 9 | hwIwInth0mvzvCtPWsl1IpAIMLFP3e97HM7RA/YD2ZpgVVQj5dgTcUd1RBKE6xwM 10 | 8zZrAjBBdO/stmnRZre2lTa1RWCYuDnP7P9K74YG6AgNTMFjk/i4aQUAfrJbdw8I 11 | O/eGsoMCMQCxiJZwhLOnNugwHtJPWLij3sz4tVTjbPCZz2hHbgEp9ydCVmWAXLAv 12 | ORZ3WX5c29c= 13 | -----END PRIVATE KEY----- 14 | -------------------------------------------------------------------------------- /sample-x509/node3.key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIB5gIBADANBgkqhkiG9w0BAQEFAASCAdAwggHMAgEAAmEA3wIS0OFTsxPFZ6oT 3 | dvD8QXXmtOoAAEPf8GGBSm+jB5up92Kh+xTCLaz1rM+SdvIFJD1wHiQhRAU5jKY4 4 | KIQ+mswh+dqiTK/ypeFfqnLu1Jw6M/fjlhxKzNeGe+ACjGk5AgMBAAECYQCNkp5p 5 | VrKxG9sVTWZf+urZj8Svk2H+yRgwXWYb4DVKTisfrtEQOYRwRL4OQBRGSKuFiigD 6 | +NBns+IIhuy7XqwLJvb0hV0qF9Ocl87CKGgha391PHkwcdfZ7l4i8gEJDHUCMQDx 7 | 2rBVM7v16Dt00nHd4OfqM6+N847ptL87Ua5EmVdTuqbboNYRfN5uTjX0WaCPPksC 8 | MQDsDTL4ZJH5N5HHBM0KKc5bRTNvcE92d9vDPlJ2N1nf/4eWj4kkABSlVSqkje7I 9 | tAsCMQDpseS9D3ZJ0RLSeR6pxM6TUUoLIhMv5AT8bpqzkm/9adgdVLZEw6GyeERO 10 | TeSFRS0CMBtmIM9yJPtvds1KAl62oYw4iRLbT23p1alg94NfQ2BbsapJONrj44GZ 11 | VBzLmLbVJwIxALwZ5MNWD4k4lL/oLr6cqdnmlY6SQBwY9DZHroOx8qwSa3nRQYc3 12 | bNf2xfmXCcTBqg== 13 | -----END PRIVATE KEY----- 14 | -------------------------------------------------------------------------------- /sample-x509/intermediate.key.pem: 
-------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIB5AIBADANBgkqhkiG9w0BAQEFAASCAc4wggHKAgEAAmEAtci8TrSI2ihaeSgx 3 | +HwWJdpeKQqxye899h3fr3dS27jzOxQgpmbqfBfFjEvVcn1ll6YyE/bn+b97ZT6h 4 | WZdIKVXcJKBYndue9rShXO2pqF4iCY8z4XHi1u1NM53gcHFfAgMBAAECYCU83p48 5 | yz/tPkNNYZcmTB8q8Vj08OCN2qJ9EiJOjjGmXGUAxYNw153d/MC1lB7k4r9gfhwb 6 | 5M6r/S4ScR0h9YfVw6DbMYcdwlZKOtuTyFcHB2ya0i02TubeE/F+9KRqgQIxANX7 7 | TnSC1pIkSdmrJ5FcSiD6yzJrXy3ZjgHsSnw1Tzijbf4i3zNuOEjKiiTA+f8CwQIx 8 | ANl64imDHW0ovqxJVRBFd7I3Q9knXvRXPQYCkq56/bohDnnxHJjz+GkCT26voMIc 9 | HwIxAL2FGEavP1Yul84W/jOLlwhAuEnOuX0Fa/YeDxaVrLXQYfItdpy5qVYnNVU6 10 | KfGHwQIwVRZ/7+/tjmzXlP6n+lLDjwWSVR7TRtPu+y/8hal3JeCTEmCkaK9jbGpL 11 | +6lZQzjPAjAep8QQjWYkJWWTff2s3WSJZFq6v0w827SMIehLpoZd7bgh+GYL4C8I 12 | ed4XpROlRQ0= 13 | -----END PRIVATE KEY----- 14 | -------------------------------------------------------------------------------- /utils/spire.db.clean.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | generateName: spire-server- 5 | name: spire-server-0 6 | namespace: tornjak 7 | spec: 8 | containers: 9 | - command: ["sleep"] 10 | args: ["1000000000"] 11 | image: ghcr.io/spiffe/tornjak-spire-server:1.1.5 12 | imagePullPolicy: Always 13 | name: spire-server 14 | securityContext: 15 | privileged: true 16 | volumeMounts: 17 | - mountPath: /run/spire/data 18 | name: spire-data 19 | - mountPath: /run/spire-server/private 20 | name: spire-server-socket 21 | volumes: 22 | - hostPath: 23 | path: /run/spire-server/private 24 | type: DirectoryOrCreate 25 | name: spire-server-socket 26 | - hostPath: 27 | path: /var/spire-data 28 | type: DirectoryOrCreate 29 | name: spire-data 30 | -------------------------------------------------------------------------------- /charts/spire/templates/agent-cluster-role.tpl: -------------------------------------------------------------------------------- 1 | # 
Required cluster role to allow spire-agent to query k8s API server 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: spire-agent-{{ .Values.namespace }}-cluster-role 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["pods","nodes","nodes/proxy"] 9 | verbs: ["get"] 10 | 11 | --- 12 | # Binds above cluster role to spire-agent service account 13 | kind: ClusterRoleBinding 14 | apiVersion: rbac.authorization.k8s.io/v1 15 | metadata: 16 | name: spire-agent-{{ .Values.namespace }}-cluster-role-binding 17 | subjects: 18 | - kind: ServiceAccount 19 | name: spire-agent 20 | namespace: {{ .Values.namespace }} 21 | roleRef: 22 | kind: ClusterRole 23 | name: spire-agent-{{ .Values.namespace }}-cluster-role 24 | apiGroup: rbac.authorization.k8s.io 25 | -------------------------------------------------------------------------------- /examples/keycloak/ingress.keycloak.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: keycloak-ingress 5 | spec: 6 | rules: 7 | # provide the actual Ingress for `host` value: 8 | # use the following command to get the subdomain: 9 | # ibmcloud ks cluster get --cluster | grep Ingress 10 | # any prefix can be added (e.g.): 11 | # host: tsi-keycloak.tsi-fra-8abee0d19746a818fd9d58aa25c34ecfe-0000.eu-de.containers.appdomain.cloud 12 | - host: keycloak.tornjak-02-9d995c4a8c7c5f281ce13d5467ff6a94-0000.us-south.containers.appdomain.cloud 13 | http: 14 | paths: 15 | - pathType: Prefix 16 | path: "/" 17 | backend: 18 | service: 19 | name: tsi-keycloak 20 | port: 21 | # number: 9090 22 | number: 8080 23 | -------------------------------------------------------------------------------- /examples/spire/mars.txt: -------------------------------------------------------------------------------- 1 | # ############################### # 2 | # Concentrated Dark Matter Recipe # 3 | # mix: # 4 | # * 2 parts cesium # 5 
| # * 1 part plutonic quartz # 6 | # * 1 bottle water (12oz/355mL) # 7 | # use for Zigerions simulation # 8 | # o o ^ # 9 | # )-( / \ # 10 | # (O O) |o| # 11 | # \=/ | | # 12 | # .-"-. |-| # 13 | # //\ /\\ / | \ # 14 | # _// / \ \\_ |_ _| # 15 | #=./ {,-.} \.= | _ | # 16 | # || || |___| # 17 | # || || /\/ \/\ # 18 | # __|| ||__ ||||||| # 19 | # `---" "---' # 20 | # R&M S01E04 # 21 | ################################### 22 | -------------------------------------------------------------------------------- /charts/tornjak/templates/spire-roles.tpl: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: spire-server-role 5 | rules: 6 | - apiGroups: ["authentication.k8s.io"] 7 | resources: ["tokenreviews"] 8 | verbs: ["get", "watch", "list", "create"] 9 | - apiGroups: [""] 10 | resources: ["nodes","pods"] 11 | verbs: ["list","get"] 12 | - apiGroups: [""] 13 | resources: ["configmaps"] 14 | resourceNames: ["spire-bundle"] 15 | verbs: ["get", "patch"] 16 | --- 17 | apiVersion: rbac.authorization.k8s.io/v1 18 | kind: ClusterRoleBinding 19 | metadata: 20 | name: spire-server-binding 21 | subjects: 22 | - kind: ServiceAccount 23 | name: spire-server 24 | namespace: {{ .Values.namespace }} 25 | roleRef: 26 | kind: ClusterRole 27 | name: spire-server-role 28 | apiGroup: rbac.authorization.k8s.io 29 | -------------------------------------------------------------------------------- /examples/spire-sidecar/config/app-python.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: app-py 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: app-py 10 | strategy: 11 | type: Recreate 12 | template: 13 | metadata: 14 | labels: 15 | app: app-py 16 | spec: 17 | containers: 18 | - image: tsidentity/tornjak-example-python:v0.1 19 | name: py 20 | ports: 21 | - 
containerPort: 5000 22 | resources: {} 23 | restartPolicy: Always 24 | status: {} 25 | --- 26 | apiVersion: v1 27 | kind: Service 28 | metadata: 29 | name: app-py 30 | namespace: default 31 | spec: 32 | type: NodePort 33 | selector: 34 | app: app-py 35 | ports: 36 | - name: "8000" 37 | port: 8000 38 | targetPort: 5000 39 | status: 40 | loadBalancer: {} 41 | -------------------------------------------------------------------------------- /examples/spire-sidecar/config/app-node.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: app-node 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: app-node 10 | strategy: 11 | type: Recreate 12 | template: 13 | metadata: 14 | labels: 15 | app: app-node 16 | spec: 17 | containers: 18 | - image: tsidentity/tornjak-example-nodejs:v0.1 19 | name: node 20 | ports: 21 | - containerPort: 8080 22 | resources: {} 23 | restartPolicy: Always 24 | status: {} 25 | --- 26 | apiVersion: v1 27 | kind: Service 28 | metadata: 29 | name: app-node 30 | namespace: default 31 | spec: 32 | type: NodePort 33 | selector: 34 | app: app-node 35 | ports: 36 | - name: "8001" 37 | port: 8001 38 | targetPort: 8080 39 | status: 40 | loadBalancer: {} 41 | -------------------------------------------------------------------------------- /examples/vault-client/Dockerfile.custom: -------------------------------------------------------------------------------- 1 | FROM ubuntu:18.04 2 | 3 | ARG ARCH 4 | 5 | RUN apt update && \ 6 | apt install -y curl && \ 7 | apt install -y wget && \ 8 | apt install -y unzip && \ 9 | apt install -y jq && \ 10 | apt install -y vim 11 | RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/${ARCH}/kubectl && \ 12 | chmod +x kubectl 13 | 14 | COPY ./vault /usr/local/bin/vault 15 | COPY ./setup-vault-cli.sh 
/setup-vault-cli.sh 16 | COPY ./test-vault-cli.sh /test-vault-cli.sh 17 | 18 | # Default values for vault client setup 19 | ARG DEFAULT_VAULT_ADDR="http://vault:8200" 20 | ARG DEFAULT_VAULT_ROLE="tsi-role-rcni" 21 | ENV VAULT_ADDR=${DEFAULT_VAULT_ADDR} 22 | ENV VAULT_ROLE=${DEFAULT_VAULT_ROLE} 23 | 24 | CMD ["/bin/bash", "-c", "while true; do sleep 10; done;"] 25 | -------------------------------------------------------------------------------- /examples/spire/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: bin/mars container-mars 2 | 3 | GIT_COMMIT_SHA="$(shell git rev-parse --short HEAD 2>/dev/null)" 4 | REPO ?= tsidentity 5 | MARS_IMG_NAME ?= mars-demo 6 | VERSION=$(shell cat ../../tsi-version.txt) 7 | # GO_FILES := $(shell find . -type f -name '*.go' -not -name '*_test.go' -not -path './vendor/*') 8 | 9 | MARS_IMG := $(REPO)/$(MARS_IMG_NAME):$(GIT_COMMIT_SHA) 10 | MARS_IMG_MUTABLE := $(REPO)/$(MARS_IMG_NAME):$(VERSION) 11 | MARS_IMG_LATEST := $(REPO)/$(MARS_IMG_NAME):latest 12 | 13 | all: bin/mars container-mars 14 | 15 | bin/mars: 16 | docker build --no-cache -t $(MARS_IMG) -f Dockerfile.mars . 
17 | docker tag $(MARS_IMG) $(MARS_IMG_MUTABLE) 18 | docker tag $(MARS_IMG) $(MARS_IMG_LATEST) 19 | 20 | container-mars: 21 | docker push $(MARS_IMG) 22 | docker push $(MARS_IMG_MUTABLE) 23 | docker push $(MARS_IMG_LATEST) 24 | 25 | # vendor: 26 | # go mod tidy 27 | # go mod vendor 28 | -------------------------------------------------------------------------------- /examples/vault/vault.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: tsi-vault 5 | spec: 6 | selector: 7 | app: tsi-vault 8 | ports: 9 | - protocol: TCP 10 | port: 8200 11 | targetPort: 8200 12 | type: NodePort 13 | --- 14 | 15 | apiVersion: apps/v1 16 | kind: Deployment 17 | metadata: 18 | labels: 19 | app: tsi-vault 20 | name: tsi-vault 21 | spec: 22 | replicas: 1 23 | selector: 24 | matchLabels: 25 | app: tsi-vault 26 | template: 27 | metadata: 28 | labels: 29 | app: tsi-vault 30 | name: tsi-vault 31 | spec: 32 | containers: 33 | - name: tsi-vault 34 | image: tsidentity/ti-vault:v1.9 35 | imagePullPolicy: Always 36 | env: 37 | - name: SKIP_SETCAP 38 | value: "true" 39 | - name: SKIP_CHOWN 40 | value: "true" 41 | - name: HOME 42 | value: "/tmp" 43 | -------------------------------------------------------------------------------- /sample-keys/openssl.cnf: -------------------------------------------------------------------------------- 1 | [ req ] 2 | #default_bits = 2048 3 | #default_md = sha256 4 | #default_keyfile = privkey.pem 5 | distinguished_name = req_distinguished_name 6 | attributes = req_attributes 7 | 8 | [ req_distinguished_name ] 9 | countryName = Country Name (2 letter code) 10 | countryName_min = 2 11 | countryName_max = 2 12 | stateOrProvinceName = State or Province Name (full name) 13 | localityName = Locality Name (eg, city) 14 | 0.organizationName = Organization Name (eg, company) 15 | organizationalUnitName = Organizational Unit Name (eg, section) 16 | commonName = Common Name (eg, 
fully qualified host name) 17 | commonName_max = 64 18 | emailAddress = Email Address 19 | emailAddress_max = 64 20 | 21 | [ req_attributes ] 22 | challengePassword = A challenge password 23 | challengePassword_min = 4 24 | challengePassword_max = 20 25 | [ SAN ] 26 | subjectAltName=DNS:localhost,DNS:example.com,DNS:www.example.com 27 | -------------------------------------------------------------------------------- /examples/vault-client/makefile: -------------------------------------------------------------------------------- 1 | GIT_COMMIT_SHA="$(shell git rev-parse --short HEAD 2>/dev/null)" 2 | GIT_REMOTE_URL="$(shell git config --get remote.origin.url 2>/dev/null)" 3 | BUILD_DATE="$(shell date -u +"%Y-%m-%dT%H:%M:%SZ")" 4 | IMG_NAME="vault-cli" 5 | REPO ?= tsidentity 6 | IMAGE := $(REPO)/$(IMG_NAME):$(GIT_COMMIT_SHA) 7 | MUTABLE_IMAGE := $(REPO)/$(IMG_NAME):v0.3 8 | ARCH=$(shell if test `uname -m` = "x86_64"; then echo amd64 ; else echo `uname -m`; fi;) 9 | 10 | all: docker docker-push timestamp 11 | 12 | fast: 13 | docker build --build-arg ARCH=${ARCH} -t $(IMAGE) . 14 | docker tag $(IMAGE) $(MUTABLE_IMAGE) 15 | docker push $(IMAGE) 16 | docker push $(MUTABLE_IMAGE) 17 | 18 | docker: 19 | docker build --no-cache --build-arg ARCH=${ARCH} -t $(IMAGE) . 
20 | docker tag $(IMAGE) $(MUTABLE_IMAGE) 21 | 22 | docker-push: 23 | docker push $(IMAGE) 24 | docker push $(MUTABLE_IMAGE) 25 | 26 | timestamp: 27 | date 28 | 29 | .PHONY: all fast docker docker-push timestamp 30 | -------------------------------------------------------------------------------- /examples/vault-client/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:18.04 2 | 3 | ARG ARCH 4 | 5 | RUN apt update && \ 6 | apt install -y curl && \ 7 | apt install -y wget && \ 8 | apt install -y unzip && \ 9 | apt install -y jq && \ 10 | apt install -y vim 11 | 12 | RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/${ARCH}/kubectl && \ 13 | chmod +x kubectl 14 | RUN wget https://releases.hashicorp.com/vault/1.0.3/vault_1.0.3_linux_amd64.zip && \ 15 | unzip vault_1.0.3_linux_amd64.zip && \ 16 | mv vault /usr/local/bin/ && \ 17 | rm -f vault_1.0.3_linux_amd64.zip 18 | 19 | COPY ./setup-vault-cli.sh /setup-vault-cli.sh 20 | COPY ./test-vault-cli.sh /test-vault-cli.sh 21 | 22 | # Default values for vault client setup 23 | ARG DEFAULT_VAULT_ADDR="http://vault:8200" 24 | ARG DEFAULT_VAULT_ROLE="tsi-role-rcni" 25 | ENV VAULT_ADDR=${DEFAULT_VAULT_ADDR} 26 | ENV VAULT_ROLE=${DEFAULT_VAULT_ROLE} 27 | 28 | CMD ["/bin/bash", "-c", "while true; do sleep 10; done;"] 29 | -------------------------------------------------------------------------------- /examples/vault-client/setup-vault-cli.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | JWTFILE=/jwt-tokens/token 3 | if [ ! -s "$JWTFILE" ]; then 4 | echo "$JWTFILE does not exist. 
Make sure Trusted Identity is setup correctly" 5 | exit 1 6 | fi 7 | 8 | ## create help menu: 9 | helpme() 10 | { 11 | cat < 14 | Where: 15 | role - name of the vault role for this login 16 | 17 | HELPMEHELPME 18 | } 19 | 20 | getToken() 21 | { 22 | export TOKEN=$(cat ${JWTFILE}) 23 | export RESP=$(curl -s --request POST --data '{"jwt": "'"${TOKEN}"'", "role": "'"${ROLE}"'"}' ${VAULT_ADDR}/v1/auth/trusted-identity/login) 24 | export VAULT_TOKEN=$(echo $RESP | jq -r '.auth.client_token') 25 | if [ "$VAULT_TOKEN" == "null" ] ; then 26 | echo "ERROR: $RESP" 27 | else 28 | echo "VAULT_TOKEN=$VAULT_TOKEN" 29 | fi 30 | } 31 | 32 | # validate the arguments 33 | if [[ "$1" == "-?" || "$1" == "-h" || "$1" == "--help" ]] ; then 34 | helpme 35 | elif [ "$1" == "" ] ; then 36 | export ROLE="${VAULT_ROLE}" 37 | getToken 38 | else 39 | export ROLE="$1" 40 | getToken 41 | fi 42 | -------------------------------------------------------------------------------- /components/tsi-util/Makefile: -------------------------------------------------------------------------------- 1 | TSI_VERSION=$(shell cat ../../tsi-version.txt) 2 | GIT_COMMIT_SHA="$(shell git rev-parse --short HEAD 2>/dev/null)" 3 | GIT_REMOTE_URL="$(shell git config --get remote.origin.url 2>/dev/null)" 4 | BUILD_DATE="$(shell date -u +"%Y-%m-%dT%H:%M:%SZ")" 5 | BINARY_NAME="tsi-util" 6 | REPO ?= tsidentity 7 | IMAGE := $(REPO)/$(BINARY_NAME):$(GIT_COMMIT_SHA) 8 | MUTABLE_IMAGE := $(REPO)/$(BINARY_NAME):$(TSI_VERSION) 9 | LATEST := $(REPO)/$(BINARY_NAME):latest 10 | 11 | all: docker timestamp 12 | 13 | allpush: docker docker-push timestamp 14 | 15 | fastpush: fast docker-push timestamp 16 | 17 | fast: 18 | docker build -t $(IMAGE) . 19 | docker tag $(IMAGE) $(MUTABLE_IMAGE) 20 | docker tag $(IMAGE) $(LATEST) 21 | date 22 | 23 | docker: 24 | docker build --no-cache -t $(IMAGE) . 
25 | docker tag $(IMAGE) $(MUTABLE_IMAGE) 26 | docker tag $(IMAGE) $(LATEST) 27 | 28 | docker-push: 29 | docker push $(IMAGE) 30 | docker push $(MUTABLE_IMAGE) 31 | docker push $(LATEST) 32 | 33 | timestamp: 34 | date 35 | 36 | .PHONY: all fast allpush fastpush docker docker-push timestamp 37 | -------------------------------------------------------------------------------- /charts/spire/templates/crd_role.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRole 5 | metadata: 6 | creationTimestamp: null 7 | name: spiffe-crd-role 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - endpoints 13 | verbs: 14 | - create 15 | - delete 16 | - get 17 | - list 18 | - patch 19 | - update 20 | - watch 21 | - apiGroups: 22 | - "" 23 | resources: 24 | - endpoints/status 25 | verbs: 26 | - get 27 | - patch 28 | - update 29 | - apiGroups: 30 | - "" 31 | resources: 32 | - pods 33 | verbs: 34 | - create 35 | - delete 36 | - get 37 | - list 38 | - patch 39 | - update 40 | - watch 41 | - apiGroups: 42 | - "" 43 | resources: 44 | - pods/status 45 | verbs: 46 | - get 47 | - patch 48 | - update 49 | - apiGroups: 50 | - spiffeid.spiffe.io 51 | resources: 52 | - spiffeids 53 | verbs: 54 | - create 55 | - delete 56 | - get 57 | - list 58 | - patch 59 | - update 60 | - watch 61 | - apiGroups: 62 | - spiffeid.spiffe.io 63 | resources: 64 | - spiffeids/status 65 | verbs: 66 | - get 67 | - patch 68 | - update 69 | -------------------------------------------------------------------------------- /examples/spire/mars.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: mars-python 5 | labels: 6 | app: mars-python 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: mars-python 12 | template: 13 | metadata: 14 | labels: 15 | identity_template: "true" 16 | app: 
mars-python 17 | spec: 18 | hostPID: true 19 | hostNetwork: true 20 | dnsPolicy: ClusterFirstWithHostNet 21 | containers: 22 | - name: mars-python-main 23 | securityContext: 24 | # privilaged is needed to create socket and bundle files 25 | privileged: true 26 | #image: us.gcr.io/scytale-registry/aws-cli:latest 27 | image: tsidentity/mars:latest 28 | command: ["sleep"] 29 | args: ["1000000000"] 30 | volumeMounts: 31 | - name: spire-agent-socket 32 | mountPath: /run/spire/sockets 33 | readOnly: true 34 | volumes: 35 | - name: spire-agent-socket 36 | hostPath: 37 | path: /run/spire/sockets 38 | type: Directory 39 | -------------------------------------------------------------------------------- /charts/tornjak/templates/tornjak-config.tpl: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: tornjak-config 5 | namespace: {{ .Values.namespace }} 6 | data: 7 | server.conf: | 8 | server { 9 | metadata = "insert metadata" 10 | } 11 | 12 | plugins { 13 | 14 | {{- if .Values.tornjak }} 15 | {{- if .Values.tornjak.config }} 16 | {{- if .Values.tornjak.config.backend }} 17 | {{- if .Values.tornjak.config.backend.dataStore }} 18 | DataStore "sql" { 19 | plugin_data { 20 | drivername = "{{ .Values.tornjak.config.backend.dataStore.driver }}" 21 | # TODO is this a good location? 
22 | filename = "{{ .Values.tornjak.config.backend.dataStore.file }}" 23 | } 24 | } 25 | {{- end }} 26 | 27 | {{- if .Values.tornjak.config.enableUserMgmt }} 28 | UserManagement "KeycloakAuth" { 29 | plugin_data { 30 | jwksURL = "{{ .Values.tornjak.config.backend.jwksURL }}" 31 | redirectURL = "{{ .Values.tornjak.config.backend.redirectURL }}" 32 | } 33 | } 34 | {{- end }} 35 | 36 | {{- end }} 37 | {{- end }} 38 | {{- end }} 39 | 40 | } 41 | -------------------------------------------------------------------------------- /charts/spire/templates/k8s-workload-registrar-configmap.tpl: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: k8s-workload-registrar 5 | namespace: {{ .Values.namespace }} 6 | data: 7 | registrar.conf: | 8 | log_level = "debug" 9 | mode = "crd" 10 | trust_domain = "{{ .Values.trustdomain }}" 11 | # enable when direct socket access to SPIRE Server available: 12 | # server_socket_path = "/run/spire/sockets/registration.sock" 13 | agent_socket_path = "{{ .Values.spireAgent.socketDir }}/{{ .Values.spireAgent.socketFile }}" 14 | server_address = "{{ .Values.spireServer.address }}:{{ .Values.spireServer.port }}" 15 | cluster = "{{ .Values.clustername }}" 16 | # enable for label based registration: 17 | # pod_label = "spire-workload-id" 18 | # enable for annotation based registration: 19 | # pod_annotation = "spire-workload-id" 20 | identity_template = "{{ "region/{{.Context.Region}}/cluster_name/{{.Context.ClusterName}}/ns/{{.Pod.Namespace}}/sa/{{.Pod.ServiceAccount}}/pod_name/{{.Pod.Name}}" }}" 21 | identity_template_label = "identity_template" 22 | context { 23 | Region = "{{ .Values.region }}" 24 | ClusterName = "{{ .Values.clustername }}" 25 | } 26 | -------------------------------------------------------------------------------- /components/tsi-util/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM 
ubuntu:18.04 2 | RUN apt update && \ 3 | apt install -y wget unzip && \ 4 | apt install -y curl jq vim && \ 5 | apt install -y openssl 6 | 7 | # install yq required for xform YAML to JSON 8 | RUN apt-get install -y software-properties-common && \ 9 | add-apt-repository ppa:rmescandon/yq && \ 10 | apt update && apt install -y yq 11 | 12 | RUN cd /usr/local/bin && \ 13 | curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \ 14 | chmod +x kubectl 15 | 16 | RUN wget https://releases.hashicorp.com/vault/1.4.2/vault_1.4.2_linux_amd64.zip && \ 17 | unzip vault_1.4.2_linux_amd64.zip && \ 18 | mv vault /usr/local/bin/ && \ 19 | rm -f vault_1.4.2_linux_amd64.zip 20 | 21 | COPY secret-maker.sh /usr/local/bin/ 22 | COPY getClusterInfo.sh /usr/local/bin/ 23 | COPY load-sample-policies.sh /usr/local/bin/ 24 | COPY register-JSS.sh /usr/local/bin/ 25 | COPY vault-tpl/ /vault-tpl 26 | COPY vault-setup.sh /usr/local/bin/ 27 | COPY setIdentity.sh getOpensslcnf.sh /usr/local/bin/ 28 | 29 | # run it forever 30 | CMD ["/bin/bash", "-c", "tail -f /dev/null"] 31 | -------------------------------------------------------------------------------- /sample-x509/node1-bundle.cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIBhDCCAQ6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAAMCIYDzAwMDEwMTAxMDAw 3 | MDAwWhgPOTk5OTEyMzEyMzU5NTlaMBwxGjAYBgNVBAMTEXNvbWUgY29tbW9uIG5h 4 | bWUxMHwwDQYJKoZIhvcNAQEBBQADawAwaAJhANk1T2Yu8aLUYlqx6k/HVBCAMFSr 5 | rAQwHgBIADLKdT81jb+AGSFNzXNaPL/yPb172243qjNS1jCiqZhLUBYF1eDzRTGy 6 | 2DS1FdbiS16AxNbQbpSRcggJLf6tyagoWjuIzQIDAQABozMwMTAOBgNVHQ8BAf8E 7 | BAMCB4AwHwYDVR0jBBgwFoAU3HH4tTA6cIcbq/KJpLpfJCzQ4pgwDQYJKoZIhvcN 8 | AQELBQADYQCw8TbWkO0KOCcrltMMH1IPsHhCY0qC5cFwUbj3mCepoA7Qkrv79RLQ 9 | BtC7HxLg31sqw1CF/oH/2w5to0jveT2FGwV4pBoeCVKdeBEe2zPiSb5nvURuJmv/ 10 | 2resFWVSydo= 11 | -----END 
CERTIFICATE----- 12 | -----BEGIN CERTIFICATE----- 13 | MIIBZjCB8aADAgECAgEBMA0GCSqGSIb3DQEBCwUAMAAwIhgPMDAwMTAxMDEwMDAw 14 | MDBaGA85OTk5MTIzMTIzNTk1OVowADB8MA0GCSqGSIb3DQEBAQUAA2sAMGgCYQC1 15 | yLxOtIjaKFp5KDH4fBYl2l4pCrHJ7z32Hd+vd1LbuPM7FCCmZup8F8WMS9VyfWWX 16 | pjIT9uf5v3tlPqFZl0gpVdwkoFid2572tKFc7amoXiIJjzPhceLW7U0zneBwcV8C 17 | AwEAAaMyMDAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU3HH4tTA6cIcbq/KJ 18 | pLpfJCzQ4pgwDQYJKoZIhvcNAQELBQADYQBw68JTMAr0iqmbjq72JLGPEQj2Y2On 19 | 1jelBhsi5YddjRCXvToueO65w4BMrfJ86CGyTIgZ+Ne2jxC4ojfvNRBDl4mDivN5 20 | udGTxbUWToNmXtKGw/pXglhYQi9H5vN4Se4= 21 | -----END CERTIFICATE----- 22 | -------------------------------------------------------------------------------- /sample-x509/node2-bundle.cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIBhDCCAQ6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAAMCIYDzAwMDEwMTAxMDAw 3 | MDAwWhgPOTk5OTEyMzEyMzU5NTlaMBwxGjAYBgNVBAMTEXNvbWUgY29tbW9uIG5h 4 | bWUyMHwwDQYJKoZIhvcNAQEBBQADawAwaAJhALRajNdmaf57BA8a+bfCzbzjdVMd 5 | dvFPuGx+7r7RZYAIHw7XJm+iOb50N19FNCnabC3P5NcieYLWrEH4tHJV7qUY6dNr 6 | M1RbTAUp8hlnPXl1C8aiJwuaQVfa4Uc82RGyLQIDAQABozMwMTAOBgNVHQ8BAf8E 7 | BAMCB4AwHwYDVR0jBBgwFoAU3HH4tTA6cIcbq/KJpLpfJCzQ4pgwDQYJKoZIhvcN 8 | AQELBQADYQAEPn6rFL+JliOE3PPQzLRIJeFvtaATPQSskINm67gZGj0BScBYckoH 9 | 7GLjueGOV1+C7uiRtXTV4U+olaqteLFD2b+4OPNmNXV1cwrQ8p+6a4zhKYGOUNwF 10 | lDAXtLSCIqM= 11 | -----END CERTIFICATE----- 12 | -----BEGIN CERTIFICATE----- 13 | MIIBZjCB8aADAgECAgEBMA0GCSqGSIb3DQEBCwUAMAAwIhgPMDAwMTAxMDEwMDAw 14 | MDBaGA85OTk5MTIzMTIzNTk1OVowADB8MA0GCSqGSIb3DQEBAQUAA2sAMGgCYQC1 15 | yLxOtIjaKFp5KDH4fBYl2l4pCrHJ7z32Hd+vd1LbuPM7FCCmZup8F8WMS9VyfWWX 16 | pjIT9uf5v3tlPqFZl0gpVdwkoFid2572tKFc7amoXiIJjzPhceLW7U0zneBwcV8C 17 | AwEAAaMyMDAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU3HH4tTA6cIcbq/KJ 18 | pLpfJCzQ4pgwDQYJKoZIhvcNAQELBQADYQBw68JTMAr0iqmbjq72JLGPEQj2Y2On 19 | 1jelBhsi5YddjRCXvToueO65w4BMrfJ86CGyTIgZ+Ne2jxC4ojfvNRBDl4mDivN5 20 | 
udGTxbUWToNmXtKGw/pXglhYQi9H5vN4Se4= 21 | -----END CERTIFICATE----- 22 | -------------------------------------------------------------------------------- /sample-x509/node3-bundle.cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIBhDCCAQ6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAAMCIYDzAwMDEwMTAxMDAw 3 | MDAwWhgPOTk5OTEyMzEyMzU5NTlaMBwxGjAYBgNVBAMTEXNvbWUgY29tbW9uIG5h 4 | bWUzMHwwDQYJKoZIhvcNAQEBBQADawAwaAJhAN8CEtDhU7MTxWeqE3bw/EF15rTq 5 | AABD3/BhgUpvowebqfdiofsUwi2s9azPknbyBSQ9cB4kIUQFOYymOCiEPprMIfna 6 | okyv8qXhX6py7tScOjP345YcSszXhnvgAoxpOQIDAQABozMwMTAOBgNVHQ8BAf8E 7 | BAMCB4AwHwYDVR0jBBgwFoAU3HH4tTA6cIcbq/KJpLpfJCzQ4pgwDQYJKoZIhvcN 8 | AQELBQADYQACZp6kJUjmEv1qq2Yipw5F5yYwffHM1YezZHpaxlJBZ4K4uKhBeGTS 9 | LvGx9Bi+NDpt5FA5RABuaqcAJ7LxrQVGIwseqRZhLrsB5QkK7yD1RztuslWX7pA3 10 | cvFpzejwtFk= 11 | -----END CERTIFICATE----- 12 | -----BEGIN CERTIFICATE----- 13 | MIIBZjCB8aADAgECAgEBMA0GCSqGSIb3DQEBCwUAMAAwIhgPMDAwMTAxMDEwMDAw 14 | MDBaGA85OTk5MTIzMTIzNTk1OVowADB8MA0GCSqGSIb3DQEBAQUAA2sAMGgCYQC1 15 | yLxOtIjaKFp5KDH4fBYl2l4pCrHJ7z32Hd+vd1LbuPM7FCCmZup8F8WMS9VyfWWX 16 | pjIT9uf5v3tlPqFZl0gpVdwkoFid2572tKFc7amoXiIJjzPhceLW7U0zneBwcV8C 17 | AwEAAaMyMDAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU3HH4tTA6cIcbq/KJ 18 | pLpfJCzQ4pgwDQYJKoZIhvcNAQELBQADYQBw68JTMAr0iqmbjq72JLGPEQj2Y2On 19 | 1jelBhsi5YddjRCXvToueO65w4BMrfJ86CGyTIgZ+Ne2jxC4ojfvNRBDl4mDivN5 20 | udGTxbUWToNmXtKGw/pXglhYQi9H5vN4Se4= 21 | -----END CERTIFICATE----- 22 | -------------------------------------------------------------------------------- /examples/spire/mars-spaceX.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: elon-musk 5 | --- 6 | apiVersion: apps/v1 7 | kind: Deployment 8 | metadata: 9 | name: mars-mission 10 | labels: 11 | app: mars-mission 12 | spec: 13 | replicas: 1 14 | selector: 15 | matchLabels: 16 | app: mars-mission 17 | 
template: 18 | metadata: 19 | labels: 20 | identity_template: "true" 21 | app: mars-mission 22 | spec: 23 | hostPID: true 24 | hostNetwork: true 25 | dnsPolicy: ClusterFirstWithHostNet 26 | serviceAccountName: elon-musk 27 | containers: 28 | - name: mars-mission-main 29 | securityContext: 30 | # privilaged is needed to create socket and bundle files 31 | privileged: true 32 | image: us.gcr.io/scytale-registry/aws-cli:latest 33 | command: ["sleep"] 34 | args: ["1000000000"] 35 | volumeMounts: 36 | - name: spire-agent-socket 37 | mountPath: /run/spire/sockets 38 | readOnly: true 39 | volumes: 40 | - name: spire-agent-socket 41 | hostPath: 42 | path: /run/spire/sockets 43 | type: Directory 44 | -------------------------------------------------------------------------------- /sample-keys/cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDQzCCAiugAwIBAgIJAOgvk1cI+LN+MA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV 3 | BAYTAlVTMQswCQYDVQQIDAJDQTETMBEGA1UECgwKQWNtZSwgSW5jLjEUMBIGA1UE 4 | AwwLZXhhbXBsZS5jb20wHhcNMjEwMjE5MjIyMzU2WhcNMjEwMzIxMjIyMzU2WjBF 5 | MQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExEzARBgNVBAoMCkFjbWUsIEluYy4x 6 | FDASBgNVBAMMC2V4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB 7 | CgKCAQEAx30Four1F6N6bAMNslBjVvpoIu3aO5az0sUr6mMnYd5DFWD3iW6szqg5 8 | 8XxNkL13LUKalqeO4tjowauOzwWMsfQVuFbW9mwXtFkoJ2Zk1B+YtP5Zym31vRuy 9 | /57enDkcmyjBjsYBqOm317hoAgiJRNxqDkyTjWNNtvKQDiwfA9jGKGCqKX6KIJt0 10 | VP+f/d7GqWj2ANOg3pY1g4nhqHpeGMWA4DXVpGtlZoVI+xU5Eu2GyvL+ne8CL6Xn 11 | Ck7GN2yXywv0cCViryuXZQUtqzhk5QYNIiSibm3FihwCfHicd1aaR5vJwQoVvKl4 12 | rznkrHnK7IjWUEYfhx+Htu1Myo3W7QIDAQABozYwNDAyBgNVHREEKzApgglsb2Nh 13 | bGhvc3SCC2V4YW1wbGUuY29tgg93d3cuZXhhbXBsZS5jb20wDQYJKoZIhvcNAQEL 14 | BQADggEBAD7gcliDJganAzlVS2vmGIiv1AfYp4VSpJ2hELVlsmUi1IK944WFr+/d 15 | x3gVKBklXzfHxrZMv3RBaw+hQJM2tp5DPj5Au8KIvm2gHNZ7jkGX6PdR/o3lrdI4 16 | zDLoj60K3gqtTcdp5fmFg8EkPcBBTa4WT19/VJV97/zjr2/nZIPfmQDpf5+5aFvG 17 | 
WVWNFhEDv+hWldtJScV4ouM8fzZRVU3J2cms0N1YlDV8eJsgfk2aavXf+faPf28Y 18 | 2naAolYRKs/JyAKRyvYwmyj9/E0r2QVkS8Qgn+mYiqOS9gkBFDfRgccznkb7Zt3H 19 | qKW++pBZQ5Pab/Km7voc6Et7ED91Hm8= 20 | -----END CERTIFICATE----- 21 | -------------------------------------------------------------------------------- /examples/spire/sidecar.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: elon-musk1 5 | --- 6 | apiVersion: apps/v1 7 | kind: Deployment 8 | metadata: 9 | name: sidecar 10 | labels: 11 | app: sidecar 12 | spec: 13 | replicas: 1 14 | selector: 15 | matchLabels: 16 | app: sidecar 17 | template: 18 | metadata: 19 | labels: 20 | identity_template: "true" 21 | app: sidecar 22 | spec: 23 | hostPID: true 24 | hostNetwork: true 25 | dnsPolicy: ClusterFirstWithHostNet 26 | serviceAccountName: elon-musk1 27 | containers: 28 | - name: sidecar-main 29 | securityContext: 30 | # privilaged is needed to create socket and bundle files 31 | privileged: true 32 | # image: us.gcr.io/scytale-registry/aws-cli:latest 33 | image: tsidentity/tornjak-example-sidecar:v0.1 34 | command: ["sleep"] 35 | args: ["1000000000"] 36 | volumeMounts: 37 | - name: spire-agent-socket 38 | mountPath: /run/spire/sockets 39 | readOnly: true 40 | volumes: 41 | - name: spire-agent-socket 42 | hostPath: 43 | path: /run/spire/sockets 44 | type: Directory 45 | -------------------------------------------------------------------------------- /charts/tornjak/templates/oidc-dp-configmap.tpl: -------------------------------------------------------------------------------- 1 | {{- if .Values.oidc.enable }} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: oidc-discovery-provider 6 | namespace: {{ .Values.namespace }} 7 | data: 8 | oidc-discovery-provider.conf: | 9 | log_level = "debug" 10 | domain = "{{ .Values.oidc.serviceName }}.{{ .Values.oidc.myDiscoveryDomain }}" 11 | listen_socket_path = "{{ 
.Values.oidc.socketDir }}/{{ .Values.oidc.socketFile }}" 12 | server_api { 13 | address = "unix:///{{ .Values.spireServer.socketDir }}/{{ .Values.spireServer.socketFile }}" 14 | } 15 | nginx.conf: | 16 | user root; 17 | events { 18 | # The maximum number of simultaneous connections that can be opened by 19 | # a worker process. 20 | worker_connections 1024; 21 | } 22 | http { 23 | # WARNING: Don't use this directory for virtual hosts anymore. 24 | # This include will be moved to the root context in Alpine 3.14. 25 | #include /etc/nginx/conf.d/*.conf; 26 | server { 27 | listen *:8989; 28 | location / { 29 | proxy_pass http://unix:/run/oidc-discovery-provider/server.sock:/; 30 | } 31 | } 32 | } 33 | {{- end }} 34 | -------------------------------------------------------------------------------- /components/tsi-util/getClusterInfo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## create help menu: 4 | helpme() 5 | { 6 | cat < ${CLYM1} 34 | CLJS1=$(yq r -j ${CLYM1} |jq -r '.data."cluster-config.json"') 35 | rm "${CLYM1}" 36 | CLUSTER=$(echo "$CLJS1" | jq -r '.name') 37 | # DC=$(echo "$CLJS1" | jq -r '.datacenter') 38 | 39 | # Confirmed with Armada team that CRN format should stay consistent for a while 40 | # CRN format example: 41 | # crn:v1:bluemix:public:containers-kubernetes:eu-de:586283a9abda5102d46e1b94b923a6c5:5f4306a2738d4cdd89ff067c9481555e 42 | REGION=$(echo "$CLJS1" | jq -r '."crn"' | cut -d":" -f6) 43 | echo "export CLUSTER_NAME=$CLUSTER" 44 | echo "export REGION=$REGION" 45 | # echo "export DATA_CENTER=$DC" 46 | -------------------------------------------------------------------------------- /utils/createKeys.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | function usage { 3 | echo "$0 [key-directory] [cluster-name] [ingress]" 4 | exit 1 5 | } 6 | [[ -z $3 ]] && usage 7 | KEYSDIR=$1 8 | CERTNAME=$2 9 | ING=$3 10 | 11 | 
ROOTCA="$KEYSDIR/CA/rootCA" 12 | if [[ ! -f "$ROOTCA.key" ]]; then 13 | echo "Root CA must be created first." 14 | echo "Create CA certs:" 15 | echo " openssl genrsa -out $ROOTCA.key 4096" 16 | echo " openssl req -x509 -subj \"/C=US/ST=CA/O=Acme, Inc./CN=example.com\" -new -nodes -key $ROOTCA.key -sha256 -days 1024 -out $ROOTCA.crt" 17 | echo # empty line 18 | exit 1 19 | fi 20 | 21 | echo "Generating certs..." 22 | 23 | SUBJ="/C=US/ST=CA/O=MyOrg, Inc./CN=mydomain.com" 24 | SANSTR="[SAN]\nsubjectAltName=DNS:*.${ING}" 25 | 26 | openssl genrsa -out ${KEYSDIR}/${CERTNAME}.key 2048 2>/dev/null 27 | openssl req -new -sha256 -key ${KEYSDIR}/${CERTNAME}.key -subj "${SUBJ}" -out ${KEYSDIR}/${CERTNAME}.csr \ 28 | -reqexts SAN -config <(cat /etc/ssl/openssl.cnf <(printf ${SANSTR})) 2>/dev/null 29 | # openssl req -in ${KEYSDIR}/${CERTNAME}.csr -noout -text 30 | openssl x509 -req -extensions SAN \ 31 | -extfile <(cat /etc/ssl/openssl.cnf <(printf $SANSTR)) \ 32 | -in ${KEYSDIR}/${CERTNAME}.csr -CA ${ROOTCA}.crt \ 33 | -CAkey ${ROOTCA}.key -CAcreateserial -out ${KEYSDIR}/${CERTNAME}.crt -days 500 -sha256 2>/dev/null 34 | # openssl x509 -in ${KEYSDIR}/${CERTNAME}.crt -text -noout 35 | -------------------------------------------------------------------------------- /charts/spire/templates/agent-configmap.tpl: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: spire-agent 5 | namespace: {{ .Values.namespace }} 6 | data: 7 | agent.conf: | 8 | agent { 9 | data_dir = "/run/spire" 10 | log_level = "DEBUG" 11 | server_address = "{{ .Values.spireServer.address }}" 12 | server_port = "{{ .Values.spireServer.port }}" 13 | socket_path = "{{ .Values.spireAgent.socketDir }}/{{ .Values.spireAgent.socketFile }}" 14 | trust_bundle_path = "/run/spire/bundle/bundle.crt" 15 | trust_domain = "{{ .Values.trustdomain }}" 16 | } 17 | plugins { 18 | {{- if .Values.aws }} 19 | NodeAttestor "aws_iid" { 20 | 
plugin_data {} 21 | } 22 | {{- else if .Values.azure }} 23 | NodeAttestor "azure_msi" { 24 | plugin_data { 25 | } 26 | } 27 | {{- else }} 28 | NodeAttestor "k8s_psat" { 29 | plugin_data { 30 | cluster = "{{ .Values.clustername }}" 31 | } 32 | } 33 | {{- end }} 34 | KeyManager "memory" { 35 | plugin_data { 36 | } 37 | } 38 | WorkloadAttestor "k8s" { 39 | plugin_data { 40 | {{- if .Values.azure }} 41 | kubelet_read_only_port = 10255 42 | {{- else }} 43 | skip_kubelet_verification = true 44 | {{- end }} 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /examples/keycloak/myubuntu.id.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: myubuntuid 6 | name: myubuntuid 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: myubuntuid 12 | template: 13 | metadata: 14 | annotations: 15 | admission.trusted.identity/inject: "true" 16 | # token-url: complete URL for obtaining a realm token: 17 | tsi.identities: | 18 | - tsi.keycloak/token-url: "http://keycloak.server/auth/realms/tsi-realm/protocol/openid-connect/token" 19 | tsi.keycloak/audiences: "tsi-client" 20 | tsi.keycloak/local-path: "tsi-secrets/identities" 21 | # tsi.secrets: | 22 | # - tsi.secret/name: "mysecret1" 23 | # tsi.secret/constraints: "region,images" 24 | # tsi.secret/local-path: "tsi-secrets/" 25 | # - tsi.secret/name: "mysecret2" 26 | # tsi.secret/constraints: "region" 27 | # tsi.secret/local-path: "tsi-secrets" 28 | labels: 29 | app: myubuntuid 30 | name: myubuntuid 31 | spec: 32 | containers: 33 | - name: myubuntuid 34 | image: ubuntu@sha256:250cc6f3f3ffc5cdaa9d8f4946ac79821aafb4d3afc93928f0de9336eba21aa4 35 | imagePullPolicy: Always 36 | command: [ "/bin/bash", "-c", "--" ] 37 | args: [ "while true; do ls /tsi-secrets; sleep 15; done;" ] 38 | -------------------------------------------------------------------------------- 
/examples/spire-sidecar/config/sidecar.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: elon-musk 5 | --- 6 | apiVersion: apps/v1 7 | kind: Deployment 8 | metadata: 9 | name: apps-sidecar 10 | labels: 11 | app: apps-sidecar 12 | spec: 13 | replicas: 1 14 | selector: 15 | matchLabels: 16 | app: apps-sidecar 17 | template: 18 | metadata: 19 | labels: 20 | identity_template: "true" 21 | app: apps-sidecar 22 | spec: 23 | hostPID: true 24 | hostNetwork: true 25 | dnsPolicy: ClusterFirstWithHostNet 26 | serviceAccountName: elon-musk 27 | containers: 28 | - name: apps-sidecar 29 | securityContext: 30 | # privilaged is needed to create socket and bundle files 31 | privileged: true 32 | # image: us.gcr.io/scytale-registry/aws-cli:latest 33 | image: tsidentity/tornjak-example-sidecar:v0.1 34 | imagePullPolicy: Always 35 | command: ["sleep"] 36 | args: ["1000000000"] 37 | volumeMounts: 38 | - name: spire-agent-socket 39 | mountPath: /run/spire/sockets 40 | readOnly: true 41 | - name: db-config 42 | mountPath: /run/db 43 | volumes: 44 | - name: spire-agent-socket 45 | hostPath: 46 | path: /run/spire/sockets 47 | type: Directory 48 | - name: db-config 49 | emptyDir: {} 50 | -------------------------------------------------------------------------------- /examples/spire-sidecar/config/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | services: 3 | app-py: 4 | container_name: py 5 | build: 6 | context: ./.. 7 | dockerfile: ./python/Dockerfile 8 | ports: 9 | - 8000:5000 10 | volumes: 11 | - ./../python:/app 12 | depends_on: 13 | - db 14 | app-node: 15 | container_name: node 16 | build: 17 | context: ./.. 
18 | dockerfile: ./nodejs/Dockerfile 19 | ports: 20 | - 8001:8080 21 | volumes: 22 | - ./../nodejs:/usr/src/app 23 | depends_on: 24 | - db 25 | # db-maria: 26 | # container_name: mariadb 27 | # image: mariadb 28 | db: 29 | container_name: mysql57demo 30 | image: mysql:5.7 31 | command: --default-authentication-plugin=mysql_native_password 32 | restart: always 33 | # env_file: 34 | # - ./.env 35 | environment: 36 | MYSQL_ROOT_PASSWORD: testroot 37 | MYSQL_USER: newroot 38 | MYSQL_PASSWORD: testnewroot 39 | MYSQL_ALLOW_EMPTY_PASSWORD: 40 | MYSQL_RANDOM_ROOT_PASSWORD: 41 | ports: 42 | - 3306:3306 43 | volumes: 44 | - ./../db:/var/lib/mysql 45 | # - "./db-scripts/schema.sql:/docker-entrypoint-initdb.d/1.sql" 46 | # - "./db-scripts/data.sql:/docker-entrypoint-initdb.d/2.sql" -------------------------------------------------------------------------------- /examples/vault-client/vault-cli.template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: vault-cli 6 | name: vault-cli 7 | namespace: trusted-identity 8 | spec: 9 | replicas: 1 10 | template: 11 | metadata: 12 | annotations: 13 | admission.trusted.identity/inject: "true" 14 | tsi.secrets: | 15 | - tsi.secret/name: "mysecret1" 16 | tsi.secret/role: "tsi-role-rcni" 17 | tsi.secret/vault-path: "secret/tsi-rcni" 18 | tsi.secret/local-path: "mysecrets/secret-test1" 19 | - tsi.secret/name: "invalid" 20 | tsi.secret/role: "tsi-role-rcni" 21 | tsi.secret/vault-path: "secret/tsi-rcni" 22 | tsi.secret/local-path: "mysecrets/secret-invalid" 23 | - tsi.secret/name: "non-existing" 24 | tsi.secret/role: "tsi-role-rcni" 25 | tsi.secret/vault-path: "secret/nothing" 26 | tsi.secret/local-path: "mysecrets/non-existing" 27 | labels: 28 | app: vault-cli 29 | name: vault-cli 30 | spec: 31 | containers: 32 | - name: vault-cli 33 | image: tsidentity/vault-cli:v0.3 34 | imagePullPolicy: Always 35 | env: 36 | - name: 
VAULT_ADDR 37 | # provide the public access to the Vault server with TI plugin 38 | # value: "http://mycluster.eu-de.containers.appdomain.cloud:80" 39 | - name: VAULT_ROLE 40 | value: "tsi-role-rcni" 41 | -------------------------------------------------------------------------------- /charts/tornjak/templates/server-service.tpl: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: spire-server 5 | namespace: {{ .Values.namespace }} 6 | spec: 7 | type: NodePort 8 | ports: 9 | - name: grpc 10 | port: 8081 11 | targetPort: 8081 12 | protocol: TCP 13 | selector: 14 | app: spire-server 15 | --- 16 | apiVersion: v1 17 | kind: Service 18 | metadata: 19 | name: tornjak-be-http 20 | namespace: {{ .Values.namespace }} 21 | spec: 22 | type: NodePort 23 | ports: 24 | - name: tornjak-be-http 25 | port: 10000 26 | targetPort: 10000 27 | protocol: TCP 28 | selector: 29 | app: spire-server 30 | --- 31 | apiVersion: v1 32 | kind: Service 33 | metadata: 34 | name: tornjak-be-tls 35 | namespace: {{ .Values.namespace }} 36 | spec: 37 | type: NodePort 38 | ports: 39 | - name: tornjak-be-tls 40 | port: 20000 41 | targetPort: 20000 42 | protocol: TCP 43 | selector: 44 | app: spire-server 45 | --- 46 | apiVersion: v1 47 | kind: Service 48 | metadata: 49 | name: tornjak-be-mtls 50 | namespace: {{ .Values.namespace }} 51 | spec: 52 | type: NodePort 53 | ports: 54 | - name: tornjak-be-mtls 55 | port: 30000 56 | targetPort: 30000 57 | protocol: TCP 58 | selector: 59 | app: spire-server 60 | --- 61 | apiVersion: v1 62 | kind: Service 63 | metadata: 64 | namespace: {{ .Values.namespace }} 65 | name: tornjak-fe 66 | spec: 67 | type: LoadBalancer 68 | selector: 69 | app: spire-server 70 | ports: 71 | - name: tornjak-fe 72 | port: 3000 73 | targetPort: 3000 -------------------------------------------------------------------------------- /docs/README.md: 
-------------------------------------------------------------------------------- 1 | # Tornjak Deployment and Demos 2 | This document describes the list of available documents to deploy and 3 | run various Tornjak demos. 4 | 5 | The demos are sorted by complexity, 6 | starting with the simple ones and progressing into more complex. 7 | 8 | We suggest running them in the specified order. 9 | 10 | ## Tornjak Deployment 11 | There are multiple ways to deploy Tornjak on Kubernetes. 12 | The simplest scenario is when Tornjak and SPIRE server 13 | are deployed in the same cluster as the workloads and SPIRE agents. 14 | 15 | ### Single cluster on local `minikube` or `kind` 16 | ![single cluster on minikube or kind](imgs/single_cluster_local.jpg) 17 | 18 | ### Single cluster in the Cloud with OpenShift 19 | ![single cluster on OpenShift](imgs/single_cluster_openshift.jpg) 20 | 21 | ### Multi-Cluster deployment 22 | ![multi-cluster](imgs/multi_cluster.jpg) 23 | 24 | These demos deploy Tornjak Server and SPIRE agents in various scenarios: 25 | 1. deploy in a single cluster locally [via helm charts](./spire-helm.md) 26 | 2. deploy in IBM Cloud [via helm charts](./spire-helm.md) 27 | 3. deploy on [OpenShift in IBM Cloud](./spire-on-openshift.md) 28 | 4. [multi-cluster deployment](./spire-multi-cluster.md) 29 | 5. [SPIRE agents on AWS](./spire-on-aws.md) 30 | 31 | ## Tornjak use-cases 32 | These demos showcase various experiments 33 | 1. [OIDC Tutorial](./spire-oidc-tutorial.md) 34 | 2. [OIDC for Vault](./spire-oidc-vault.md) 35 | 3. [AWS S3 storage access via OIDC](./spire-oidc-aws-s3.md) 36 | 4. [Identity Sidecar for Python and MySQL applications](../examples/spire-sidecar/README.md) 37 | -------------------------------------------------------------------------------- /charts/spire/values.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Default values for spire. 3 | # This is a YAML-formatted file. 
4 | 5 | # namespace - namespace for deploying SPIRE agents and workload registrar 6 | namespace: spire 7 | # clustername needs to match between agent-config, k8s-workload-registrar config, 8 | # and the actual cluster name 9 | clustername: spire-example 10 | # region - arbitrary label to describe the region 11 | region: sample-region 12 | 13 | # SPIRE related elements 14 | # trustdomain is arbitrary but needs to match between agent-config and 15 | # k8s-workload-registrar config and SPIRE Server. 16 | # For multi-cluster support, trustdomain must be identical for all clusters and 17 | # SPIRE server 18 | trustdomain: example.org 19 | # SPIRE version: 20 | spireVersion: 1.3.5 21 | 22 | # spireServer - location of the SPIRE server 23 | spireServer: 24 | address: spire-server.appdomain.cloud 25 | port: 443 26 | 27 | # SPIRE Agent related configuration 28 | spireAgent: 29 | img: gcr.io/spiffe-io/spire-agent 30 | # SPIRE Agent socket: 31 | socketDir: /run/spire/sockets 32 | socketFile: agent.sock 33 | 34 | # Workload Registrar configuration 35 | spireRegistrar: 36 | img: gcr.io/spiffe-io/k8s-workload-registrar 37 | 38 | # Additional configuration related to the platform 39 | # azure - enables the plugin to suppor Azure platform, also 40 | # workaround since Azure doesn't support by default 10250 security port for kubelet 41 | azure: false 42 | # aws - enables the plugin for AWS NodeAttestor 43 | aws: false 44 | # openShift requires special configuration, including different security level 45 | openShift: false 46 | -------------------------------------------------------------------------------- /examples/spire/demo.mars-s3.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # this script requires https://github.com/duglin/tools/tree/master/demoscript 4 | # or https://github.com/mrsabath/tools/tree/master/demoscript 5 | declare DEMOFILE=/usr/local/bin/demoscript 6 | if [ ! 
-f "$DEMOFILE" ]; then 7 | echo "$DEMOFILE does not exist." 8 | exit 1 9 | fi 10 | source "${DEMOFILE}" 11 | 12 | AG_SOCK=${AG_SOCK:-"/run/spire/sockets/agent.sock"} 13 | S3_AUD=${S3_AUD:-"mys3"} 14 | S3_ROLE=${S3_ROLE:-"arn:aws:iam::581274594392:role/mars-mission-role-01"} 15 | S3_CMD=${S3_CMD:-"aws s3 cp s3://mars-spire/mars.txt top-secret.txt"} 16 | S3_EXE=${S3_EXE:-"cat top-secret.txt"} 17 | 18 | # bin/spire-agent api fetch jwt -audience mys3 -socketPath /run/spire/sockets/agent.sock 19 | # vi token.jwt # get JWT token 20 | # bin/spire-agent api fetch jwt -audience mys3 -socketPath /run/spire/sockets/agent.sock | sed -n '2p' | x 21 | # args > token.jwt 22 | # AWS_ROLE_ARN=arn:aws:iam::581274594392:role/mars-mission-role AWS_WEB_IDENTITY_TOKEN_FILE=token.jwt aws s3 cp s3://mars-spire/mars.txt top-secret.txt 23 | 24 | # show the JWT token 25 | doit "/opt/spire/bin/spire-agent api fetch jwt -audience $S3_AUD -socketPath $AG_SOCK" 26 | 27 | # parse the JWT token 28 | doit --noexec "/opt/spire/bin/spire-agent api fetch jwt -audience $S3_AUD -socketPath $AG_SOCK | sed -n '2p' | xargs > token.jwt" 29 | /opt/spire/bin/spire-agent api fetch jwt -audience "$S3_AUD" -socketPath "$AG_SOCK" | sed -n '2p' | xargs > token.jwt 30 | 31 | # use the JWT token to request S3 content 32 | doit "AWS_ROLE_ARN=$S3_ROLE AWS_WEB_IDENTITY_TOKEN_FILE=token.jwt $S3_CMD" 33 | doit "$S3_EXE" 34 | -------------------------------------------------------------------------------- /charts/spire/templates/k8s-workload-registrar-deploy.tpl: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: spire-registrar 5 | namespace: {{ .Values.namespace }} 6 | labels: 7 | app: spire-registrar 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: spire-registrar 13 | template: 14 | metadata: 15 | namespace: {{ .Values.namespace }} 16 | labels: 17 | app: spire-registrar 18 | spec: 19 | 
serviceAccountName: spire-k8s-registrar 20 | shareProcessNamespace: true 21 | containers: 22 | - name: k8s-workload-registrar 23 | #image: k8s-workload-registrar:latest 24 | image: {{ .Values.spireRegistrar.img }}:{{ .Values.spireVersion }} 25 | imagePullPolicy: Always 26 | securityContext: 27 | # TODO: review this, maybe applicable for OpenShift only: 28 | # privilaged is needed to create socket and bundle files 29 | privileged: true 30 | args: 31 | - -config 32 | - /run/k8s-workload-registrar/config/registrar.conf 33 | volumeMounts: 34 | - name: spire-registrar-socket 35 | mountPath: {{ .Values.spireAgent.socketDir }} 36 | readOnly: false 37 | - name: k8s-workload-registrar-config 38 | mountPath: /run/k8s-workload-registrar/config 39 | readOnly: true 40 | volumes: 41 | - name: spire-registrar-socket 42 | hostPath: 43 | path: {{ .Values.spireAgent.socketDir }} 44 | type: DirectoryOrCreate 45 | - name: k8s-workload-registrar-config 46 | configMap: 47 | name: k8s-workload-registrar 48 | -------------------------------------------------------------------------------- /examples/spire/mars-demo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: elon-musk 5 | --- 6 | apiVersion: apps/v1 7 | kind: Deployment 8 | metadata: 9 | name: mars-mission 10 | labels: 11 | app: mars-mission 12 | spec: 13 | replicas: 1 14 | selector: 15 | matchLabels: 16 | app: mars-mission 17 | template: 18 | metadata: 19 | labels: 20 | identity_template: "true" 21 | app: mars-mission 22 | spec: 23 | hostPID: true 24 | hostNetwork: true 25 | dnsPolicy: ClusterFirstWithHostNet 26 | serviceAccountName: elon-musk 27 | containers: 28 | - name: mars-mission-main 29 | securityContext: 30 | # privilaged is needed to create socket and bundle files 31 | privileged: true 32 | image: tsidentity/mars-demo:latest 33 | command: ["sleep"] 34 | args: ["1000000000"] 35 | env: 36 | - name: AG_SOCK 37 | value: 
"/run/spire/sockets/agent.sock" 38 | - name: S3_ROLE 39 | value: "arn:aws:iam::581274594392:role/mars-mission-role-01" 40 | - name: S3_AUD 41 | value: "mys3" 42 | - name: VAULT_ADDR 43 | value: "http://tsi-kube01-vault.us-south.containers.appdomain.cloud" 44 | - name: VAULT_ROLE 45 | value: "marsrole" 46 | - name: VAULT_SECRET 47 | value: "/v1/secret/data/my-super-secret" 48 | volumeMounts: 49 | - name: spire-agent-socket 50 | mountPath: /run/spire/sockets 51 | readOnly: true 52 | volumes: 53 | - name: spire-agent-socket 54 | hostPath: 55 | path: /run/spire/sockets 56 | type: Directory 57 | -------------------------------------------------------------------------------- /examples/spire-sidecar/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: bin/python bin/nodejs container-python container-nodejs 2 | 3 | GIT_COMMIT_SHA="$(shell git rev-parse --short HEAD 2>/dev/null)" 4 | REPO ?= tsidentity 5 | PYTHON_IMG_NAME ?= tornjak-example-python 6 | NODEJS_IMG_NAME ?= tornjak-example-nodejs 7 | SIDECAR_IMG_NAME ?= tornjak-example-sidecar 8 | VERSION=$(shell cat version.txt) 9 | # GO_FILES := $(shell find . -type f -name '*.go' -not -name '*_test.go' -not -path './vendor/*') 10 | 11 | PYTHON_IMG := $(REPO)/$(PYTHON_IMG_NAME):$(GIT_COMMIT_SHA) 12 | PYTHON_IMG_MUTABLE := $(REPO)/$(PYTHON_IMG_NAME):$(VERSION) 13 | NODEJS_IMG := $(REPO)/$(NODEJS_IMG_NAME):$(GIT_COMMIT_SHA) 14 | NODEJS_IMG_MUTABLE := $(REPO)/$(NODEJS_IMG_NAME):$(VERSION) 15 | SIDECAR_IMG := $(REPO)/$(SIDECAR_IMG_NAME):$(GIT_COMMIT_SHA) 16 | SIDECAR_IMG_MUTABLE := $(REPO)/$(SIDECAR_IMG_NAME):$(VERSION) 17 | 18 | all: bin/python bin/nodejs bin/sidecar container-python container-nodejs container-sidecar 19 | 20 | sidecar: bin/sidecar container-sidecar 21 | 22 | bin/python: 23 | docker build --no-cache -t $(PYTHON_IMG) -f python/Dockerfile . 
24 | docker tag $(PYTHON_IMG) $(PYTHON_IMG_MUTABLE) 25 | 26 | bin/nodejs: 27 | docker build --no-cache -t $(NODEJS_IMG) -f nodejs/Dockerfile . 28 | docker tag $(NODEJS_IMG) $(NODEJS_IMG_MUTABLE) 29 | 30 | bin/sidecar: 31 | docker build --no-cache -t $(SIDECAR_IMG) -f sidecar/Dockerfile . 32 | docker tag $(SIDECAR_IMG) $(SIDECAR_IMG_MUTABLE) 33 | 34 | container-python: 35 | docker push $(PYTHON_IMG) 36 | docker push $(PYTHON_IMG_MUTABLE) 37 | 38 | container-nodejs: 39 | docker push $(NODEJS_IMG) 40 | docker push $(NODEJS_IMG_MUTABLE) 41 | 42 | container-sidecar: 43 | docker push $(SIDECAR_IMG) 44 | docker push $(SIDECAR_IMG_MUTABLE) 45 | 46 | # vendor: 47 | # go mod tidy 48 | # go mod vendor 49 | -------------------------------------------------------------------------------- /examples/spire/Dockerfile.busybox.mars: -------------------------------------------------------------------------------- 1 | FROM us.gcr.io/scytale-registry/aws-cli:latest 2 | # FROM ubuntu:18.04 3 | 4 | RUN apk add jq curl coreutils wget unzip vim 5 | # RUN apt update && \ 6 | # apt install -y curl && \ 7 | # apt install coreutils && \ 8 | # apt install -y wget && \ 9 | # apt install -y unzip && \ 10 | # apt install -y jq && \ 11 | # apt install -y vim 12 | 13 | # install yq required for xform YAML to JSON 14 | #RUN apt-get install -y software-properties-common && \ 15 | # add-apt-repository ppa:rmescandon/yq && \ 16 | # apt update && apt install -y yq 17 | 18 | RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \ 19 | chmod +x kubectl 20 | RUN wget https://releases.hashicorp.com/vault/1.4.2/vault_1.4.2_linux_amd64.zip && \ 21 | unzip vault_1.4.2_linux_amd64.zip && \ 22 | mkdir -p /usr/local/bin/ && \ 23 | mv vault /usr/local/bin/ && \ 24 | rm -f vault_1.4.2_linux_amd64.zip 25 | 26 | COPY demoscript /usr/local/bin 27 | COPY demo.mars.sh /root/demo.sh 28 | #COPY 
run-sidecar.sh execute-get-token.sh execute-get-vault-secrets.sh \ 29 | # get-vault-secrets.sh /usr/local/bin/ 30 | 31 | # adding Spire agent 32 | RUN pwd 33 | RUN VERSION=1.0.2 && \ 34 | wget https://github.com/spiffe/spire/releases/download/v${VERSION}/spire-${VERSION}-linux-x86_64-glibc.tar.gz && \ 35 | tar zvxf spire-${VERSION}-linux-x86_64-glibc.tar.gz && \ 36 | mkdir -p /opt/spire/bin && \ 37 | mv spire-${VERSION}/bin/spire-agent /opt/spire/bin/ && \ 38 | rm -rf spire-${VERSION}/ && \ 39 | rm -f spire-${VERSION}-linux-x86_64-glibc.tar.gz 40 | 41 | RUN cd /root 42 | 43 | # run it forever 44 | CMD ["/bin/bash", "-c", "tail -f /dev/null"] 45 | -------------------------------------------------------------------------------- /sample-keys/key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDHfQWi6vUXo3ps 3 | Aw2yUGNW+mgi7do7lrPSxSvqYydh3kMVYPeJbqzOqDnxfE2QvXctQpqWp47i2OjB 4 | q47PBYyx9BW4Vtb2bBe0WSgnZmTUH5i0/lnKbfW9G7L/nt6cORybKMGOxgGo6bfX 5 | uGgCCIlE3GoOTJONY0228pAOLB8D2MYoYKopfoogm3RU/5/93sapaPYA06DeljWD 6 | ieGoel4YxYDgNdWka2VmhUj7FTkS7YbK8v6d7wIvpecKTsY3bJfLC/RwJWKvK5dl 7 | BS2rOGTlBg0iJKJubcWKHAJ8eJx3VppHm8nBChW8qXivOeSsecrsiNZQRh+HH4e2 8 | 7UzKjdbtAgMBAAECggEBAMEBPeXNK0oumbBNGABQdHxfk0dQFteWS+79YyrHuf+f 9 | WgoiIfyuvrTG4CC64hTKqrZ4fTaLcNwbfr56LJYxSVmWzTjnk90g4aJUrFvKXn05 10 | ALcEa31F+/52AS392GUPgJTBzttCybJSRdOJj99/wMeF9XpWLmAaFsONVUDRAUTm 11 | ziPbIv+s6mdjdthXZ6DHL16UXvmc4iuWJyW6q97gdyDg6PGzX9yEqC9KVl5sPhdS 12 | AouayJeAvXcbb+e5o4sIRM3TsEJjIv8mHM+1Xp3KMrapg4sZVcTwMhSblJXzrj8E 13 | y2txdMFk5iEBjlI21IUlPWyD50jDDdvbe/ITy+5C4SUCgYEA5WA++I5lTrn060yw 14 | QLevGT3Xf4vquLam/jj2zILIFLQ6gnLRSQbtyAl+0hmaQewVARtn32VWyBnJI3ld 15 | CDbueyY1SaAqSjC9cPFGL2QsSgJ7rIKjFCuyTmUeCFlYIo6xtO51gHqESICb7U+W 16 | XiVqzDaTrH40rqwfTLZ/1+WFp28CgYEA3qSvozaXdhh5hnpBwASQHUgWqJ0M33tT 17 | aM57QWQY5sYKzHtq3ZTzEGrqdlhDMdM8E8J5OSwhO4Lfb8f9/6nkFdxUq56eLVyx 18 | 
eFoI/VYPIpSuphBkfKT0W0E/TCzbHuxFAXKZDH0layZwGa1Sa0d5cWgO/toOihsU 19 | zk/2JrGf2WMCgYBEZHdCO4sfdzj88+fV1C8kcpkP0dNR8jxGrwt/nTs1WSvc+4lF 20 | oghCTfp/gNzK7S6yHs7RAHFoxq9/oU9JBMAE/jaDVntllbTWuUo5FUBTbPwdSvJ/ 21 | MCxBTcsvqBwsaam5DiUBAwbwa1CS6NkoBzYZ3+gWVac1seK/R+dEo44/hwKBgBuP 22 | xZJt4qgi9YBZSCRuLC0DzJJJBtvvl27FCOsaah6/a3zO1nbhxolEwwutd3wGi0xi 23 | 6cDTHrYQSHq9BkuNC4hNQ0oPk8NH00xz0OMTsUH1p/000EvB2+IkHKLgdqvIUe6Q 24 | 8jNilqxCEOSM2UQgj/8jA6A2/E/KdHPp4C4AFSrDAoGBALAtd0Tsy7d4rElUnEut 25 | F+dMFsi1yciE0Cgekmw4gkWFOvyHvfjv0KdApxvrHJQembYMMhPGp1hov88wgHr7 26 | LhAEV0lDFtSZohtDV7lYzcpOsg9yfqqkp7T4unC/3zNhiwszfCqiJlqROQkBxBxm 27 | /9yPd0YJup9AAYk2Dc55UhNB 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /examples/spire-sidecar/sidecar/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:18.04 2 | 3 | RUN apt update && \ 4 | apt install -y curl && \ 5 | apt install coreutils && \ 6 | apt install -y wget && \ 7 | apt install -y unzip && \ 8 | apt install -y jq && \ 9 | apt install -y vim && \ 10 | apt install -y python3 && \ 11 | apt install -y python3-pip 12 | 13 | 14 | # install Spire agent cli: 15 | RUN VERSION=1.0.2 && \ 16 | wget https://github.com/spiffe/spire/releases/download/v${VERSION}/spire-${VERSION}-linux-x86_64-glibc.tar.gz && \ 17 | tar zvxf spire-${VERSION}-linux-x86_64-glibc.tar.gz && \ 18 | mkdir -p /opt/spire/bin && \ 19 | mv /spire-${VERSION}/bin/spire-agent /opt/spire/bin/ && \ 20 | rm -rf spire-${VERSION}/ && \ 21 | rm -f spire-${VERSION}-linux-x86_64-glibc.tar.gz 22 | 23 | # install Vault client: 24 | RUN wget https://releases.hashicorp.com/vault/1.4.2/vault_1.4.2_linux_amd64.zip && \ 25 | unzip vault_1.4.2_linux_amd64.zip && \ 26 | mkdir -p /usr/local/bin/ && \ 27 | mv vault /usr/local/bin/ && \ 28 | rm -f vault_1.4.2_linux_amd64.zip 29 | 30 | # install AWS CLI 31 | RUN curl 
"https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \ 32 | unzip awscliv2.zip && \ 33 | ./aws/install && \ 34 | rm -rf aws && \ 35 | rm -f awscliv2.zip 36 | 37 | COPY sidecar/run-sidecar-bash.sh /usr/local/bin 38 | COPY sidecar/run-sidecar-python.py /usr/local/bin 39 | 40 | COPY sidecar/requirements.txt /usr/local/bin/requirements.txt 41 | RUN pip3 install -r /usr/local/bin/requirements.txt 42 | 43 | RUN cd /root 44 | 45 | 46 | # Use shell script to obtain files 47 | # CMD ["/usr/local/bin/run-sidecar-bash.sh", "~/inputfile.txt"] 48 | 49 | # Use python script to obtain files 50 | CMD ["python3", "/usr/local/bin/run-sidecar-python.py", "~/inputfile.txt"] 51 | 52 | # CMD ["/bin/bash", "-c", "tail -f /dev/null"] 53 | -------------------------------------------------------------------------------- /examples/keycloak/keycloak.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: tsi-keycloak 5 | spec: 6 | selector: 7 | app: tsi-keycloak 8 | ports: 9 | - protocol: TCP 10 | port: 8080 11 | # port: 9090 12 | targetPort: 8080 13 | type: NodePort 14 | --- 15 | 16 | apiVersion: apps/v1 17 | kind: Deployment 18 | metadata: 19 | labels: 20 | app: tsi-keycloak 21 | name: tsi-keycloak 22 | spec: 23 | replicas: 1 24 | selector: 25 | matchLabels: 26 | app: tsi-keycloak 27 | template: 28 | metadata: 29 | labels: 30 | app: tsi-keycloak 31 | name: tsi-keycloak 32 | spec: 33 | containers: 34 | - name: tsi-keycloak 35 | image: quay.io/keycloak/keycloak:19.0.3 36 | imagePullPolicy: Always 37 | args: 38 | - start-dev 39 | env: 40 | - name: PROXY_ADDRESS_FORWARDING 41 | value: "true" 42 | - name: KEYCLOAK_ADMIN 43 | value: admin 44 | - name: KEYCLOAK_ADMIN_PASSWORD 45 | value: adminpasswd 46 | - name: KC_PROXY 47 | value: edge 48 | - name: KEYCLOAK_FRONTEND_URL 49 | value: 
http://keycloak.tornjak-02-9d995c4a8c7c5f281ce13d5467ff6a94-0000.us-south.containers.appdomain.cloud/auth/ 50 | - name: KEYCLOAK_ADMIN_URL 51 | value: http://keycloak.tornjak-02-9d995c4a8c7c5f281ce13d5467ff6a94-0000.us-south.containers.appdomain.cloud/auth/realms/master/admin/ 52 | ports: 53 | - containerPort: 8080 54 | name: http 55 | protocol: TCP 56 | readinessProbe: 57 | failureThreshold: 3 58 | httpGet: 59 | path: /realms/master 60 | port: 8080 61 | scheme: HTTP 62 | periodSeconds: 10 63 | successThreshold: 1 64 | timeoutSeconds: 1 65 | -------------------------------------------------------------------------------- /examples/spire/demo.mars-vault.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # this script requires https://github.com/duglin/tools/tree/master/demoscript 4 | # or https://github.com/mrsabath/tools/tree/master/demoscript 5 | declare DEMOFILE=/usr/local/bin/demoscript 6 | if [ ! -f "$DEMOFILE" ]; then 7 | echo "$DEMOFILE does not exist." 
8 | exit 1 9 | fi 10 | source "${DEMOFILE}" 11 | 12 | AG_SOCK=${AG_SOCK:-"/run/spire/sockets/agent.sock"} 13 | VAULT_ADDR=${VAULT_ADDR:-"http://tsi-kube01-9d995c4a8c7c5f281ce13d5467ff6a94-0000.us-south.containers.appdomain.cloud"} 14 | VAULT_AUD=${VAULT_AUD:-"vault"} 15 | VAULT_ROLE=${VAULT_ROLE:-"marsrole"} 16 | VAULT_SECRET=${VAULT_SECRET:-"/v1/secret/data/my-super-secret"} 17 | VAULT_DATA=${VAULT_DATA:-".data.data"} 18 | 19 | # show the JWT token 20 | doit "/opt/spire/bin/spire-agent api fetch jwt -audience $VAULT_AUD -socketPath $AG_SOCK" 21 | 22 | # parse the JWT token 23 | doit --noexec "/opt/spire/bin/spire-agent api fetch jwt -audience $VAULT_AUD -socketPath $AG_SOCK | sed -n '2p' | xargs > token.jwt" 24 | /opt/spire/bin/spire-agent api fetch jwt -audience "$VAULT_AUD" -socketPath "$AG_SOCK" | sed -n '2p' | xargs > token.jwt 25 | 26 | # use the JWT token to request VAULT token 27 | JWT=$(cat token.jwt) 28 | doit --noexec "curl --max-time 10 -s -o vout --request POST --data '{"'"jwt": "${JWT}", "role": "${VAULT_ROLE}"'" }' ${VAULT_ADDR}/v1/auth/jwt/login" 29 | curl --max-time 10 -s -o vout --request POST --data '{"jwt": "'"${JWT}"'", "role": "'"${VAULT_ROLE}"'" }' "${VAULT_ADDR}"/v1/auth/jwt/login 30 | 31 | # parse the Vault token 32 | doit --noexec 'TOKEN=$(cat vout | jq -r ".auth.client_token")' 33 | TOKEN=$(cat vout | jq -r '.auth.client_token') 34 | 35 | # use Vault token to request the secret 36 | doit --noexec 'curl -s -H "X-Vault-Token: $TOKEN"'" ${VAULT_ADDR}${VAULT_SECRET} | jq -r '$VAULT_DATA'" 37 | curl -s -H "X-Vault-Token: $TOKEN" "${VAULT_ADDR}${VAULT_SECRET}" | jq -r "$VAULT_DATA" 38 | -------------------------------------------------------------------------------- /charts/spire/templates/k8s-workload-registrar-roles.tpl: -------------------------------------------------------------------------------- 1 | # This is copied directly from the spire/support/k8s/k8s-workload-registrar tree. 
2 | # These roles are needed for the k8s registrar to work properly in reconciling mode. 3 | --- 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | kind: ClusterRole 6 | metadata: 7 | name: spire-k8s-registrar-{{ .Values.namespace }}-cluster-role 8 | rules: 9 | - apiGroups: [""] 10 | resources: ["pods", "nodes", "endpoints", "configmaps"] 11 | verbs: ["get", "list", "watch"] 12 | --- 13 | apiVersion: rbac.authorization.k8s.io/v1 14 | kind: ClusterRoleBinding 15 | metadata: 16 | name: spire-k8s-registrar-{{ .Values.namespace }}-cluster-role-binding 17 | roleRef: 18 | apiGroup: rbac.authorization.k8s.io 19 | kind: ClusterRole 20 | name: spire-k8s-registrar-{{ .Values.namespace }}-cluster-role 21 | subjects: 22 | - kind: ServiceAccount 23 | name: spire-k8s-registrar 24 | namespace: {{ .Values.namespace }} 25 | --- 26 | apiVersion: rbac.authorization.k8s.io/v1 27 | kind: Role 28 | metadata: 29 | name: spire-k8s-registrar-{{ .Values.namespace }}-role 30 | namespace: {{ .Values.namespace }} 31 | rules: 32 | - apiGroups: [""] 33 | resources: ["configmaps"] 34 | verbs: ["create"] 35 | - apiGroups: [""] 36 | resources: ["configmaps"] 37 | resourceNames: ["controller-leader-election-helper"] 38 | verbs: ["update", "get"] 39 | - apiGroups: [""] 40 | resources: ["events"] 41 | verbs: ["create"] 42 | --- 43 | apiVersion: rbac.authorization.k8s.io/v1 44 | kind: RoleBinding 45 | metadata: 46 | name: spire-k8s-registrar-{{ .Values.namespace }}-role-binding 47 | namespace: {{ .Values.namespace }} 48 | subjects: 49 | - kind: ServiceAccount 50 | name: spire-k8s-registrar 51 | namespace: {{ .Values.namespace }} 52 | roleRef: 53 | kind: Role 54 | name: spire-k8s-registrar-{{ .Values.namespace }}-role 55 | apiGroup: rbac.authorization.k8s.io 56 | -------------------------------------------------------------------------------- /charts/tornjak/templates/ingress.tpl: -------------------------------------------------------------------------------- 1 | {{- if .Values.tornjak }} 2 | {{- if 
.Values.tornjak.config }} 3 | 4 | {{- if .Values.tornjak.config.frontend }} 5 | {{- if .Values.tornjak.config.frontend.ingress }} 6 | apiVersion: networking.k8s.io/v1 7 | kind: Ingress 8 | metadata: 9 | name: tornjak-fe-ingress 10 | namespace: {{ .Values.namespace }} 11 | spec: 12 | rules: 13 | # provide the actual Ingress for `host` value: 14 | # use the following command to get the subdomain: 15 | # ibmcloud ks cluster get --cluster | grep Ingress 16 | # any prefix can be added (e.g.): 17 | # host: tsi-vault.my-tsi-cluster-xxxxxxxxxxx-0000.eu-de.containers.appdomain.cloud 18 | - host: {{ .Values.tornjak.config.frontend.ingress }} 19 | http: 20 | paths: 21 | - pathType: Prefix 22 | path: "/" 23 | backend: 24 | service: 25 | name: tornjak-fe 26 | port: 27 | number: 3000 28 | {{- end }} 29 | {{- end }} 30 | 31 | {{- if .Values.tornjak.config.backend }} 32 | {{- if .Values.tornjak.config.backend.ingress }} 33 | --- 34 | apiVersion: networking.k8s.io/v1 35 | kind: Ingress 36 | metadata: 37 | name: tornjak-be-ingress 38 | namespace: {{ .Values.namespace }} 39 | spec: 40 | rules: 41 | # provide the actual Ingress for `host` value: 42 | # use the following command to get the subdomain: 43 | # ibmcloud ks cluster get --cluster | grep Ingress 44 | # any prefix can be added (e.g.): 45 | # host: tsi-vault.my-tsi-cluster-xxxxxxxxxxx-0000.eu-de.containers.appdomain.cloud 46 | - host: {{ .Values.tornjak.config.backend.ingress }} 47 | http: 48 | paths: 49 | - pathType: Prefix 50 | path: "/" 51 | backend: 52 | service: 53 | name: tornjak-be-http 54 | port: 55 | number: 10000 56 | {{- end }} 57 | {{- end }} 58 | 59 | {{- end }} 60 | {{- end }} -------------------------------------------------------------------------------- /charts/spire/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | The installation of the SPIRE Agent and Workload Registrar for 2 | Universal Trusted Workload Identity Service has completed. 
3 | 4 | Cluster name: {{ .Values.clustername }} 5 | Trust Domain: {{ .Values.trustdomain }} 6 | Namespace: {{ .Values.namespace }} 7 | {{- if .Values.openShift }} 8 | OpenShift mode: true 9 | {{- end }} 10 | {{- if .Values.aws }} 11 | AWS NodeAttestor: true 12 | {{- end }} 13 | 14 | SPIRE info: 15 | Spire Server address: {{ .Values.spireServer.address }}:{{ .Values.spireServer.port }} 16 | Spire Agent image: {{ .Values.spireAgent.img }}:{{ .Values.spireVersion }} 17 | Spire Registrar image: {{ .Values.spireRegistrar.img }}:{{ .Values.spireVersion }} 18 | Spire Agent sockets: {{ .Values.spireAgent.socketDir }}/{{ .Values.spireAgent.socketFile }} 19 | 20 | 21 | To enable Workload Registrar, create an entry on Tornjak UI: 22 | 1. find out what node the registrar is running on: 23 | kubectl -n {{ .Values.namespace }} get pods -o wide 24 | 2. get the SPIFFE ID of the agent for this node (Tornjak -> Agents -> Agent List) 25 | 3. create Entry (Tornjak -> Entries -> Create Entry) using appropriate Agent 26 | SPIFFE ID as Parent ID: 27 | 28 | SPIFFE ID: 29 | spiffe://{{ .Values.trustdomain }}/{{ .Values.clustername }}/workload-registrar 30 | Parent ID: 31 | spiffe://{{ .Values.trustdomain }}/spire/agent/k8s_psat/{{ .Values.clustername }}/xxx 32 | Selectors: 33 | k8s:sa:spire-k8s-registrar,k8s:ns:{{ .Values.namespace }},k8s:container-name:k8s-workload-registrar 34 | * check Admin Flag 35 | 36 | Chart Name: {{ .Chart.Name }}. 37 | Your release is named {{ .Release.Name }}. 
38 | 39 | To learn more about the release, try: 40 | 41 | $ helm status {{ .Release.Name }} 42 | $ helm get all {{ .Release.Name }} 43 | 44 | {{- if not (lookup "apiextensions.k8s.io/v1" "CustomResourceDefinition" "" "spiffeids.spiffeid.spiffe.io") -}} 45 | {{ printf "\n" }} 46 | Generate new SPIFFEID CRD 47 | {{- else -}} 48 | {{ printf "\n" }} 49 | SPIFFEID CRD already exists 50 | {{- end -}} 51 | -------------------------------------------------------------------------------- /utils/deployKeys_keylime.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | UC=/root/undercloud.yml 3 | MZ=/root/mzone.yml 4 | 5 | # to get the status of the Keylime cluster: 6 | # keylime-op -u /root/undercloud.yml -m /root/mzone.yml -o status 7 | 8 | # since we are processing all the nodes at once, deactive them all: 9 | keylime-op -u ${UC} -m ${MZ} -o deactivate # > /dev/null 2>&1 10 | CLUSTER_STATE=$(keylime-op -u ${UC} -m ${MZ} -o status | jq -r '.concise') 11 | while [[ ${CLUSTER_STATE} != "inactive" ]] 12 | do 13 | # wait until all the notes are deactivated 14 | keylime-op -u ${UC} -m ${MZ} -o wait --wait-pass inactive --wait-fail= --wait-interval=5 --wait-maxcount=3 > /dev/null 2>&1 15 | CLUSTER_STATE=$(keylime-op -u ${UC} -m ${MZ} -o status | jq -r '.concise') 16 | done 17 | 18 | # get all the nodes for x509 deployment 19 | NODES=$(keylime-op -u ${UC} -m ${MZ} -o status | jq -r '.status | keys[]') 20 | for NODE in $NODES 21 | do 22 | echo "*** Processing $NODE...." 23 | ./x509-conf/createNodeScript.sh $NODE 24 | RT=$? 
25 | if [ $RT -ne 0 ]; then 26 | echo "Error executing \"createNodeScript.sh $NODE\" script" 27 | exit 1 28 | fi 29 | # keylime-op -u ${UC} -m ${MZ} -o deactivate -n $NODE 30 | # NODE_STATE=$(keylime-op -u ${UC} -m ${MZ} -o status -n $NODE | jq -r '.status[]') 31 | # while [[ ${NODE_STATE} != "inactive" ]] 32 | 33 | # deploy the script to the nodes 34 | keylime-op -u ${UC} -m ${MZ} -o autorun -s `pwd`/scripts/${NODE}.sh -n $NODE 35 | keylime-op -u ${UC} -m ${MZ} -o wait --wait-pass inactive --wait-fail= --wait-interval=5 --wait-maxcount=3 > /dev/null 2>&1 36 | # keylime-op -u ${UC} -m ${MZ} -o activate -n $NODE 37 | # keylime-op -u ${UC} -m ${MZ} -o wait --wait-pass inactive --wait-fail= --wait-interval=5 --wait-maxcount=3 38 | echo "*** Done with $NODE!" 39 | done 40 | keylime-op -u ${UC} -m ${MZ} -o activate 41 | CLUSTER_STATE=$(keylime-op -u ${UC} -m ${MZ} -o status | jq -r '.concise') 42 | while [[ ${CLUSTER_STATE} != "verified" ]] 43 | do 44 | # wait until all the notes are activated 45 | keylime-op -u ${UC} -m ${MZ} -o wait --wait-pass inactive --wait-fail= --wait-interval=5 --wait-maxcount=3 > /dev/null 2>&1 46 | CLUSTER_STATE=$(keylime-op -u ${UC} -m ${MZ} -o status | jq -r '.concise') 47 | done 48 | 49 | keylime-op -u ${UC} -m ${MZ} -o status 50 | -------------------------------------------------------------------------------- /examples/spire/Dockerfile.spire: -------------------------------------------------------------------------------- 1 | FROM ubuntu:18.04 2 | 3 | RUN apt update && \ 4 | apt install -y curl && \ 5 | apt install coreutils && \ 6 | apt install -y wget && \ 7 | apt install -y unzip && \ 8 | apt install -y jq && \ 9 | apt install -y vim 10 | 11 | # install yq required for xform YAML to JSON 12 | RUN apt-get install -y software-properties-common && \ 13 | add-apt-repository ppa:rmescandon/yq && \ 14 | apt update && apt install -y yq 15 | 16 | RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s 
https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \ 17 | chmod +x kubectl 18 | RUN wget https://releases.hashicorp.com/vault/1.4.2/vault_1.4.2_linux_amd64.zip && \ 19 | unzip vault_1.4.2_linux_amd64.zip && \ 20 | mkdir -p /usr/local/bin/ && \ 21 | mv vault /usr/local/bin/ && \ 22 | rm -f vault_1.4.2_linux_amd64.zip 23 | 24 | COPY run-sidecar.sh execute-get-token.sh execute-get-vault-secrets.sh \ 25 | get-vault-secrets.sh /usr/local/bin/ 26 | 27 | # adding Spire agent 28 | RUN VERSION=0.12.0 && \ 29 | wget https://github.com/spiffe/spire/releases/download/v${VERSION}/spire-${VERSION}-linux-x86_64-glibc.tar.gz && \ 30 | tar zvxf spire-${VERSION}-linux-x86_64-glibc.tar.gz && \ 31 | mkdir -p /opt/spire/bin && \ 32 | mv /spire-${VERSION}/bin/spire-agent /opt/spire/bin/ && \ 33 | rm -rf spire-${VERSION}/ && \ 34 | rm -f spire-${VERSION}-linux-x86_64-glibc.tar.gz 35 | 36 | # adding Keycloak identities: 37 | COPY execute-get-keycloak-identities.sh get-keycloak-identities.sh /usr/local/bin/ 38 | 39 | COPY test-vault-cli.sh /test-vault-cli.sh 40 | 41 | # Default value for NEW_JWT_WAIT_SEC must be a little shorter than TTL_SEC 42 | ARG DEFAULT_JWT_TTL_SEC=30 43 | ENV JWT_TTL_SEC=${DEFAULT_JWT_TTL_SEC} 44 | # Default values for vault client setup 45 | ARG DEFAULT_VAULT_ADDR="http://vault:8200" 46 | ARG DEFAULT_SECRET_REFRESH_SEC=600 47 | ARG DEFAULT_IDENTITY_REFRESH_SEC=600 48 | ARG DEFAULT_IS_SIDECAR=true 49 | ENV VAULT_ADDR=${DEFAULT_VAULT_ADDR} 50 | ENV SECRET_REFRESH_SEC=${DEFAULT_SECRET_REFRESH_SEC} 51 | ENV IDENTITY_REFRESH_SEC=${DEFAULT_IDENTITY_REFRESH_SEC} 52 | ENV IS_SIDECAR=${DEFAULT_IS_SIDECAR} 53 | 54 | CMD ["/bin/bash", "-c", "/usr/local/bin/run-sidecar.sh"] 55 | -------------------------------------------------------------------------------- /examples/spire-sidecar/nodejs/.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | 
yarn-debug.log* 6 | yarn-error.log* 7 | lerna-debug.log* 8 | .pnpm-debug.log* 9 | 10 | # Diagnostic reports (https://nodejs.org/api/report.html) 11 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 12 | 13 | # Runtime data 14 | pids 15 | *.pid 16 | *.seed 17 | *.pid.lock 18 | 19 | # Directory for instrumented libs generated by jscoverage/JSCover 20 | lib-cov 21 | 22 | # Coverage directory used by tools like istanbul 23 | coverage 24 | *.lcov 25 | 26 | # nyc test coverage 27 | .nyc_output 28 | 29 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 30 | .grunt 31 | 32 | # Bower dependency directory (https://bower.io/) 33 | bower_components 34 | 35 | # node-waf configuration 36 | .lock-wscript 37 | 38 | # Compiled binary addons (https://nodejs.org/api/addons.html) 39 | build/Release 40 | 41 | # Dependency directories 42 | node_modules/ 43 | jspm_packages/ 44 | 45 | # Snowpack dependency directory (https://snowpack.dev/) 46 | web_modules/ 47 | 48 | # TypeScript cache 49 | *.tsbuildinfo 50 | 51 | # Optional npm cache directory 52 | .npm 53 | 54 | # Optional eslint cache 55 | .eslintcache 56 | 57 | # Microbundle cache 58 | .rpt2_cache/ 59 | .rts2_cache_cjs/ 60 | .rts2_cache_es/ 61 | .rts2_cache_umd/ 62 | 63 | # Optional REPL history 64 | .node_repl_history 65 | 66 | # Output of 'npm pack' 67 | *.tgz 68 | 69 | # Yarn Integrity file 70 | .yarn-integrity 71 | 72 | # dotenv environment variables file 73 | .env 74 | .env.test 75 | .env.production 76 | 77 | # parcel-bundler cache (https://parceljs.org/) 78 | .cache 79 | .parcel-cache 80 | 81 | # Next.js build output 82 | .next 83 | out 84 | 85 | # Nuxt.js build / generate output 86 | .nuxt 87 | dist 88 | 89 | # Gatsby files 90 | .cache/ 91 | # Comment in the public line in if your project uses Gatsby and not Next.js 92 | # https://nextjs.org/blog/next-9-1#public-directory-support 93 | # public 94 | 95 | # vuepress build output 96 | .vuepress/dist 97 | 98 | # Serverless directories 99 | .serverless/ 
100 | 101 | # FuseBox cache 102 | .fusebox/ 103 | 104 | # DynamoDB Local files 105 | .dynamodb/ 106 | 107 | # TernJS port file 108 | .tern-port 109 | 110 | # Stores VSCode versions used for testing VSCode extensions 111 | .vscode-test 112 | 113 | # yarn v2 114 | .yarn/cache 115 | .yarn/unplugged 116 | .yarn/build-state.yml 117 | .yarn/install-state.gz 118 | .pnp.* 119 | -------------------------------------------------------------------------------- /examples/spire-sidecar/python/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 98 | __pypackages__/ 99 | 100 | # Celery stuff 101 | celerybeat-schedule 102 | celerybeat.pid 103 | 104 | # SageMath parsed files 105 | *.sage.py 106 | 107 | # Environments 108 | .env 109 | .venv 110 | env/ 111 | venv/ 112 | ENV/ 113 | env.bak/ 114 | venv.bak/ 115 | 116 | # Spyder project settings 117 | .spyderproject 118 | .spyproject 119 | 120 | # Rope project settings 121 | .ropeproject 122 | 123 | # mkdocs documentation 124 | /site 125 | 126 | # mypy 127 | .mypy_cache/ 128 | .dmypy.json 129 | dmypy.json 130 | 131 | # Pyre type checker 132 | .pyre/ 133 | 134 | # pytype static type analyzer 135 | .pytype/ 136 | 137 | # Cython debug symbols 138 | cython_debug/ 139 | -------------------------------------------------------------------------------- /charts/spire/templates/agent-daemonset.tpl: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: spire-agent 5 | namespace: {{ .Values.namespace }} 6 | labels: 7 | app: spire-agent 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: spire-agent 12 | template: 13 | metadata: 14 | namespace: {{ .Values.namespace }} 15 | labels: 16 | app: spire-agent 17 | spec: 18 | hostPID: true 19 | hostNetwork: true 20 | dnsPolicy: ClusterFirstWithHostNet 21 | serviceAccountName: "spire-agent" 22 | initContainers: 23 | - name: init 24 | # This is a small image with wait-for-it, choose whatever image 25 | # you prefer that waits for a service to be up. 
This image is built 26 | # from https://github.com/lqhl/wait-for-it 27 | image: gcr.io/spiffe-io/wait-for-it 28 | args: ["-t", "30", "{{ .Values.spireServer.address }}:{{ .Values.spireServer.port }}"] 29 | containers: 30 | - name: spire-agent 31 | image: {{ .Values.spireAgent.img }}:{{ .Values.spireVersion }} 32 | securityContext: 33 | # TODO: review this, maybe applicable for OpenShift only: 34 | # privilaged is needed to create socket and bundle files 35 | privileged: true 36 | args: ["-config", "/run/spire/config/agent.conf"] 37 | volumeMounts: 38 | - name: spire-config 39 | mountPath: /run/spire/config 40 | readOnly: true 41 | - name: spire-agent-socket 42 | mountPath: {{ .Values.spireAgent.socketDir }} 43 | readOnly: false 44 | - name: spire-bundle 45 | mountPath: /run/spire/bundle 46 | readOnly: true 47 | - name: spire-agent-token 48 | mountPath: /var/run/secrets/tokens 49 | readOnly: true 50 | livenessProbe: 51 | exec: 52 | command: 53 | - /opt/spire/bin/spire-agent 54 | - healthcheck 55 | - -socketPath 56 | - {{ .Values.spireAgent.socketDir }}/{{ .Values.spireAgent.socketFile }} 57 | failureThreshold: 2 58 | initialDelaySeconds: 15 59 | periodSeconds: 60 60 | timeoutSeconds: 3 61 | volumes: 62 | - name: spire-config 63 | configMap: 64 | name: spire-agent 65 | - name: spire-bundle 66 | configMap: 67 | name: spire-bundle 68 | - name: spire-agent-socket 69 | hostPath: 70 | path: {{ .Values.spireAgent.socketDir }} 71 | type: DirectoryOrCreate 72 | - name: spire-agent-token 73 | projected: 74 | sources: 75 | - serviceAccountToken: 76 | path: spire-agent 77 | expirationSeconds: 600 78 | audience: spire-server 79 | -------------------------------------------------------------------------------- /utils/x509-conf/createNodeScript.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | function usage { 3 | echo "$0 [node] [key-directory]" 4 | echo "where " 5 | echo " node - name of the node to create keys" 6 | echo " 
key-directory - directory with intermediate key, '../x509' default (optional)" 7 | exit 1 8 | } 9 | [[ -z $1 ]] && usage 10 | NODE=$1 11 | if [[ "$2" != "" ]] ; then 12 | KEYS="$2" 13 | else 14 | KEYS="../x509" 15 | fi 16 | 17 | SCRIPTS="scripts" 18 | mkdir -p ${SCRIPTS} 19 | FILE=${SCRIPTS}/${NODE}.sh 20 | TEMP_DIR="/tmp/ca" 21 | TARGET_DIR="/target/run/spire/x509" 22 | 23 | echo "#!/bin/bash" > ${FILE} 24 | chmod 755 ${FILE} 25 | 26 | echo "mkdir -p ${TEMP_DIR}" >> ${FILE} 27 | echo "cd ${TEMP_DIR}" >> ${FILE} 28 | echo "mkdir certs crl newcerts private" >> ${FILE} 29 | echo "chmod 700 private" >> ${FILE} 30 | echo "touch index.txt" >> ${FILE} 31 | echo "echo 1000 > serial" >> ${FILE} 32 | echo "cd -" >> ${FILE} 33 | 34 | echo "cat > ${TEMP_DIR}/intermediate.cert.pem <> ${FILE} 35 | if [ -f ${KEYS}/intermediate.cert.pem ]; then 36 | cat ${KEYS}/intermediate.cert.pem >> ${FILE} 37 | echo "EOF" >> ${FILE} 38 | echo " " >> ${FILE} 39 | else 40 | echo "Error! Missing file ${KEYS}/intermediate.cert.pem" 41 | exit 1 42 | fi 43 | 44 | if [ -f ${KEYS}/intermediate.key.pem ]; then 45 | echo "cat > ${TEMP_DIR}/intermediate.key.pem <> ${FILE} 46 | cat ${KEYS}/intermediate.key.pem >> ${FILE} 47 | echo "EOF" >> ${FILE} 48 | echo " " >> ${FILE} 49 | else 50 | echo "Error! 
Missing file ${KEYS}/intermediate.key.pem" 51 | exit 1 52 | fi 53 | 54 | echo "cat > ${TEMP_DIR}/intermediate-openssl.cnf <> ${FILE} 55 | cat x509-conf/intermediate-config.txt >> ${FILE} 56 | echo "EOF" >> ${FILE} 57 | echo " " >> ${FILE} 58 | 59 | echo "openssl genrsa -out ${TEMP_DIR}/node.key.pem 2048" >> ${FILE} 60 | echo "chmod 400 ${TEMP_DIR}/node.key.pem" >> ${FILE} 61 | 62 | echo 'SUBJ="/C=US/ST=CA/O=MyOrg, Inc./CN='"$NODE"'"' >> ${FILE} 63 | 64 | echo "openssl req -new -sha256 -key ${TEMP_DIR}/node.key.pem \\" >> ${FILE} 65 | echo ' -subj "${SUBJ}"'" -out ${TEMP_DIR}/node.csr \\" >> ${FILE} 66 | echo " -config ${TEMP_DIR}/intermediate-openssl.cnf 2>/dev/null" >> ${FILE} 67 | 68 | echo "openssl ca -batch -config ${TEMP_DIR}/intermediate-openssl.cnf \\" >> ${FILE} 69 | echo " -extensions server_cert -days 375 -notext -md sha256 \\" >> ${FILE} 70 | echo " -in ${TEMP_DIR}/node.csr \\" >> ${FILE} 71 | echo " -out ${TEMP_DIR}/node.cert.pem 2>/dev/null" >> ${FILE} 72 | echo "chmod 444 ${TEMP_DIR}/node.cert.pem" >> ${FILE} 73 | 74 | echo "" >> ${FILE} 75 | echo "# cleanup:" >> ${FILE} 76 | echo "mkdir -p ${TARGET_DIR}" >> ${FILE} 77 | echo "cat ${TEMP_DIR}/node.cert.pem \\" >> ${FILE} 78 | echo " ${TEMP_DIR}/intermediate.cert.pem > ${TARGET_DIR}/node-bundle.cert.pem" >> ${FILE} 79 | echo "mv ${TEMP_DIR}/node.key.pem ${TARGET_DIR}/" >> ${FILE} 80 | echo "rm -rf ${TEMP_DIR}/" >> ${FILE} 81 | echo "rm -rf $SCRIPTS/" >> ${FILE} 82 | echo "" >> ${FILE} 83 | -------------------------------------------------------------------------------- /examples/spire/Dockerfile.mars: -------------------------------------------------------------------------------- 1 | FROM ubuntu:18.04 2 | 3 | RUN apt update && \ 4 | apt install -y curl && \ 5 | apt install coreutils && \ 6 | apt install -y wget && \ 7 | apt install -y unzip && \ 8 | apt install -y jq && \ 9 | apt install -y vim 10 | 11 | # install yq required for xform YAML to JSON 12 | RUN apt-get install -y 
software-properties-common && \ 13 | add-apt-repository ppa:rmescandon/yq && \ 14 | apt update && apt install -y yq 15 | 16 | RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \ 17 | chmod +x kubectl 18 | RUN wget https://releases.hashicorp.com/vault/1.4.2/vault_1.4.2_linux_amd64.zip && \ 19 | unzip vault_1.4.2_linux_amd64.zip && \ 20 | mkdir -p /usr/local/bin/ && \ 21 | mv vault /usr/local/bin/ && \ 22 | rm -f vault_1.4.2_linux_amd64.zip 23 | 24 | # get a demo script from https://github.com/duglin/tools/tree/master/demoscript 25 | # or https://github.com/mrsabath/tools/tree/master/demoscript 26 | RUN curl -LO https://raw.githubusercontent.com/mrsabath/tools/master/demoscript/demoscript && \ 27 | chmod +x demoscript && \ 28 | mv demoscript /usr/local/bin 29 | 30 | COPY demo.mars-s3.sh /usr/local/bin/demo-s3.sh 31 | COPY demo.mars-vault.sh /usr/local/bin/demo-vault.sh 32 | 33 | # adding Spire agent 34 | RUN VERSION=1.0.2 && \ 35 | wget https://github.com/spiffe/spire/releases/download/v${VERSION}/spire-${VERSION}-linux-x86_64-glibc.tar.gz && \ 36 | tar zvxf spire-${VERSION}-linux-x86_64-glibc.tar.gz && \ 37 | mkdir -p /opt/spire/bin && \ 38 | mv /spire-${VERSION}/bin/spire-agent /opt/spire/bin/ && \ 39 | rm -rf spire-${VERSION}/ && \ 40 | rm -f spire-${VERSION}-linux-x86_64-glibc.tar.gz 41 | 42 | # add AWS CLI 43 | RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \ 44 | unzip awscliv2.zip && \ 45 | ./aws/install && \ 46 | rm -rf aws && \ 47 | rm -f awscliv2.zip 48 | 49 | # setup env. 
variables for the demos: 50 | ARG DEFAULT_AG_SOCK="/run/spire/sockets/agent.sock" 51 | ARG DEFAULT_S3_AUD="mys3" 52 | ARG DEFAULT_S3_ROLE="arn:aws:iam::581274594392:role/mars-mission-role-01" 53 | ARG DEFAULT_S3_CMD="aws s3 cp s3://mars-spire/mars.txt top-secret.txt" 54 | ARG DEFAULT_VAULT_AUD="vault" 55 | ARG DEFAULT_VAULT_ROLE="mars-role" 56 | ARG DEFAULT_VAULT_ADDR="http://vault-service" 57 | ARG DEFAULT_VAULT_SECRET="/v1/secret/data/my-super-secret" 58 | ARG DEFAULT_VAULT_DATA=".data.data" 59 | 60 | ENV AG_SOCK=${DEFAULT_AG_SOCK} 61 | ENV S3_AUD=${DEFAULT_S3_AUD} 62 | ENV S3_ROLE=${DEFAULT_S3_ROLE} 63 | ENV S3_CMD=${DEFAULT_S3_CMD} 64 | ENV VAULT_AUD=${DEFAULT_VAULT_AUD} 65 | ENV VAULT_ROLE=${DEFAULT_VAULT_ROLE} 66 | ENV VAULT_ADDR=${DEFAULT_VAULT_ADDR} 67 | ENV VAULT_SECRET=${DEFAULT_VAULT_SECRET} 68 | ENV VAULT_DATA=${DEFAULT_VAULT_DATA} 69 | 70 | WORKDIR /usr/local/bin/ 71 | 72 | # run it forever 73 | CMD ["/bin/bash", "-c", "tail -f /dev/null"] 74 | -------------------------------------------------------------------------------- /examples/spire-sidecar/config/db-node.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: db 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: db 10 | strategy: 11 | type: Recreate 12 | template: 13 | metadata: 14 | labels: 15 | app: db 16 | spec: 17 | containers: 18 | - args: 19 | - --default-authentication-plugin=mysql_native_password 20 | env: 21 | - name: MYSQL_PASSWORD 22 | value: testnewroot 23 | - name: MYSQL_ROOT_PASSWORD 24 | value: testroot 25 | - name: MYSQL_USER 26 | value: newroot 27 | image: mysql:5.7 28 | name: mysql57demo 29 | ports: 30 | - containerPort: 3306 31 | volumeMounts: 32 | - name: mysql-initdb 33 | mountPath: /docker-entrypoint-initdb.d 34 | # livenessProbe: 35 | # exec: 36 | # command: ["mysqladmin", "ping"] 37 | # initialDelaySeconds: 30 38 | # periodSeconds: 10 39 | # timeoutSeconds: 5
40 | readinessProbe: 41 | exec: 42 | # Check we can execute queries over TCP (skip-networking is off). 43 | # command: ["MYSQL_PWD=testroot mysql", "-u", "root", "-e", "SELECT 1"] 44 | # - bash 45 | # - "-c" 46 | # - | 47 | # MYSQL_PWD=testroot mysql -uroot -e'SELECT 1' 48 | # ["mysql", "-u", "root", "--password=testroot", "-e", "SELECT 1"] # 49 | # ["MYSQL_PWD=testroot", "mysql", "-u", "root", "-e", "SELECT 1"] # 50 | command: 51 | - bash 52 | - "-c" 53 | - | 54 | MYSQL_PWD=testroot mysql -uroot -s -e'SELECT 1' 55 | initialDelaySeconds: 5 56 | periodSeconds: 2 57 | timeoutSeconds: 1 58 | resources: {} 59 | volumes: 60 | - name: mysql-initdb 61 | configMap: 62 | name: mysql-initdb-config 63 | restartPolicy: Always 64 | status: {} 65 | --- 66 | apiVersion: v1 67 | kind: Service 68 | metadata: 69 | name: db 70 | namespace: default 71 | spec: 72 | # type: NodePort 73 | ports: 74 | - name: "3306" 75 | port: 3306 76 | targetPort: 3306 77 | protocol: TCP 78 | selector: 79 | app: db 80 | status: 81 | loadBalancer: {} 82 | --- 83 | apiVersion: v1 84 | kind: ConfigMap 85 | metadata: 86 | name: mysql-initdb-config 87 | data: 88 | initdb.sql: | 89 | CREATE DATABASE testdb; 90 | USE testdb; 91 | DROP TABLE IF EXISTS MOVIE; 92 | CREATE TABLE MOVIE( 93 | id int(11) NOT NULL AUTO_INCREMENT, 94 | name varchar(20), 95 | year int(11), 96 | director varchar(20), 97 | genre varchar(20), 98 | PRIMARY KEY (id)); 99 | USE testdb; 100 | insert into MOVIE(id, name, year, director, genre) values 101 | (1, "Bruce Almighty", 2003, "Tom Shaydac", "Comedy"), 102 | (2, "The Godfather", 1972, "Francis Ford Coppola", "Crime"); -------------------------------------------------------------------------------- /examples/spire-sidecar/sidecar/run-sidecar-bash.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SOCKETFILE=${SOCKETFILE:-"/run/spire/sockets/agent.sock"} 4 | CFGDIR=${CFGDIR:-"/run/db"} 5 | ROLE=${ROLE:-"dbrole1"} 6 | 7 | if [[ 
"$VAULT_ADDR" == "" ]]; then 8 | echo "VAULT_ADDR not set" 9 | exit 1 10 | fi 11 | 12 | WAIT=30 13 | 14 | # method used to obtain a resource/file from vault, using jwt token (i.e. X-Vault-Token) 15 | get_resource () { 16 | if [ "$?" == "0" ]; then 17 | if [[ $1 == *"json"* ]]; then 18 | # JSON dump 19 | curl --max-time 10 -s -H "X-Vault-Token: $VAULT_TOKEN" $VAULT_ADDR/v1/secret/data/$2 | jq -r ".data.data" > $CFGDIR/$1 20 | else 21 | # other files, beside JSON, need to be encoded to base64 prior to storing into Vault 22 | curl --max-time 10 -s -H "X-Vault-Token: $VAULT_TOKEN" $VAULT_ADDR/v1/secret/data/$2 | jq -r ".data.data.sha" | openssl base64 -d > $CFGDIR/$1 23 | fi 24 | fi 25 | } 26 | 27 | if [[ ! -f $1 ]]; then 28 | cat < 31 | Where: 32 | - path to file that would contain resources (required) 33 | HELPMEHELPME 34 | echo "File not found: $1" 35 | exit 1 36 | fi 37 | 38 | # read each line into array (i.e. files) 39 | mapfile -t files < $1 40 | 41 | 42 | while true 43 | do 44 | # make sure the socket file exists before requesting a token 45 | while [ ! 
-S ${SOCKETFILE} ]; do 46 | sleep 5 47 | done 48 | # obtain the pod identity in JWT format from the Spire agent using a provided socket 49 | IDENTITY_TOKEN=$(/opt/spire/bin/spire-agent api fetch jwt -audience vault -socketPath $SOCKETFILE | sed -n '2p' | xargs) 50 | if [ -z "$IDENTITY_TOKEN" ]; then 51 | echo "IDENTITY_TOKEN not set" 52 | continue 53 | fi 54 | # For use with AWS S3: 55 | # the audience must be switched to 'mys3' 56 | # /opt/spire/bin/spire-agent api fetch jwt -audience mys3 -socketPath /run/spire/sockets/agent.sock > $RESP 57 | # AWS_ROLE_ARN=arn:aws:iam::581274594392:role/mars-mission-role AWS_WEB_IDENTITY_TOKEN_FILE=token.jwt aws s3 cp s3://mars-spire/db-config.json config.json 58 | # mv config.json $CFGFILE 59 | 60 | # Using identity JWT, obtain and extract client_token from Vault login service 61 | VAULT_TOKEN=$(curl --max-time 10 -s --request POST --data '{ "jwt": "'"${IDENTITY_TOKEN}"'", "role": "'"${ROLE}"'"}' "${VAULT_ADDR}"/v1/auth/jwt/login | jq -r '.auth.client_token') 62 | if [ -z "$VAULT_TOKEN" ]; then 63 | echo "VAULT_TOKEN not set" 64 | continue 65 | fi 66 | 67 | filenames=() # will store only files names, used to check if they exists later 68 | for i in "${files[@]}" 69 | do 70 | tmp=$( echo "$i" | sed 's:.*/::' ) 71 | filenames+=("$tmp") 72 | get_resource $tmp $i 73 | done 74 | 75 | success=1 76 | # check if all files were retrieved 77 | for i in "${filenames[@]}" 78 | do 79 | if [[ ! 
-f "$CFGDIR/$i" ]]; then 80 | echo "File was not found: $i" 81 | success=0 82 | fi 83 | done 84 | 85 | if [ $success -eq 1 ]; then 86 | exit 0 87 | fi 88 | 89 | 90 | sleep "$WAIT" 91 | done 92 | -------------------------------------------------------------------------------- /examples/spire-sidecar/config/apps.bash.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: path-to-inputfile 5 | data: 6 | inputfile.txt: | 7 | db-config/config.ini 8 | db-config/config.json 9 | 10 | --- 11 | apiVersion: apps/v1 12 | kind: Deployment 13 | metadata: 14 | name: apps 15 | spec: 16 | replicas: 1 17 | selector: 18 | matchLabels: 19 | app: apps 20 | strategy: 21 | type: Recreate 22 | template: 23 | metadata: 24 | labels: 25 | identity_template: "true" 26 | app: apps 27 | spec: 28 | initContainers: 29 | - name: sidecar 30 | securityContext: 31 | # privilaged is needed to create socket and bundle files 32 | privileged: true 33 | image: tsidentity/tornjak-example-sidecar:v0.1 34 | imagePullPolicy: Always 35 | # use command to override the command of the image/dockerfile 36 | command: ["/usr/local/bin/run-sidecar-bash.sh"] 37 | args: 38 | - "/usr/local/bin/inputfile.txt" 39 | env: 40 | - name: SOCKETFILE 41 | value: "/run/spire/sockets/agent.sock" 42 | - name: ROLE 43 | value: "dbrole" 44 | - name: VAULT_ADDR 45 | # Provide address to your VAULT server 46 | # value: "http://{{vault-address.cloud}}" 47 | volumeMounts: 48 | - name: spire-agent-socket 49 | mountPath: /run/spire/sockets 50 | readOnly: true 51 | - name: db-config 52 | mountPath: /run/db 53 | - name: mount-inputfile 54 | mountPath: /usr/local/bin/inputfile.txt 55 | subPath: inputfile.txt 56 | containers: 57 | - image: tsidentity/tornjak-example-nodejs:v0.1 58 | imagePullPolicy: Always 59 | name: node 60 | ports: 61 | - containerPort: 8080 62 | resources: {} 63 | volumeMounts: 64 | - mountPath: /usr/src/app/config.json 65 | name: 
db-config 66 | readOnly: true 67 | subPath: config.json 68 | - image: tsidentity/tornjak-example-python:v0.1 69 | imagePullPolicy: Always 70 | name: py 71 | ports: 72 | - containerPort: 5000 73 | resources: {} 74 | volumeMounts: 75 | - mountPath: /app/config.ini 76 | name: db-config 77 | readOnly: true 78 | subPath: config.ini 79 | restartPolicy: Always 80 | volumes: 81 | - name: spire-agent-socket 82 | hostPath: 83 | path: /run/spire/sockets 84 | type: Directory 85 | - name: db-config 86 | emptyDir: {} 87 | - name: mount-inputfile 88 | configMap: 89 | name: path-to-inputfile 90 | status: {} 91 | --- 92 | apiVersion: v1 93 | kind: Service 94 | metadata: 95 | name: apps 96 | spec: 97 | type: NodePort 98 | selector: 99 | app: apps 100 | ports: 101 | - name: "8001" 102 | port: 8001 103 | targetPort: 8080 104 | - name: "8000" 105 | port: 8000 106 | targetPort: 5000 107 | status: 108 | loadBalancer: {} 109 | -------------------------------------------------------------------------------- /examples/spire-sidecar/config/apps.python.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: path-to-inputfile 5 | data: 6 | inputfile.txt: | 7 | db-config/config.ini 8 | db-config/config.json 9 | 10 | --- 11 | apiVersion: apps/v1 12 | kind: Deployment 13 | metadata: 14 | name: apps 15 | spec: 16 | replicas: 1 17 | selector: 18 | matchLabels: 19 | app: apps 20 | strategy: 21 | type: Recreate 22 | template: 23 | metadata: 24 | labels: 25 | identity_template: "true" 26 | app: apps 27 | spec: 28 | initContainers: 29 | - name: sidecar 30 | securityContext: 31 | # privilaged is needed to create socket and bundle files 32 | privileged: true 33 | image: tsidentity/tornjak-example-sidecar:v0.1 34 | imagePullPolicy: Always 35 | # use command to override the command of the image/dockerfile 36 | command: ["python3"] 37 | args: 38 | - "/usr/local/bin/run-sidecar-python.py" 39 | - 
"/usr/local/bin/inputfile.txt" 40 | env: 41 | - name: SOCKETFILE 42 | value: "/run/spire/sockets/agent.sock" 43 | - name: ROLE 44 | value: "dbrole" 45 | - name: VAULT_ADDR 46 | # Provide address to your VAULT server 47 | # value: "http://{{vault-address.cloud}}" 48 | volumeMounts: 49 | - name: spire-agent-socket 50 | mountPath: /run/spire/sockets 51 | readOnly: true 52 | - name: db-config 53 | mountPath: /run/db 54 | - name: mount-inputfile 55 | mountPath: /usr/local/bin/inputfile.txt 56 | subPath: inputfile.txt 57 | containers: 58 | - image: tsidentity/tornjak-example-nodejs:v0.1 59 | imagePullPolicy: Always 60 | name: node 61 | ports: 62 | - containerPort: 8080 63 | resources: {} 64 | volumeMounts: 65 | - mountPath: /usr/src/app/config.json 66 | name: db-config 67 | readOnly: true 68 | subPath: config.json 69 | - image: tsidentity/tornjak-example-python:v0.1 70 | imagePullPolicy: Always 71 | name: py 72 | ports: 73 | - containerPort: 5000 74 | resources: {} 75 | volumeMounts: 76 | - mountPath: /app/config.ini 77 | name: db-config 78 | readOnly: true 79 | subPath: config.ini 80 | restartPolicy: Always 81 | volumes: 82 | - name: spire-agent-socket 83 | hostPath: 84 | path: /run/spire/sockets 85 | type: Directory 86 | - name: db-config 87 | emptyDir: {} 88 | - name: mount-inputfile 89 | configMap: 90 | name: path-to-inputfile 91 | status: {} 92 | --- 93 | apiVersion: v1 94 | kind: Service 95 | metadata: 96 | name: apps 97 | spec: 98 | type: NodePort 99 | selector: 100 | app: apps 101 | ports: 102 | - name: "8001" 103 | port: 8001 104 | targetPort: 8080 105 | - name: "8000" 106 | port: 8000 107 | targetPort: 5000 108 | status: 109 | loadBalancer: {} 110 | -------------------------------------------------------------------------------- /docs/x509-agent.md: -------------------------------------------------------------------------------- 1 | # Deploy SPIRE Agent with x509pop (Proof of Possession) Node Attestor for Confidential Computing project 2 | 3 | The `x509pop` 
nodeAttestor plugin attests nodes that have been provisioned with 4 | an x509 identity through an out-of-band mechanism. 5 | It verifies that the certificate is rooted to a trusted set of CAs 6 | and issues a signature based proof-of-possession challenge to the agent plugin 7 | to verify that the node is in possession of the private key. 8 | 9 | This document is a second part of the 2 part activity, see [Deploy SPIRE Server with x509pop](./x509.md) 10 | 11 | ## Pre-install: Get the installation code and sample keys 12 | Obtain the clone of the repo: 13 | 14 | ```console 15 | git clone https://github.com/IBM/trusted-service-identity.git 16 | git checkout conf_container 17 | ``` 18 | 19 | Sample keys are already created in `sample-x509` directory. 20 | 21 | 22 | ## Deploy SPIRE Agents 23 | 24 | ### Env. Setup 25 | Setup `KUBECONFIG` for your Kubernetes cluster. 26 | 27 | Setup CLUSTER_NAME, REGION and SPIRE 28 | In IBM Cloud, use the script: 29 | 30 | ```console 31 | utils/get-cluster-info.sh 32 | ``` 33 | 34 | otherwise setup them up directly, for now, use any strings: 35 | ```console 36 | export CLUSTER_NAME= 37 | export REGION= 38 | ``` 39 | 40 | Point at the SPIRE Server, this is the server deployed in previous step: 41 | ```console 42 | export SPIRE_SERVER= 43 | ``` 44 | 45 | ### Deploy the keys 46 | Eventually, the x509 cert will be delivered to the host out-of-bound, but for now, let's pass them as secrets. 47 | 48 | ```console 49 | # create a namespace: 50 | kubectl create ns spire 51 | 52 | # create a secret with keys: 53 | kubectl -n spire create secret generic agent-x509 \ 54 | --from-file=key.pem="sample-x509/leaf1-key.pem" \ 55 | --from-file=cert.pem="sample-x509/leaf1-crt-bundle.pem" 56 | ``` 57 | 58 | ### Deploy `spire-bundle` 59 | Deploy `spire-bundle` obtained from the SPIRE server. 
60 | 61 | ```console 62 | kubectl -n spire create -f spire-bundle.yaml 63 | ``` 64 | 65 | ## Install the Spire Agents 66 | 67 | If installing on OpenShift: 68 | 69 | ```console 70 | utils/install-open-shift-spire.sh -c $CLUSTER_NAME -r $REGION -s $SPIRE_SERVER -t openshift.space-x.com 71 | ``` 72 | 73 | If installing in native Kubernetes environment: 74 | 75 | ```console 76 | helm install --set "spireServer.address=$SPIRE_SERVER" \ 77 | --set "namespace=spire" \ 78 | --set "clustername=$CLUSTER_NAME" --set "trustdomain=openshift.space-x.com" \ 79 | --set "region=$REGION" \ 80 | --set "x509=true" \ 81 | --set "openShift=false" spire charts/spire --debug 82 | ``` 83 | 84 | ## Validate the installation 85 | The number of Spire agents corresponds to the number of nodes: 86 | ```console 87 | kubectl -n spire get no 88 | NAME STATUS ROLES AGE VERSION 89 | 10.188.196.81 Ready 1h v1.22.8+IKS 90 | 10.188.196.82 Ready 1h v1.22.8+IKS 91 | kubectl -n spire get po 92 | NAME READY STATUS RESTARTS AGE 93 | spire-agent-h9f2j 1/1 Running 0 11s 94 | spire-agent-s2bjt 1/1 Running 0 11s 95 | spire-registrar-5bb497cfd8-vpxnl 1/1 Running 0 11s 96 | ``` 97 | 98 | ### To cleanup the cluster (removes everything) 99 | 100 | ```console 101 | utils/install-open-shift-spire.sh --clean 102 | ``` 103 | -------------------------------------------------------------------------------- /MAINTAINERS.md: -------------------------------------------------------------------------------- 1 | # Maintainers Guide 2 | 3 | This guide is intended for maintainers - anybody with commit access to Trusted 4 | Service Identity repository. 5 | 6 | [Maintainers List](./MAINTAINERS.md##maintainers-list) 7 | 8 | ## Methodology 9 | 10 | This repository does not have a traditional release management cycle, but 11 | should instead be maintained as a useful, working, and polished reference at 12 | all times. 
While all work can therefore be focused on the main branch, the 13 | quality of this branch should never be compromised. 14 | 15 | The remainder of this document details how to merge pull requests to the 16 | repositories. 17 | 18 | ## Merge approval 19 | 20 | The project maintainers use LGTM (Looks Good To Me) in comments on the pull 21 | request to indicate acceptance prior to merging. A change requires LGTMs from 22 | two project maintainers. If the code is written by a maintainer, the change 23 | only requires one additional LGTM. 24 | 25 | ## Reviewing Pull Requests 26 | 27 | We recommend reviewing pull requests directly within GitHub. This allows a 28 | public commentary on changes, providing transparency for all users. When 29 | providing feedback be civil, courteous, and kind. Disagreement is fine, so long 30 | as the discourse is carried out politely. If we see a record of uncivil or 31 | abusive comments, we will revoke your commit privileges and invite you to leave 32 | the project. 33 | 34 | During your review, consider the following points: 35 | 36 | ### Does the change have positive impact? 37 | 38 | Some proposed changes may not represent a positive impact to the project. Ask 39 | whether or not the change will make understanding the code easier, or if it 40 | could simply be a personal preference on the part of the author (see 41 | [bikeshedding](https://en.wiktionary.org/wiki/bikeshedding)). 42 | 43 | Pull requests that do not have a clear positive impact should be closed without 44 | merging. 45 | 46 | ### Do the changes make sense? 47 | 48 | If you do not understand what the changes are or what they accomplish, ask the 49 | author for clarification. Ask the author to add comments and/or clarify test 50 | case names to make the intentions clear. 51 | 52 | At times, such clarification will reveal that the author may not be using the 53 | code correctly, or is unaware of features that accommodate their needs. 
If you 54 | feel this is the case, work up a code sample that would address the pull 55 | request for them, and feel free to close the pull request once they confirm. 56 | 57 | ### Does the change introduce a new feature? 58 | 59 | For any given pull request, ask yourself "is this a new feature?" If so, does 60 | the pull request (or associated issue) contain narrative indicating the need 61 | for the feature? If not, ask them to provide that information. 62 | 63 | Are new unit tests in place that test all new behaviors introduced? If not, do 64 | not merge the feature until they are! Is documentation in place for the new 65 | feature? (See the documentation guidelines). If not do not merge the feature 66 | until it is! Is the feature necessary for general use cases? Try and keep the 67 | scope of any given component narrow. If a proposed feature does not fit that 68 | scope, recommend to the user that they maintain the feature on their own, and 69 | close the request. You may also recommend that they see if the feature gains 70 | traction among other users, and suggest they re-submit when they can show such 71 | support. 72 | 73 | ## Maintainers List 74 | IBM Research: 75 | * Mariusz Sabath - [@mrsabath](https://github.com/mrsabath) - sabath@us.ibm.com 76 | * Brandon Lum - [@lumjjb](https://github.com/lumjjb) - Brandon.Lum@ibm.com 77 | -------------------------------------------------------------------------------- /components/tsi-util/load-sample-policies.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Trusted Service Identiy plugin name 4 | export PLUGIN="vault-plugin-auth-ti-jwt" 5 | 6 | ## create help menu: 7 | helpme() 8 | { 9 | cat < 12 | Where: 13 | vault_addr - vault address (or ingress) in format http://vault.server:8200 14 | token - vault root token to setup the plugin 15 | 16 | Or make sure ROOT_TOKEN and VAULT_ADDR are set as environment variables. 
17 | export ROOT_TOKEN= 18 | export VAULT_ADDR=(vault address in format http://vault.server:8200) 19 | 20 | HELPMEHELPME 21 | } 22 | 23 | loadVault() 24 | { 25 | #docker run -d --name=dev-vault -v ${PWD}/local.json:/vault/config/local.json -v ${PWD}/pkg/linux_amd64/${PLUGIN}:/plugins/${PLUGIN} -p 127.0.0.1:8200:8200/tcp vault 26 | # echo "Root Token: ${ROOT_TOKEN}" 27 | vault login -no-print ${ROOT_TOKEN} 28 | RT=$? 29 | 30 | if [ $RT -ne 0 ] ; then 31 | echo "Vault login failed!" 32 | exit 1 33 | fi 34 | 35 | export MOUNT_ACCESSOR=$(curl --header "X-Vault-Token: ${ROOT_TOKEN}" --request GET ${VAULT_ADDR}/v1/sys/auth | jq -r '.["trusted-identity/"].accessor') 36 | 37 | # Use policy templates to create policy files. 38 | # The example below uses 4 different policies with the following constraints: 39 | # - rcni - uses region, cluster-name, namespace and images 40 | # - rcn - uses region, cluster-name, namespace 41 | # - ri - uses region and images 42 | # - r - uses region only 43 | 44 | # replace mount accessor in policy 45 | sed "s/<%MOUNT_ACCESSOR%>/$MOUNT_ACCESSOR/g" /vault-tpl/tsi-policy.rcni.hcl.tpl > /vault-tpl/tsi-policy.rcni.hcl 46 | sed "s/<%MOUNT_ACCESSOR%>/$MOUNT_ACCESSOR/g" /vault-tpl/tsi-policy.rcn.hcl.tpl > /vault-tpl/tsi-policy.rcn.hcl 47 | sed "s/<%MOUNT_ACCESSOR%>/$MOUNT_ACCESSOR/g" /vault-tpl/tsi-policy.ri.hcl.tpl > /vault-tpl/tsi-policy.ri.hcl 48 | sed "s/<%MOUNT_ACCESSOR%>/$MOUNT_ACCESSOR/g" /vault-tpl/tsi-policy.r.hcl.tpl > /vault-tpl/tsi-policy.r.hcl 49 | 50 | # write policy to grant access to secrets 51 | vault policy write tsi-policy-rcni /vault-tpl/tsi-policy.rcni.hcl 52 | vault policy read tsi-policy-rcni 53 | vault policy write tsi-policy-rcn /vault-tpl/tsi-policy.rcn.hcl 54 | vault policy read tsi-policy-rcn 55 | vault policy write tsi-policy-ri /vault-tpl/tsi-policy.ri.hcl 56 | vault policy read tsi-policy-ri 57 | vault policy write tsi-policy-r /vault-tpl/tsi-policy.r.hcl 58 | vault policy read tsi-policy-r 59 | 60 | # create role to 
associate policy with login 61 | # we choosed to use one role, one policy association 62 | # *NOTE* the first role MUST include all the metadata that would be used by other roles/policies, not only the first one. 63 | vault write auth/trusted-identity/role/tsi-role-rcni bound_subject="wsched@us.ibm.com" user_claim="pod" metadata_claims="region,cluster-name,namespace,images" policies=tsi-policy-rcni 64 | vault read auth/trusted-identity/role/tsi-role-rcni 65 | 66 | vault write auth/trusted-identity/role/tsi-role-rcn bound_subject="wsched@us.ibm.com" user_claim="pod" metadata_claims="region,cluster-name,namespace" policies=tsi-policy-rcn 67 | vault read auth/trusted-identity/role/tsi-role-rcn 68 | 69 | vault write auth/trusted-identity/role/tsi-role-ri bound_subject="wsched@us.ibm.com" user_claim="pod" metadata_claims="region,images" policies=tsi-policy-ri 70 | vault read auth/trusted-identity/role/tsi-role-ri 71 | 72 | vault write auth/trusted-identity/role/tsi-role-r bound_subject="wsched@us.ibm.com" user_claim="pod" metadata_claims="region" policies=tsi-policy-r 73 | vault read auth/trusted-identity/role/tsi-role-r 74 | } 75 | 76 | # validate the arguments 77 | if [[ "$1" != "" && "$2" != "" ]] ; then 78 | export VAULT_ADDR="$1" 79 | export ROOT_TOKEN="$2" 80 | fi 81 | 82 | if [[ "$ROOT_TOKEN" == "" || "$VAULT_ADDR" == "" ]] ; then 83 | echo "ROOT_TOKEN and VAULT_ADDR must be set" 84 | helpme 85 | exit 1 86 | else 87 | loadVault 88 | fi 89 | -------------------------------------------------------------------------------- /docs/spire-keylime-attestion.md: -------------------------------------------------------------------------------- 1 | # Setting up the SPIRE NodeAttestor with Keylime 2 | [Keylime](https://keylime.dev) is an open-source tool, 3 | part of the [CNCF](https://cncf.io/) project, 4 | that provides a highly scalable remote boot attestation 5 | and runtime integrity measurement solution. 
6 | Keylime enables users to monitor remote nodes 7 | using a hardware based cryptographic root of trust. 8 | 9 | In this example, the Node Attestation is done using Keylime (and TPM), 10 | tying the Workload Identity with Hardware Root of Trust: 11 | * It guarantees the identity of the node beyond any doubt 12 | * It attests the software stack, from booting to the kernel. 13 | We know the firmware, packages, libraries. Enforcement of the software bill of materials (SBOM) 14 | * It measures and enforces the integrity of files (IMA) 15 | 16 | Once the node is attested by Keylime, the Keylime agents deliver securely 17 | `intermediate.key.pem` and `intermediate.cert.pem` 18 | to the node, and then create and sign 19 | `node.key.pem` and `node.cert.pem` used by 20 | SPIRE `x509pop` NodeAttestor (x509 proof of possession): 21 | ([server plugin](https://github.com/spiffe/spire/blob/main/doc/plugin_server_nodeattestor_x509pop.md), 22 | [agent plugin](https://github.com/spiffe/spire/blob/main/doc/plugin_agent_nodeattestor_x509pop.md)) 23 | 24 | This example requires x509 certificates. The samples are provided in 25 | [../sample-x509](../sample-x509). 26 | Instructions for creating your own are available [here](x509-create.md) 27 | 28 | ## Obtain a Kubernetes cluster with deployed Keylime 29 | We use an internal process for deploying a cluster with Keylime. 30 | Connect to the node that has Keylime server. 
31 | 32 | 33 | ## Deploy the x509 keys to all the nodes 34 | Obtain the Trusted Service Identity project 35 | ```console 36 | cd ~ 37 | git clone https://github.com/IBM/trusted-service-identity.git 38 | cd trusted-service-identity 39 | git checkout conf_container 40 | ``` 41 | 42 | Check the status of the current Keylime nodes and make sure they are all in 43 | `verified` state: 44 | 45 | ```console 46 | keylime-op -u /root/undercloud.yml -m /root/mzone.yml -o status 47 | ``` 48 | Sample response: 49 | ``` 50 | { 51 | "concise": "verified", 52 | "status": { 53 | "small7-agent0": "verified", 54 | "small7-agent1": "verified", 55 | "small7-agent2": "verified", 56 | "small7-agent3": "verified", 57 | "small7-agent4": "verified" 58 | } 59 | } 60 | ``` 61 | Execute the key deployment script 62 | ```console 63 | cd utils 64 | ./deployKeys_keylime.sh 65 | ``` 66 | 67 | Once all the nodes show Keylime agents as verified again, check if the keys 68 | were correctly deployed. Ssh to a hosts: 69 | 70 | ```console 71 | ssh small7-agent0 "ls -l /run/spire/x509/; cat /run/spire/x509/*" 72 | ``` 73 | 74 | When everything is good, setup the `spire-bundle` and execute the helm installation. 
75 | 76 | Capture the spire-bundle on the SPIRE Server: 77 | 78 | ```console 79 | kubectl -n tornjak get configmap spire-bundle -oyaml | kubectl patch --type json --patch '[{"op": "replace", "path": "/metadata/namespace", "value":"spire"}]' -f - --dry-run=client -oyaml > spire-bundle.yaml 80 | ``` 81 | 82 | Bring it to the newly created cluster with deployed x509 keys and install: 83 | ```console 84 | kubectl create ns spire 85 | kubectl create -f spire-bundle.yaml 86 | ``` 87 | 88 | Setup the CLUSTER_NAME, REGION variables, and location of your SPIRE_SERVER: 89 | 90 | ``` 91 | cd ~/trusted-service-identity/ 92 | export CLUSTER_NAME=css 93 | export REGION=us-ykt 94 | export SPIRE_SERVER=spire-server-tornjak.us-east.containers.appdomain.cloud 95 | ``` 96 | 97 | Execute the SPIRE Agent installation: 98 | ```console 99 | helm install --set "spireServer.address=$SPIRE_SERVER" \ 100 | --set "namespace=spire" \ 101 | --set "clustername=$CLUSTER_NAME" --set "trustdomain=openshift.space-x.com" \ 102 | --set "region=$REGION" \ 103 | --set "x509=true" \ 104 | --set "openShift=false" spire charts/spire --debug 105 | ``` 106 | -------------------------------------------------------------------------------- /docs/vault.md: -------------------------------------------------------------------------------- 1 | # Vault Setup 2 | Some demos require access to the Vault instance. 3 | If you have a Vault instance, make sure you have admin privileges to access it. 4 | Otherwise, follow the simple steps below to create a Vault instance, as a pod and 5 | service, deployed in `tsi-vault` namespace in your cluster. 
6 | 7 | ## Vault Instance Deployment 8 | We are using the following deployment file: 9 | [../examples/vault/vault.yaml](../examples/vault/vault.yaml) 10 | 11 | ```console 12 | kubectl create namespace tsi-vault 13 | kubectl -n tsi-vault create -f examples/vault/vault.yaml 14 | service/tsi-vault created 15 | deployment.apps/tsi-vault created 16 | ``` 17 | 18 | #### Obtain remote access to Vault service 19 | For `minikube` obtain the current endpoint as follow 20 |
[Click] to view minikube steps 21 | 22 | ```console 23 | minikube service tsi-vault -n tsi-vault --url 24 | http://192.168.99.105:30229 25 | # assign it to VAULT_ADDR env. variable: 26 | export VAULT_ADDR=http://192.168.99.105:30229 27 | ``` 28 |
29 | 30 | 31 | To access Vault remotely in `IKS`, setup ingress access. 32 |
[Click] to view IKS steps 33 | 34 | Obtain the ingress name using `ibmcloud` cli: 35 | ```console 36 | $ # first obtain the cluster name: 37 | $ ibmcloud ks clusters 38 | $ # then use the cluster name to get the Ingress info: 39 | $ ibmcloud ks cluster get --cluster | grep Ingress 40 | Ingress Subdomain: tsi-kube01-xxxxxxxxxxx-0000.eu-de.containers.appdomain.cloud 41 | Ingress Secret: tsi-kube01-xxxxxxxxxxx-0000 42 | Ingress Status: healthy 43 | Ingress Message: All Ingress components are healthy 44 | ``` 45 | Build an ingress file from `example/vault/ingress.IKS.template.yaml`, 46 | using the `Ingress Subdomain` information obtained above. You can use any arbitrary 47 | prefix in addition to the Ingress value. For example: 48 | 49 | `host: tsi-vault.my-tsi-cluster-xxxxxxxxxxx-0000.eu-de.containers.appdomain.cloud` 50 | 51 | ```yaml 52 | apiVersion: networking.k8s.io/v1 53 | kind: Ingress 54 | metadata: 55 | name: vault-ingress 56 | namespace: tsi-vault 57 | spec: 58 | rules: 59 | - host: tsi-vault.my-tsi-cluster-xxxxxxxxxxx-0000.eu-de.containers.appdomain.cloud 60 | http: 61 | paths: 62 | - pathType: Prefix 63 | path: "/" 64 | backend: 65 | service: 66 | name: tsi-vault 67 | port: 68 | number: 8200 69 | ``` 70 | 71 | create ingress: 72 | ```console 73 | $ kubectl -n tsi-vault create -f ingress-IKS.yaml 74 | ``` 75 | 76 | Create VAULT_ADDR env. variable: 77 | ```console 78 | export VAULT_ADDR="http://tsi-vault." 79 | ``` 80 |
81 | 82 | To access Vault remotely OpenShift (including IKS ROKS) 83 |
[Click] to view OpenShift steps 84 | 85 | This assumes the OpenShift command line is already installed. Otherwise see 86 | the [documentation](https://docs.openshift.com/container-platform/4.2/cli_reference/openshift_cli/getting-started-cli.html) 87 | and you can get `oc` cli from https://mirror.openshift.com/pub/openshift-v4/clients/oc/4.3/ 88 | 89 | ```console 90 | oc -n tsi-vault expose svc/tsi-vault 91 | export VAULT_ADDR="http://$(oc -n tsi-vault get route tsi-vault -o jsonpath='{.spec.host}')" 92 | export ROOT_TOKEN=$(kubectl -n tsi-vault logs $(kubectl -n tsi-vault get po | grep tsi-vault-| awk '{print $1}') | grep Root | cut -d' ' -f3); echo "export ROOT_TOKEN=$ROOT_TOKEN" 93 | ``` 94 | 95 |
96 | 97 | Test the remote connection to vault: 98 | ```console 99 | $ curl $VAULT_ADDR 100 | Temporary Redirect. 101 | ``` 102 | At this point, this is an expected result. 103 | 104 | Once the Vault service is running and `VAULT_ADDR` is defined, 105 | Vault requires a setup. See a demo specifics to continue. 106 | 111 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Trusted Service Identity (TSI) 2 | 3 | This Universal Workload Identity project 4 | (also known as Trusted Service Identity) 5 | provides a deployment and an orchestration layer to support 6 | CNCF community initiatives 7 | [Tornjak](https://github.com/spiffe/tornjak) 8 | and [SPIRE](https://github.com/spiffe/spire). 9 | 10 | *Notice1:* 11 | * For all the original *Trusted Workload Identity* project, 12 | that was preceding the SPIRE and Tornjak integration, 13 | and focusing on keys and credentials management, and preventing 14 | access to secrets by untrusted administrator, 15 | please visit our [main-no-spire](../../tree/main-no-spire/) branch. 16 | 17 | *Notice2:* 18 | * The TSI version [tsi-version.txt] attempts to match the most 19 | recent SPIRE version that is currently supported by Tornjak. 20 | (See the 21 | [Tornjak version](https://github.com/spiffe/tornjak/blob/main/SPIRE_BUILD_VERSIONS) 22 | file) 23 | 24 | ## Introduction 25 | Here is the stack that represents the layers of 26 | the Universal Workload Identity project. 27 | Most of the components are part of the [CNCF](https://www.cncf.io/) 28 | (Cloud Native Computing Fundation) initiative. 29 | 30 | ![technology-stack](docs/imgs/CNCF.stack.jpg) 31 | 32 | Starting from the bottom, we support any Kubernetes Platform, whether this is 33 | a native Kubernetes or OpenShift. 34 | 35 | Then we have SPIFFE, which defines the identity format and specifies how 36 | workloads can securely obtain identity. 
37 | 38 | Then we have SPIRE, which implements SPIFFE, 39 | and it provides the zero trust attestation of workloads and infrastructure. 40 | SPIRE is responsible for issuing and rotating of x509 certificates or 41 | JWT tokens that are used for representing identity. 42 | SPIRE also provides a single point of federation with OIDC discovery 43 | to be used across multi-cloud or multi-cluster deployments. 44 | 45 | Above the SPIRE, we have Tornjak, a control plane and UI for SPIRE, 46 | which together with the 47 | [*K8s workload registrar*](https://github.com/spiffe/spire/blob/main/support/k8s/k8s-workload-registrar/README.md), 48 | defines the organization-wise Universal Workload Identity schema. 49 | It provides the identity management across the SPIRE servers. 50 | 51 | Then on the top layer we have Universal Trusted Workload Identity that is a 52 | guiding principle. It's a concept for using workload identity frameworks. 53 | This is not a specific technology. 54 | 55 | ## Attributes of Universal Workload Identity 56 | * Define a single, consistent organizational identity schema across clusters in different clouds 57 | * Provide a Zero Trust workload identity framework with strong workload attestation with SPIRE + Cloud-provider Plugins with each K8s installation 58 | * Manage and audit workload identity, attestation and policies for all k8s workloads in every cluster 59 | * Linear and centralized management of identities/policies to handle quadratic complexity 60 | * Single configuration per cloud to federate all cloud access 61 | * Based on CNCF SPIFFE identity specification 62 | 63 | ![multi-cloud](docs/imgs/Multi-cloud.jpg) 64 | 65 | The Universal, Zero Trust Workload Identity, runs on everything that 66 | supports Kubernetes platform. 67 | It strengthens the security, by executing the cloud-provider and the platform attestation of the hosting nodes. 
68 | It can support various identity mechanisms like IAM, Keycloak, and 69 | open standards like OpenID using a consistent, universal identity schema. 70 | 71 | ## Tornjak Deployment and Demo examples 72 | For the documentation about deploying Tornjak 73 | with the SPIRE server 74 | and various demo examples, please see our [documentation](./docs/README.md) 75 | 76 | ## Reporting security issues 77 | 78 | Our [maintainers](./MAINTAINERS.md) take security seriously. If you discover a security 79 | issue, please bring it to their attention right away! 80 | 81 | Please DO NOT file a public issue, they will be removed; instead please reach out 82 | to the maintainers privately. 83 | 84 | Security reports are greatly appreciated, and Trusted Service Identity team will 85 | publicly thank you for it. 86 | -------------------------------------------------------------------------------- /sample-x509/generate.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "crypto/rand" 6 | "crypto/rsa" 7 | "crypto/x509" 8 | "crypto/x509/pkix" 9 | "encoding/pem" 10 | "math/big" 11 | "os" 12 | "time" 13 | ) 14 | 15 | func panice(err error) { 16 | if err != nil { 17 | panic(err) 18 | } 19 | } 20 | 21 | func main() { 22 | // The "never expires" timestamp from RFC5280 23 | neverExpires := time.Date(9999, 12, 31, 23, 59, 59, 0, time.UTC) 24 | 25 | rootKey := generateRSAKey() 26 | writeKey("root.key.pem", rootKey) 27 | 28 | rootCert := createRootCertificate(rootKey, &x509.Certificate{ 29 | SerialNumber: big.NewInt(1), 30 | BasicConstraintsValid: true, 31 | IsCA: true, 32 | NotAfter: neverExpires, 33 | }) 34 | 35 | intermediateKey := generateRSAKey() 36 | writeKey("intermediate.key.pem", intermediateKey) 37 | 38 | intermediateCert := createCertificate(intermediateKey, &x509.Certificate{ 39 | SerialNumber: big.NewInt(1), 40 | BasicConstraintsValid: true, 41 | IsCA: true, 42 | NotAfter: neverExpires, 43 | }, 
	rootKey, rootCert)

	// Leaf certificate/key for node1, signed by the intermediate CA.
	nodeKey := generateRSAKey()

	nodeCert := createCertificate(nodeKey, &x509.Certificate{
		SerialNumber: big.NewInt(1),
		KeyUsage:     x509.KeyUsageDigitalSignature,
		NotAfter:     neverExpires,
		Subject:      pkix.Name{CommonName: "some common name1"},
	}, intermediateKey, intermediateCert)

	writeKey("node1.key.pem", nodeKey)
	// The "bundle" file carries the leaf plus the intermediate, so a verifier
	// holding only the root can build the full chain.
	writeCerts("node1-bundle.cert.pem", nodeCert, intermediateCert)
	writeCerts("node1.cert.pem", nodeCert)
	writeCerts("intermediate.cert.pem", intermediateCert)
	writeCerts("root.cert.pem", rootCert)

	// Second leaf (node2): same template apart from the common name.
	nodeKey = generateRSAKey()

	nodeCert = createCertificate(nodeKey, &x509.Certificate{
		SerialNumber: big.NewInt(1),
		KeyUsage:     x509.KeyUsageDigitalSignature,
		NotAfter:     neverExpires,
		Subject:      pkix.Name{CommonName: "some common name2"},
	}, intermediateKey, intermediateCert)

	writeKey("node2.key.pem", nodeKey)
	writeCerts("node2-bundle.cert.pem", nodeCert, intermediateCert)
	writeCerts("node2.cert.pem", nodeCert)

	// Third leaf (node3).
	nodeKey = generateRSAKey()

	nodeCert = createCertificate(nodeKey, &x509.Certificate{
		SerialNumber: big.NewInt(1),
		KeyUsage:     x509.KeyUsageDigitalSignature,
		NotAfter:     neverExpires,
		Subject:      pkix.Name{CommonName: "some common name3"},
	}, intermediateKey, intermediateCert)

	writeKey("node3.key.pem", nodeKey)
	writeCerts("node3-bundle.cert.pem", nodeCert, intermediateCert)
	writeCerts("node3.cert.pem", nodeCert)
}

// createRootCertificate self-signs tmpl: the template acts as its own parent
// and key signs its own public key.
func createRootCertificate(key *rsa.PrivateKey, tmpl *x509.Certificate) *x509.Certificate {
	return createCertificate(key, tmpl, key, tmpl)
}

// createCertificate signs tmpl (carrying key's public key) with parentKey
// under the parent certificate, then re-parses the DER so the returned
// *x509.Certificate has all derived fields populated.
func createCertificate(key *rsa.PrivateKey, tmpl *x509.Certificate, parentKey *rsa.PrivateKey, parent *x509.Certificate) *x509.Certificate {
	certDER, err := x509.CreateCertificate(rand.Reader, tmpl, parent, &key.PublicKey, parentKey)
	panice(err)
	cert, err := x509.ParseCertificate(certDER)
	panice(err)
	return cert
}

// generateRSAKey returns a fresh RSA key. 768 bits is deliberately small to
// keep sample generation fast; these keys are test fixtures, not secrets.
func generateRSAKey() *rsa.PrivateKey {
	key, err := rsa.GenerateKey(rand.Reader, 768) //nolint: gosec // small key is to keep test fast... not a security feature
	panice(err)
	return key
}

// writeKey writes key to path as a single PKCS#8 "PRIVATE KEY" PEM block,
// with owner-only permissions (0600).
func writeKey(path string, key interface{}) {
	keyBytes, err := x509.MarshalPKCS8PrivateKey(key)
	panice(err)
	pemBytes := pem.EncodeToMemory(&pem.Block{
		Type:  "PRIVATE KEY",
		Bytes: keyBytes,
	})
	err = os.WriteFile(path, pemBytes, 0600)
	panice(err)
}

// writeCerts writes the given certificates to path as concatenated
// "CERTIFICATE" PEM blocks, in the order supplied.
func writeCerts(path string, certs ...*x509.Certificate) {
	data := new(bytes.Buffer)
	for _, cert := range certs {
		err := pem.Encode(data, &pem.Block{
			Type:  "CERTIFICATE",
			Bytes: cert.Raw,
		})
		panice(err)
	}
	err := os.WriteFile(path, data.Bytes(), 0600)
	panice(err)
}
3 | 4 | Chart Name: {{ .Chart.Name }} 5 | Your release is named {{ .Release.Name }} 6 | 7 | Cluster name: {{ .Values.clustername }} 8 | Trust Domain: {{ .Values.trustdomain }} 9 | Tornjak Image: {{ .Values.spireServer.img }}:{{ .Values.spireVersion }} 10 | SPIRE Server socket: {{ .Values.spireServer.socketDir }}/{{ .Values.spireServer.socketFile }} 11 | {{- if .Values.openShift }} 12 | OpenShift mode: true 13 | {{- end }} 14 | {{- if .Values.attestors.aws_iid -}} 15 | {{- if .Values.attestors.aws_iid.access_key_id -}} 16 | {{- if .Values.attestors.aws_iid.secret_access_key -}} 17 | Using NodeAttestor "aws_iid" with 18 | "access_key_id" and "secret_access_key" 19 | {{- end }} 20 | {{- end }} 21 | {{- end }} 22 | {{- if .Values.oidc.enable }} 23 | OIDC enable: true 24 | OIDC image: {{ .Values.oidc.image }} 25 | OIDC service name: {{ .Values.oidc.serviceName }} 26 | listen_socket_path: {{ .Values.oidc.socketDir }}/{{ .Values.oidc.socketFile }} 27 | myDiscoveryDomain: {{ .Values.oidc.myDiscoveryDomain }} 28 | {{- end }} 29 | 30 | {{- if .Values.tornjak.config.enableUserMgmt }} 31 | User Mgmnt enabled: true 32 | {{- if .Values.tornjak.config.frontend.authServerURL }} 33 | authServerURL: {{ .Values.tornjak.config.frontend.authServerURL }} 34 | {{- end }} 35 | In tornjak-config.tpl: 36 | UserManagement "KeycloakAuth" { 37 | plugin_data { 38 | jwksURL = "{{ .Values.tornjak.config.backend.jwksURL }}" 39 | redirectURL = "{{ .Values.tornjak.config.backend.redirectURL }}" 40 | } 41 | } 42 | {{- end }} 43 | 44 | {{- if .Values.tornjak.config.separateFrontend }} 45 | Tornjak Backend: 46 | img: {{ .Values.tornjak.config.backend.img }} 47 | socket: "{{ .Values.tornjak.config.backend.socketDir }}/{{ .Values.tornjak.config.backend.socketFile }}" 48 | jwksURL: {{ .Values.tornjak.config.backend.jwksURL }} 49 | redirectURL: {{ .Values.tornjak.config.backend.redirectURL }} 50 | dataStore driver: {{ .Values.tornjak.config.backend.dataStore.driver }} 51 | dataStore file: {{ 
.Values.tornjak.config.backend.dataStore.file }} 52 | Tornjak Frontend: 53 | img: {{ .Values.tornjak.config.frontend.img }} 54 | Backend API: {{ .Values.tornjak.config.frontend.apiServerURL }} 55 | {{- else }} 56 | Tornjak img: {{ .Values.tornjak.config.img }} 57 | {{- end }} 58 | 59 | {{- if .Values.attestors.k8s_psat.remoteClusters }} 60 | Multi-cluster support enabled. 61 | Make sure `kubeconfig` secret is created to support a following: 62 | 63 | clusters = { 64 | "{{ .Values.clustername }}" = { 65 | # use_token_review_api_validation = true 66 | service_account_allow_list = ["spire:spire-agent"] 67 | }, 68 | {{- range $k, $v := .Values.attestors.k8s_psat.remoteClusters }} 69 | "{{ $v.name }}" = { 70 | service_account_allow_list = ["{{ $v.namespace | default "spire" }}:{{ $v.serviceAccount | default "spire-agent" }}"] 71 | kube_config_file = "/run/spire/kubeconfigs/{{ $v.name }}" 72 | }, 73 | {{- end }} 74 | } 75 | {{- end }} 76 | 77 | {{- if not .Values.tornjak.config.backend.ingress }} 78 | 79 | No Backend Ingress provided. 80 | Use port forwarding to access the Backend port: 81 | kubectl -n {{ .Values.namespace }} port-forward spire-server-0 10000:10000 82 | {{- end }} 83 | 84 | {{- if not .Values.tornjak.config.frontend.ingress }} 85 | 86 | No Frontend Ingress provided. 87 | Use port forwarding to access the Frontend port: 88 | kubectl -n {{ .Values.namespace }} port-forward spire-server-0 3000:3000 89 | {{- end }} 90 | 91 | Tornjak Backend API Access: {{ include "tornjak.apiURL" . }} 92 | Tornjak Frontend Access: {{ include "tornjak.FrontendURL" . 
#!/bin/bash
#
# register-JSS.sh -- sign a node's CSR with the Vault PKI intermediate and
# print the resulting x5c-style JSON array ([leaf-cert, issuing-ca]) based on
# the JWS spec: https://tools.ietf.org/html/rfc7515#appendix-B

TEMPDIR=$(mktemp -d /tmp/tsi.XXX)
export CSR_DIR=${CSR_DIR:-/tmp/vault}

## create help menu:
helpme()
{
cat <<HELPMEHELPME
Syntax: $0 <container name>

Where:
  container name - owner of the csr file
Required environment variables:
  ROOT_TOKEN - vault root token to setup the plugin
  VAULT_ADDR - vault address (or ingress) in format http://vault.server:8200
Optional:
  CSR_DIR - local directory where the csr file is stored
Currently:
  ROOT_TOKEN=${ROOT_TOKEN}
  VAULT_ADDR=${VAULT_ADDR}
  CSR_DIR=${CSR_DIR}
HELPMEHELPME
}

# remove the temporary work directory
cleanup()
{
  rm -rf "${TEMPDIR}"
}

# register an individual node: validate its CSR, sign it with the Vault PKI
# intermediate, and emit the certificate + issuing CA as a JSON array
register()
{
  CSR="${CSR_DIR}/$1.csr"
  if [ ! -s "${CSR}" ]; then
    # was: "File ... do not exist or it is empty"
    echo "File ${CSR} does not exist or is empty"
    helpme
    cleanup
    exit 1
  fi

  # JSS returns an error document instead of a CSR when node setup failed
  if [[ $(cat "${CSR}") == *errors* ]] ; then
    echo "Invalid CSR from JSS for $1. Please make sure tsi-node-setup was correctly executed"
    cleanup
    exit 1
  fi

  # extract the X509v3 TSI fields:
  TSIEXT="${TEMPDIR}/$1.csr.tsi"
  openssl req -in "${CSR}" -noout -text | grep "URI:TSI" > "${TSIEXT}"
  RT=$?
  if [ $RT -ne 0 ] ; then
    # was: "cluter-name"
    echo "Missing x509v3 URI:TSI extensions for cluster-name and region"
    cleanup
    exit 1
  fi

  # format:
  #   URI:TSI:cluster-name:my-cluster-name, URI:TSI:region:eu-de
  # remove the "URI:" prefix and all spaces
  TSI_URI=$(cat "${TSIEXT}" | sed 's/URI://g' | sed 's/ //g')

  vault login -no-print "${ROOT_TOKEN}"
  RT=$?
  if [ $RT -ne 0 ] ; then
    echo "ROOT_TOKEN is not correctly set"
    echo "ROOT_TOKEN=${ROOT_TOKEN}"
    cleanup
    exit 1
  fi
  # remove any previously set VAULT_TOKEN, that overrides ROOT_TOKEN in Vault client
  export VAULT_TOKEN=

  X5C="${TEMPDIR}/$1.x5c"
  OUT="${TEMPDIR}/out.$$"
  ERR="${TEMPDIR}/err.$$"

  # create an intermediate certificate for 50 years
  vault write pki/root/sign-intermediate csr=@"${CSR}" format=pem_bundle ttl=438000h uri_sans="${TSI_URI}" -format=json 1> "${OUT}" 2> "${ERR}"
  if [ "$?" != "0" ]; then
    echo "ERROR signing intermediate CA. Was Vault setup successfully executed? $(cat ${ERR})"
    cleanup
    exit 1
  fi
  # strip the PEM armor lines; x5c entries carry bare base64
  CERT=$(cat "${OUT}" | jq -r '.["data"].certificate' | grep -v '\-\-\-')
  CHAIN=$(cat "${OUT}" | jq -r '.["data"].issuing_ca' | grep -v '\-\-\-')
  echo "[\"${CERT}\",\"${CHAIN}\"]" > "${X5C}"

  # cleanup intermediate work files
  rm "${TSIEXT}"
  rm "${OUT}"

  # the x5c payload is this script's only stdout product
  cat "${X5C}"

  rm "${X5C}"
}

if [ -z "${VAULT_ADDR}" ]; then
  echo "VAULT_ADDR is not set"
  helpme
  cleanup
  exit 1
fi

if [ -z "${ROOT_TOKEN}" ]; then
  echo "ROOT_TOKEN is not set"
  helpme
  cleanup
  exit 1
fi

# validate the arguments
if [[ "$1" == "-?" || "$1" == "-h" || "$1" == "--help" ]] ; then
  helpme
elif [ -z "$1" ] ; then
  # a container/node name is required; previously an empty argument fell
  # through to register() and produced a confusing "/tmp/vault/.csr" error
  helpme
  cleanup
  exit 1
else
  register "$1"
fi

cleanup
#!/bin/bash
#
# vault-setup.sh -- configure Vault for Trusted Service Identity:
#  * enable the PKI engine and create an internal root CA
#  * register and enable the TSI JWT auth plugin
#  * point the plugin at the issuing CA created above

export PLUGIN="vault-plugin-auth-ti-jwt"
COMMON_NAME="trusted-identity.ibm.com"
CONFIG="/tmp/plugin-config.json"

## create help menu:
helpme()
{
cat <<HELPMEHELPME
Syntax: $0 <vault-plugin-sha> [token] [vault_addr]
Where:
  vault-plugin-sha - SHA 256 of the TSI Vault plugin (required)
  token            - vault root token to setup the plugin (optional, if set as env. var)
  vault_addr       - vault address in format http://vault.server:8200 (optional, if set as env. var)

HELPMEHELPME
}

setupVault()
{
  vault login -no-print "${ROOT_TOKEN}"
  RT=$?
  if [ $RT -ne 0 ] ; then
    echo "ROOT_TOKEN is not correctly set"
    echo "ROOT_TOKEN=${ROOT_TOKEN}"
    exit 1
  fi
  # remove any previously set VAULT_TOKEN, that overrides ROOT_TOKEN in Vault client
  export VAULT_TOKEN=

  vault secrets enable pki
  RT=$?
  if [ $RT -ne 0 ] ; then
    echo " 'vault secrets enable pki' command failed"
    echo "maybe already set?"
    read -n 1 -s -r -p 'Press any key to continue'
    #exit 1
  fi
  # Increase the TTL by tuning the secrets engine. The default value of 30 days may
  # be too short, so increase it to 1 year:
  vault secrets tune -max-lease-ttl=8760h pki
  vault delete pki/root

  # create internal root CA, expiring in 100 years
  export OUT
  OUT=$(vault write pki/root/generate/internal common_name="${COMMON_NAME}" \
      ttl=876000h -format=json)

  # capture the issuing CA public cert as plugin-config.json
  CERT=$(echo "$OUT" | jq -r '.["data"].issuing_ca'| awk '{printf "%s\\n", $0}')
  echo "{ \"jwt_validation_pubkeys\": \"${CERT}\" }" > ${CONFIG}

  # register the trusted-identity plugin
  vault write /sys/plugins/catalog/auth/vault-plugin-auth-ti-jwt sha_256="${SHA256}" command="vault-plugin-auth-ti-jwt"
  RT=$?
  if [ $RT -ne 0 ] ; then
    echo " 'vault write /sys/plugins/catalog/auth/vault-plugin-auth-ti-jwt ...' command failed"
    exit 1
  fi
  # useful for debugging:
  # vault read sys/plugins/catalog/auth/vault-plugin-auth-ti-jwt -format=json

  # then enable this plugin
  vault auth enable -path="trusted-identity" -plugin-name="vault-plugin-auth-ti-jwt" plugin
  RT=$?
  if [ $RT -ne 0 ] ; then
    echo " 'vault auth enable plugin' command failed"
    exit 1
  fi

  export MOUNT_ACCESSOR
  MOUNT_ACCESSOR=$(curl -sS --header "X-Vault-Token: ${ROOT_TOKEN}" --request GET "${VAULT_ADDR}/v1/sys/auth" | jq -r '.["trusted-identity/"].accessor')

  # configure plugin using the Issuing CA created internally above
  curl -sS --header "X-Vault-Token: ${ROOT_TOKEN}" --request POST --data @${CONFIG} "${VAULT_ADDR}/v1/auth/trusted-identity/config"
  RT=$?
  if [ $RT -ne 0 ] ; then
    echo "failed to configure trusted-identity plugin"
    exit 1
  fi

  # for debugging only:
  # CONFIG=$(curl -sS --header "X-Vault-Token: ${ROOT_TOKEN}" --request GET "${VAULT_ADDR}/v1/auth/trusted-identity/config" | jq)
  # echo "*** $CONFIG"
}

# validate the arguments
if [[ "$1" == "-?" || "$1" == "-h" || "$1" == "--help" ]] ; then
  helpme
  # BUGFIX: previously fell through after printing help and ran the setup
  # with "-h" as the plugin SHA
  exit 0
fi

# SHA256 of the TSI plugin must be provided as the first argument.
# BUGFIX: the old script first exported ROOT_TOKEN=$1 and VAULT_ADDR=$2,
# assigning the SHA to ROOT_TOKEN and the token to VAULT_ADDR.
if [[ "$1" == "" ]] ; then
  helpme
  exit 1
else
  SHA256="$1"
fi

# optional positional overrides: $2 = root token, $3 = vault address
if [[ "$2" != "" ]] ; then
  export ROOT_TOKEN="$2"
fi
if [[ "$3" != "" ]] ; then
  export VAULT_ADDR="$3"
fi
if [[ "$ROOT_TOKEN" == "" || "$VAULT_ADDR" == "" ]] ; then
  echo "ROOT_TOKEN and VAULT_ADDR must be set"
  helpme
  exit 1
fi

setupVault
# once the vault is setup, load the sample policies
# (resolved via PATH -- assumes it is installed alongside this script)
load-sample-policies.sh
25 | ROLE = config('ROLE', default='dbrole1') 26 | 27 | VAULT_ADDR=os.getenv('VAULT_ADDR') 28 | if (VAULT_ADDR is None or len(VAULT_ADDR)==0): 29 | print("VAULT_ADDR not set") 30 | exit(1) 31 | 32 | TIMEOUT=0.5 # 30 sec 33 | 34 | # method used to obtain a resource/file from vault, using jwt token (i.e. X-Vault-Token) 35 | def getfile(filename, filePath, token): 36 | try: 37 | headers = {'X-Vault-Token': token} 38 | url = VAULT_ADDR + "/v1/secret/data/" + filePath + ("" if filePath.endswith('/') else "/") + filename 39 | response = requests.get(url, headers=headers) 40 | obj=json.loads(response.text) 41 | with open(CFGDIR + ("" if CFGDIR.endswith('/') else "/") + filename, "w") as f: 42 | if (filename.upper().endswith(".JSON")): 43 | data = obj["data"]["data"] 44 | # JSON dump, if not we need to cast to string 45 | f.write(json.dumps(data)) 46 | else: 47 | # other files, beside JSON, need to be encoded to base64 prior to storing into Vault 48 | data = base64.b64decode(obj["data"]["data"]["sha"]).decode() # force cast to string 49 | f.write(data) 50 | return True 51 | except Exception as e: 52 | print("Error at file retrieval:", e) 53 | return False 54 | 55 | # Method used to obtain a dictionary with the file name as the key and the path as the value (i.e. 
def arraytodict(a, b):
    """Reducer: fold one "path/to/name" line into dict *a* as {name: "path/to"}.

    a -- accumulator dictionary {filename: path}
    b -- one input line, e.g. "db/config/db-config.json"
    """
    splitvalue = b.split("/")
    filename = splitvalue.pop().strip()  # last element is the file name
    path = "/".join(splitvalue)          # remaining parts form the directory path
    a[filename] = path
    return a


if __name__ == "__main__":
    # Sanity check for the input-file argument.
    # BUGFIX: was `len(sys.argv) == 0`, which can never be true (argv always
    # contains the script name), so a missing argument crashed with IndexError.
    if len(sys.argv) < 2:
        print("No input file was provided")
        exit(1)
    inputfile = sys.argv[1]
    # sanity check that the input file exists
    if not os.path.exists(inputfile):
        print("Input file was not found")
        exit(1)
    with open(inputfile, 'r') as f:
        files = f.readlines()
    # convert the file listing to a dictionary {filename: vault-path}
    listOfFile = reduce(arraytodict, files, {})

    while True:
        # make sure the socket file exists before requesting a token
        while not os.path.exists(SOCKETFILE):
            time.sleep(0.08)

        # obtain identity (JWT-SVID) from the local SPIRE agent
        output = subprocess.check_output(["/opt/spire/bin/spire-agent", "api", "fetch", "jwt", "-audience", "vault", "-socketPath", SOCKETFILE])
        token = output.split()[1].decode()  # suppress bytes / cast to string

        # Vault URL for identity check
        authurl = VAULT_ADDR + "/v1/auth/jwt/login"
        # data load for the request
        authdata = {"jwt": token, "role": ROLE}

        authresponse = requests.post(url=authurl, data=authdata, timeout=10)
        obj = json.loads(authresponse.text)
        # BUGFIX: a failed login returns an error document without "auth";
        # retry instead of crashing the loop with a KeyError
        if "auth" not in obj or obj["auth"] is None:
            print("Vault login failed:", obj.get("errors", obj))
            time.sleep(TIMEOUT)
            continue
        VAULT_TOKEN = obj["auth"]["client_token"]

        success = True
        # check if all files were retrieved
        for file, path in listOfFile.items():
            getfile(file, path, VAULT_TOKEN)
            foundfile = os.path.exists(CFGDIR + "/" + file)
            if not foundfile:
                # BUGFIX: was print("File was not found $s.", file) -- "$s"
                # is not a Python placeholder
                print(f"File was not found: {file}")
            success = success and foundfile

        if success:
            exit()

        time.sleep(TIMEOUT)
-------------------------------------------------------------------------------- 1 | const express = require('express'); 2 | const bodyParser = require('body-parser') 3 | const mysql = require('mysql'); 4 | // const mysql2 = require('mysql2'); 5 | const configfile = require("./config.json"); 6 | const app = express(); 7 | const PORT = 8080; 8 | const HOST = '0.0.0.0'; 9 | 10 | let connection; 11 | 12 | app.use( bodyParser.json() ); // to support JSON-encoded bodies 13 | app.use(bodyParser.urlencoded({ // to support URL-encoded bodies 14 | extended: true 15 | })); 16 | 17 | app.get('/', (req, res) => { 18 | if (!connection){ 19 | connection = mysql.createConnection(configfile); 20 | connection.connect(); 21 | } 22 | connection.query('SELECT * FROM MOVIE', function (error, results, fields) { 23 | if (error) { 24 | res.send(error); 25 | return; 26 | // throw error; 27 | } 28 | 29 | var output = ""; 30 | output += "" 31 | if (fields && fields.length){ 32 | output += fields.reduce((acc,field)=> {return acc+``;}, ""); 33 | } else { 34 | output += ""; 35 | output += ""; 36 | output += ""; 37 | output += ""; 38 | output += ""; 39 | output += ""; 40 | } 41 | output += "" 42 | if (results && results.length){ 43 | output += results.reduce((acc,result)=> { 44 | return acc+""+ 45 | fields.reduce((acc2, field)=> { 46 | return acc2+``; 47 | }, "")+ "" +""; 48 | }, ""); 49 | } 50 | output += "
${field.name}Movie IDNameYearDirectorGenre-
${result[field.name]}Delete
"; 51 | 52 | output += "
"; 53 | output += 54 | 55 | '
'+ 56 | ''+ 57 | ''+ 58 | ''+ 59 | ''+ 60 | ''+ 61 | ''+ 62 | ''+ 63 | ''+ 64 | ''+ 65 | ''+ 66 | ''+ 67 | ''+ 68 | ''+ 69 | ''+ 70 | ''+ 71 | ''+ 72 | ''+ 73 | ''+ 74 | '
'+ 75 | '
'; 76 | 77 | output += "
"; 78 | 79 | res.send(output); 80 | }); 81 | }) 82 | 83 | app.post('/addmovie', (req, res) => { 84 | if (!connection){ 85 | connection = mysql.createConnection(configfile); 86 | connection.connect(); 87 | } 88 | connection.query('INSERT INTO MOVIE SET ?', req.body, function (error, results, fields) { 89 | if (error) { 90 | throw error; 91 | } 92 | if (results.insertId){ 93 | return res.redirect('/'); 94 | } 95 | }); 96 | }); 97 | 98 | app.get('/delete/:id', (req, res) => { 99 | if (!connection){ 100 | connection = mysql.createConnection(configfile); 101 | connection.connect(); 102 | } 103 | connection.query('DELETE FROM MOVIE WHERE id = '+req.params.id, function (error, results, fields) { 104 | if (error) { 105 | throw error; 106 | } 107 | if (results.affectedRows){ 108 | return res.redirect('/'); 109 | } 110 | }); 111 | }); 112 | 113 | app.listen(PORT, HOST, () => { 114 | console.log(`Running on http://${HOST}:${PORT}`); 115 | }) -------------------------------------------------------------------------------- /docs/keycloak.md: -------------------------------------------------------------------------------- 1 | # Keycloak Deployment to Support IAM for Tornjak 2 | 3 | Tornjak is a very popular open-source Identity Access Management solution, that 4 | allows  management of users, their roles, and privileges for accessing a specific 5 | system and applications. It has plenty of customizable features. 6 | And it supports standard protocols such as OIDC (Open ID Connect). 7 | 8 | ## Keycloak Instance Deployment 9 | Keycloak instance can be deployed in any location as long as it will be available 10 | for Tornjak and the users to access. 11 | 12 | 1. Create a namespace e.g. `tornjak` 13 | ```console 14 | kubectl create ns tornjak 15 | ``` 16 | 1. 
Update the attributes in the Keycloak deployment file 17 | [examples/keycloak/keycloak.yaml](../examples/keycloak/keycloak.yaml) 18 | * `KEYCLOAK_ADMIN` - userid for the Keycloak admin 19 | * `KEYCLOAK_ADMIN_PASSWORD` - password for the Keycloak 20 | * `KEYCLOAK_FRONTEND_URL` - URL for the Keycloak Auhentication // TODO 21 | * `KEYCLOAK_ADMIN_URL` - URL for the Keycloak Admin realm // TODO 22 | 23 | 1. Create a Keycloak deployment 24 | ```console 25 | kubectl create -f examples/keycloak/ 26 | ``` 27 | This would start Keycloak instance as 28 | Kubernetes [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) 29 | and a [Service](https://kubernetes.io/docs/concepts/services-networking/service/) 30 | 31 | 1. Get remote access to Keycloak service 32 | For `minikube` obtain the current endpoint as follow 33 |
[Click] to view minikube steps 34 | 35 | ```console 36 | minikube service tsi-keycloak -n keycloak --url 37 | http://192.168.99.105:30229 38 | # keycloak is running on the above address now 39 | ``` 40 |
41 | 42 | 43 | To access Keycloak remotely in `IKS`, setup ingress access. 44 |
[Click] to view IKS steps 45 | 46 | Obtain the ingress name using `ibmcloud` cli: 47 | ```console 48 | $ # first obtain the cluster name: 49 | $ ibmcloud ks clusters 50 | $ # then use the cluster name to get the Ingress info: 51 | $ ibmcloud ks cluster get --cluster | grep Ingress 52 | Ingress Subdomain: my-cluster-xxxxxxxxxxx-0000.eu-de.containers.appdomain.cloud 53 | Ingress Secret: my-cluster-xxxxxxxxxxx-0000 54 | Ingress Status: healthy 55 | Ingress Message: All Ingress components are healthy 56 | ``` 57 | Build an ingress file from `example/keycloak/ingress.template.yaml`, 58 | using the `Ingress Subdomain` information obtained above. You can use any arbitrary 59 | prefix in addition to the Ingress value. For example: 60 | 61 | `host: keycloak.my-cluster-xxxxxxxxxxx-0000.eu-de.containers.appdomain.cloud` 62 | 63 | ```yaml 64 | apiVersion: networking.k8s.io/v1 65 | kind: Ingress 66 | metadata: 67 | name: keycloak-ingress 68 | spec: 69 | rules: 70 | - host: keycloak.my-cluster-xxxxxxxxxxx-0000.eu-de.containers.appdomain.cloud 71 | http: 72 | paths: 73 | - pathType: Prefix 74 | path: "/" 75 | backend: 76 | service: 77 | name: tsi-keycloak 78 | port: 79 | # number: 9090 80 | number: 8080 81 | ``` 82 | 83 | create ingress: 84 | ```console 85 | $ kubectl -n keycloak create -f examples/keycloak/ingress.template.yaml 86 | ``` 87 | 88 | Keycloak should be available under the address specified in `host` 89 |
90 | 91 | To access Keycloak remotely OpenShift (including IKS ROKS) 92 |
[Click] to view OpenShift steps 93 | 94 | This assumes the OpenShift command line is already installed. Otherwise see 95 | the [documentation](https://docs.openshift.com/container-platform/4.2/cli_reference/openshift_cli/getting-started-cli.html) 96 | and you can get `oc` cli from https://mirror.openshift.com/pub/openshift-v4/clients/oc/4.3/ 97 | 98 | ```console 99 | oc -n keycloak expose svc/tsi-keycloak 100 | # get the Keycloak URL: 101 | oc -n keycloak get route tsi-keycloak -o jsonpath='{.spec.host}' 102 | ``` 103 | 104 | Keycloak should be available under the above address. 105 |
Test the remote connection to Keycloak:
11 | 12 | ``` 13 | # install: 14 | utils/install-open-shift-tornjak.sh -c $CLUSTER_NAME -t $TRUST_DOMAIN -p $PROJECT_NAME --oidc 15 | ``` 16 | 17 | for example: 18 | 19 | ```console 20 | utils/install-open-shift-tornjak.sh -c $CLUSTER_NAME -t openshift.space-x.com --oidc 21 | ``` 22 | 23 | This creates an output that has a following ending: 24 | 25 | ``` 26 | export SPIRE_SERVER=spire-server-tornjak.space-x-01-9d995c4a8c7c5f281ce13d5467ff-0000.us-south.containers.appdomain.cloud 27 | 28 | Tornjak (http): http://tornjak-http-tornjak.space-x-01-9d995c4a8c7c5f281ce13d5467ff-0000.us-south.containers.appdomain.cloud/ 29 | Tornjak (TLS): https://tornjak-tls-tornjak.space-x-01-9d995c4a8c7c5f281ce13d5467ff-0000.us-south.containers.appdomain.cloud/ 30 | Tornjak (mTLS): https://tornjak-mtls-tornjak.space-x-01-9d995c4a8c7c5f281ce13d5467ff-0000.us-south.containers.appdomain.cloud/ 31 | Trust Domain: openshift.space-x.com 32 | Tornjak (oidc): https://oidc-tornjak.space-x-01-9d995c4a8c7c5f281ce13d5467ff-0000.us-south.containers.appdomain.cloud/ 33 | For testing oidc: 34 | 35 | curl -k https://oidc-tornjak.space-x-01-9d995c4a8c7c5f281ce13d5467ff-0000.us-south.containers.appdomain.cloud/.well-known/openid-configuration 36 | 37 | curl -k https://oidc-tornjak.space-x-01-9d995c4a8c7c5f281ce13d5467ff-0000.us-south.containers.appdomain.cloud/keys 38 | ``` 39 | 40 | Let’s test the OIDC endpoint: 41 | ``` 42 | curl -k https://oidc-tornjak.space-x-01-9d995c4a8c7c5f281ce13d5467ff-0000.us-south.containers.appdomain.cloud/.well-known/openid-configuration 43 | { 44 | "issuer": "https://oidc-tornjak.space-x-01-9d995c4a8c7c5f281ce13d5467ff-0000.us-south.containers.appdomain.cloud", 45 | "jwks_uri": "https://oidc-tornjak.space-x-01-9d995c4a8c7c5f281ce13d5467ff-0000.us-south.containers.appdomain.cloud/keys", 46 | "authorization_endpoint": "", 47 | "response_types_supported": [ 48 | "id_token" 49 | ], 50 | "subject_types_supported": [], 51 | "id_token_signing_alg_values_supported": [ 52 | "RS256", 
53 | "ES256", 54 | "ES384" 55 | ] 56 | } 57 | ``` 58 | 59 | This output confirms that the OIDC endpoint is accessible and responds with valid information. 60 | 61 | Let's install the [SPIRE Agents](./spire-on-openshift.md#step-2-installing-spire-agents-on-openshift): 62 | 63 | ```console 64 | oc new-project spire --description="My TSI Spire Agent project on OpenShift" 65 | kubectl get configmap spire-bundle -n tornjak -o yaml | sed "s/namespace: tornjak/namespace: spire/" | kubectl apply -n spire -f - 66 | ``` 67 | 68 | Then export the value of the SPIRE_SERVER frome above: 69 | 70 | ``` 71 | export SPIRE_SERVER=spire-server-tornjak.space-x-01-9d995c4a8c7c5f281ce13d5467ff-0000.us-south.containers.appdomain.cloud 72 | ``` 73 | and run the agents installation: 74 | 75 | ```console 76 | utils/install-open-shift-spire.sh -c $CLUSTER_NAME -r $REGION -s $SPIRE_SERVER -t openshift.space-x.com 77 | ``` 78 | 79 | Confirm the agents were successfully deployed and get the host for the registrar: 80 | 81 | ```console 82 | oc get po -owide 83 | NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES 84 | spire-agent-222kh 1/1 Running 0 3m 10.38.240.206 10.38.240.206 85 | spire-agent-6l9tf 1/1 Running 0 3m 10.38.240.213 10.38.240.213 86 | spire-agent-tgbmn 1/1 Running 0 3m 10.38.240.212 10.38.240.212 87 | spire-registrar-85fcc94797-v9q6w 1/1 Running 0 3m 172.30.118.57 10.38.240.206 88 | ``` 89 | Now follow the steps for registering the [Workload Registrar](./spire-workload-registrar.md#register-workload-registrar-with-the-spire-server) so the new workloads get SPIFFE ids. 90 | 91 | ## Start OIDC use-cases 92 | Once the SPIRE server is enabled with OIDC plugin, we can continue the tutorial 93 | for enabling access to [AWS S3 storage](./spire-oidc-aws-s3.md) 94 | or [Vault secrets](./spire-oidc-vault.md). 
95 | -------------------------------------------------------------------------------- /examples/spire-sidecar/config/vault-oidc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ROLE="dbrole" 4 | POLICY="dbpolicy" 5 | OIDC_URL=${OIDC_URL:-$1} 6 | ROOT_TOKEN=${ROOT_TOKEN:-$2} 7 | VAULT_ADDR=${VAULT_ADDR:-$3} 8 | export VAULT_ADDR=$VAULT_ADDR 9 | export ROOT_TOKEN=$ROOT_TOKEN 10 | # remove any previously set VAULT_TOKEN, that overrides ROOT_TOKEN in Vault client 11 | export VAULT_TOKEN= 12 | 13 | ## create help menu: 14 | helpme() 15 | { 16 | cat < 19 | Where: 20 | OIDC_URL - OIDC URL (https://) (optional, if set as env. var) 21 | ROOT_TOKEN - Vault root token to setup the plugin (optional, if set as env. var) 22 | VAULT_ADDR - Vault address in format http://vault.server:8200 (optional, if set as env. var) 23 | 24 | HELPMEHELPME 25 | } 26 | 27 | setupVault() 28 | { 29 | vault login -no-print "${ROOT_TOKEN}" 30 | RT=$? 31 | if [ $RT -ne 0 ] ; then 32 | echo "ROOT_TOKEN is not correctly set" 33 | echo "ROOT_TOKEN=${ROOT_TOKEN}" 34 | echo "VAULT_ADDR=${VAULT_ADDR}" 35 | exit 1 36 | fi 37 | 38 | # Enable JWT authentication 39 | vault auth enable jwt 40 | RT=$? 41 | if [ $RT -ne 0 ] ; then 42 | echo " 'vault auth enable jwt' command failed" 43 | echo "jwt maybe already enabled?" 44 | read -n 1 -s -r -p 'Press any key to continue' 45 | #exit 1 46 | fi 47 | 48 | 49 | # Connect OIDC - Set up our OIDC Discovery URL, 50 | vault write auth/jwt/config oidc_discovery_url=$OIDC_URL default_role=“$ROLE” 51 | RT=$? 52 | if [ $RT -ne 0 ] ; then 53 | echo " 'vault write auth/jwt/config oidc_discovery_url=' command failed" 54 | echo "jwt maybe already enabled?" 55 | read -n 1 -s -r -p 'Press any key to continue' 56 | #exit 1 57 | fi 58 | 59 | # Define a policy my-mars-policy that will be assigned to a marsrole role that we’ll create in the next step. 
60 | cat > $POLICY.hcl < role.json < -- sh 99 | 100 | Once inside: 101 | # install jq parser 102 | apk add jq 103 | 104 | # get the JWT token, and export it as JWT env. variable: 105 | bin/spire-agent api fetch jwt -audience vault -socketPath /run/spire/sockets/agent.sock 106 | 107 | # setup env. variables: 108 | export JWT= 109 | export ROLE=marsrole 110 | export VAULT_ADDR=$VAULT_ADDR 111 | 112 | # using this JWT to login with vault and get a token: 113 | EOF 114 | 115 | echo " curl --max-time 10 -s -o out --request POST --data '{" '"jwt": "'"'"'"${JWT}"'"'"'", "role": "'"'"'"${ROLE}"'"'"'"}'"' "'"${VAULT_ADDR}"/v1/auth/jwt/login' 116 | echo # empty line 117 | echo " # get the client_token from the response" 118 | echo ' TOKEN=$(cat out | jq -r ' "'.auth.client_token')" 119 | echo ' curl -s -H "X-Vault-Token: $TOKEN" $VAULT_ADDR/v1/secret/data/db-config.json' " | jq -r '.data.data'" 120 | } 121 | 122 | # validate the arguments 123 | if [[ "$1" == "-?" || "$1" == "-h" || "$1" == "--help" ]] ; then 124 | helpme 125 | exit 0 126 | fi 127 | 128 | # Make sure the OIDC_URL parameter is set 129 | if [[ "$OIDC_URL" == "" ]] ; then 130 | echo "OIDC_URL must be set" 131 | helpme 132 | exit 1 133 | fi 134 | 135 | # when paramters provider, overrid the env. 
variables 136 | if [[ "$3" != "" ]] ; then 137 | export OIDC_URL="$1" 138 | export ROOT_TOKEN="$2" 139 | export VAULT_ADDR="$3" 140 | elif [[ "$ROOT_TOKEN" == "" || "$VAULT_ADDR" == "" ]] ; then 141 | echo "ROOT_TOKEN and VAULT_ADDR must be set" 142 | helpme 143 | exit 1 144 | fi 145 | 146 | setupVault 147 | footer 148 | -------------------------------------------------------------------------------- /examples/spire/vault-oidc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ROLE="marsrole" 4 | OIDC_URL=${OIDC_URL:-$1} 5 | ROOT_TOKEN=${ROOT_TOKEN:-$2} 6 | VAULT_ADDR=${VAULT_ADDR:-$3} 7 | export VAULT_ADDR=$VAULT_ADDR 8 | export ROOT_TOKEN=$ROOT_TOKEN 9 | # remove any previously set VAULT_TOKEN, that overrides ROOT_TOKEN in Vault client 10 | export VAULT_TOKEN= 11 | 12 | ## create help menu: 13 | helpme() 14 | { 15 | cat < 18 | Where: 19 | OIDC_URL - OIDC URL (https://) (optional, if set as env. var) 20 | ROOT_TOKEN - Vault root token to setup the plugin (optional, if set as env. var) 21 | VAULT_ADDR - Vault address in format http://vault.server:8200 (optional, if set as env. var) 22 | 23 | HELPMEHELPME 24 | } 25 | 26 | setupVault() 27 | { 28 | vault login -no-print "${ROOT_TOKEN}" 29 | RT=$? 30 | if [ $RT -ne 0 ] ; then 31 | echo "ROOT_TOKEN is not correctly set" 32 | echo "ROOT_TOKEN=${ROOT_TOKEN}" 33 | echo "VAULT_ADDR=${VAULT_ADDR}" 34 | exit 1 35 | fi 36 | 37 | # Enable JWT authentication 38 | vault auth enable jwt 39 | RT=$? 40 | if [ $RT -ne 0 ] ; then 41 | echo " 'vault auth enable jwt' command failed" 42 | echo "jwt maybe already enabled?" 43 | read -n 1 -s -r -p 'Press any key to continue' 44 | #exit 1 45 | fi 46 | 47 | 48 | # Connect OIDC - Set up our OIDC Discovery URL, 49 | vault write auth/jwt/config oidc_discovery_url=$OIDC_URL default_role=“$ROLE” 50 | RT=$? 
51 | if [ $RT -ne 0 ] ; then 52 | echo " 'vault write auth/jwt/config oidc_discovery_url=' command failed" 53 | echo "jwt maybe already enabled?" 54 | read -n 1 -s -r -p 'Press any key to continue' 55 | #exit 1 56 | fi 57 | 58 | # Define a policy my-mars-policy that will be assigned to a marsrole role that we’ll create in the next step. 59 | cat > vault-policy.hcl < role.json < -- sh 98 | 99 | Once inside: 100 | # install jq parser 101 | apk add jq 102 | 103 | # get the JWT token, and export it as JWT env. variable: 104 | bin/spire-agent api fetch jwt -audience vault -socketPath /run/spire/sockets/agent.sock 105 | 106 | # setup env. variables: 107 | export JWT= 108 | export ROLE=marsrole 109 | export VAULT_ADDR=$VAULT_ADDR 110 | 111 | # using this JWT to login with vault and get a token: 112 | EOF 113 | 114 | echo " curl --max-time 10 -s -o out --request POST --data '{" '"jwt": "'"'"'"${JWT}"'"'"'", "role": "'"'"'"${ROLE}"'"'"'"}'"' "'"${VAULT_ADDR}"/v1/auth/jwt/login' 115 | echo # empty line 116 | echo " # get the client_token from the response" 117 | echo ' TOKEN=$(cat out | jq -r ' "'.auth.client_token')" 118 | echo ' curl -s -H "X-Vault-Token: $TOKEN" $VAULT_ADDR/v1/secret/data/my-super-secret' " | jq -r '.data.data'" 119 | } 120 | 121 | # validate the arguments 122 | if [[ "$1" == "-?" || "$1" == "-h" || "$1" == "--help" ]] ; then 123 | helpme 124 | exit 0 125 | fi 126 | 127 | # Make sure the OIDC_URL parameter is set 128 | if [[ "$OIDC_URL" == "" ]] ; then 129 | echo "OIDC_URL must be set" 130 | helpme 131 | exit 1 132 | fi 133 | 134 | # when paramters provider, overrid the env. 
variables 135 | if [[ "$3" != "" ]] ; then 136 | export OIDC_URL="$1" 137 | export ROOT_TOKEN="$2" 138 | export VAULT_ADDR="$3" 139 | elif [[ "$ROOT_TOKEN" == "" || "$VAULT_ADDR" == "" ]] ; then 140 | echo "ROOT_TOKEN and VAULT_ADDR must be set" 141 | helpme 142 | exit 1 143 | fi 144 | 145 | setupVault 146 | footer 147 | -------------------------------------------------------------------------------- /charts/tornjak/templates/server-configmap.tpl: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: spire-bundle 5 | namespace: {{ .Values.namespace }} 6 | --- 7 | apiVersion: v1 8 | kind: ConfigMap 9 | metadata: 10 | name: spire-server 11 | namespace: {{ .Values.namespace }} 12 | data: 13 | server.conf: | 14 | server { 15 | bind_address = "0.0.0.0" 16 | bind_port = "8081" 17 | trust_domain = "{{ .Values.trustdomain }}" 18 | data_dir = "/run/spire/data" 19 | log_level = "DEBUG" 20 | default_svid_ttl = "1h" 21 | socket_path = "{{ .Values.spireServer.socketDir }}/{{ .Values.spireServer.socketFile }}" 22 | 23 | {{- if .Values.oidc.enable }} 24 | #AWS requires the use of RSA. EC cryptography is not supported 25 | ca_key_type = "rsa-2048" 26 | 27 | # Creates the iss claim in JWT-SVIDs. 
28 | jwt_issuer = "https://{{ .Values.oidc.serviceName }}.{{ .Values.oidc.myDiscoveryDomain }}" 29 | 30 | experimental { 31 | // Turns on the bundle endpoint (required, true) 32 | bundle_endpoint_enabled = true 33 | 34 | // The address to listen on (optional, defaults to 0.0.0.0) 35 | // bundle_endpoint_address = "0.0.0.0" 36 | 37 | // The port to listen on (optional, defaults to 443) 38 | bundle_endpoint_port = 8443 39 | } 40 | {{- end }} 41 | 42 | ca_subject = { 43 | country = ["US"], 44 | organization = ["SPIFFE"], 45 | common_name = "", 46 | } 47 | } 48 | plugins { 49 | DataStore "sql" { 50 | plugin_data { 51 | database_type = "sqlite3" 52 | connection_string = "/run/spire/data/datastore.sqlite3" 53 | } 54 | } 55 | NodeAttestor "k8s_psat" { 56 | plugin_data { 57 | clusters = { 58 | "{{ .Values.clustername }}" = { 59 | # use_token_review_api_validation = true 60 | service_account_allow_list = ["spire:spire-agent"] 61 | }, 62 | {{- if .Values.attestors.k8s_psat.remoteClusters }} 63 | {{- range $k, $v := .Values.attestors.k8s_psat.remoteClusters }} 64 | "{{ $v.name }}" = { 65 | service_account_allow_list = ["{{ $v.namespace | default "spire" }}:{{ $v.serviceAccount | default "spire-agent" }}"] 66 | kube_config_file = "/run/spire/kubeconfigs/{{ $v.name }}" 67 | }, 68 | {{- end }} 69 | {{- end }} 70 | } 71 | } 72 | } 73 | 74 | {{- if .Values.attestors.aws_iid -}} 75 | {{- if .Values.attestors.aws_iid.access_key_id -}} 76 | {{- if .Values.attestors.aws_iid.secret_access_key -}} 77 | NodeAttestor "aws_iid" { 78 | plugin_data { 79 | access_key_id = "{{- .Values.attestors.aws_iid.access_key_id -}}" 80 | secret_access_key = "{{- .Values.attestors.aws_iid.secret_access_key -}}" 81 | skip_block_device = {{- .Values.attestors.aws_iid.skip_block_device -}} 82 | } 83 | } 84 | 85 | {{- end }} 86 | {{- end }} 87 | {{- end }} 88 | 89 | {{- if .Values.attestors.azure_msi -}} 90 | {{- if .Values.attestors.azure_msi.tenants -}} 91 | NodeAttestor "azure_msi" { 92 | enabled = true 93 
| plugin_data { 94 | tenants = { 95 | // Tenant configured with the default resource id (i.e. the resource manager) 96 | {{- range $k, $v := .Values.attestors.azure_msi.tenants }} 97 | "{{ $v.tenant }}" = {}, 98 | {{- end }} 99 | } 100 | } 101 | } 102 | {{- end }} 103 | {{- end }} 104 | 105 | KeyManager "disk" { 106 | plugin_data { 107 | keys_path = "/run/spire/data/keys.json" 108 | } 109 | } 110 | 111 | {{- if not .Values.spireServer }} 112 | {{- if not .Values.spireServer.selfSignedCA }} 113 | UpstreamAuthority "disk" { 114 | plugin_data { 115 | ttl = "12h" 116 | key_file_path = "/run/spire/secret/bootstrap.key" 117 | cert_file_path = "/run/spire/secret/bootstrap.crt" 118 | } 119 | } 120 | {{- end }} 121 | {{- end }} 122 | Notifier "k8sbundle" { 123 | plugin_data { 124 | # This plugin updates the bundle.crt value in the spire:spire-bundle 125 | # ConfigMap by default, so no additional configuration is necessary. 126 | namespace = "{{ .Values.namespace }}" 127 | } 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /charts/spire/ext/spiffeid.spiffe.io_spiffeids.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | annotations: 5 | controller-gen.kubebuilder.io/version: v0.2.4 6 | "helm.sh/resource-policy": keep 7 | creationTimestamp: null 8 | name: spiffeids.spiffeid.spiffe.io 9 | spec: 10 | group: spiffeid.spiffe.io 11 | names: 12 | kind: SpiffeID 13 | listKind: SpiffeIDList 14 | plural: spiffeids 15 | singular: spiffeid 16 | scope: Namespaced 17 | versions: 18 | - name: v1beta1 19 | served: true 20 | storage: true 21 | subresources: 22 | status: {} 23 | schema: 24 | openAPIV3Schema: 25 | description: SpiffeID is the Schema for the spiffeid API 26 | properties: 27 | apiVersion: 28 | description: 'APIVersion defines the versioned schema of this representation 29 | of an object. 
Servers should convert recognized schemas to the latest 30 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 31 | type: string 32 | kind: 33 | description: 'Kind is a string value representing the REST resource this 34 | object represents. Servers may infer this from the endpoint the client 35 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 36 | type: string 37 | metadata: 38 | type: object 39 | spec: 40 | description: SpiffeIDSpec defines the desired state of SpiffeID 41 | properties: 42 | dnsNames: 43 | items: 44 | type: string 45 | type: array 46 | federatesWith: 47 | items: 48 | type: string 49 | type: array 50 | parentId: 51 | type: string 52 | downstream: 53 | type: boolean 54 | selector: 55 | properties: 56 | arbitrary: 57 | description: Arbitrary selectors 58 | items: 59 | type: string 60 | type: array 61 | containerImage: 62 | description: Container image to match for this spiffe ID 63 | type: string 64 | containerName: 65 | description: Container name to match for this spiffe ID 66 | type: string 67 | namespace: 68 | description: Namespace to match for this spiffe ID 69 | type: string 70 | nodeName: 71 | description: Node name to match for this spiffe ID 72 | type: string 73 | podLabel: 74 | additionalProperties: 75 | type: string 76 | description: Pod label name/value to match for this spiffe ID 77 | type: object 78 | podName: 79 | description: Pod name to match for this spiffe ID 80 | type: string 81 | podUid: 82 | description: Pod UID to match for this spiffe ID 83 | type: string 84 | serviceAccount: 85 | description: ServiceAccount to match for this spiffe ID 86 | type: string 87 | cluster: 88 | description: The k8s_psat cluster name 89 | type: string 90 | agent_node_uid: 91 | description: UID of the node 92 | type: string 93 | 
type: object 94 | spiffeId: 95 | type: string 96 | required: 97 | - parentId 98 | - selector 99 | - spiffeId 100 | type: object 101 | status: 102 | description: SpiffeIDStatus defines the observed state of SpiffeID 103 | properties: 104 | entryId: 105 | description: 'INSERT ADDITIONAL STATUS FIELD - define observed state 106 | of cluster Important: Run "make" to regenerate code after modifying 107 | this file' 108 | type: string 109 | type: object 110 | type: object 111 | -------------------------------------------------------------------------------- /docs/bootstrap.md: -------------------------------------------------------------------------------- 1 | # Bootstrap Overview 2 | 3 | This document describes the bootstrap process of a cluster that will utilize trusted identity. 4 | The important points about bootstrapping is ensuring a setup where the keys are accessible to the components, and that they are delivered in a trusted and secure way. This will be the focus of the document. 5 | Bootstrapping of a cluster can be done in 3 different settings of key storage and usage, they are: 6 | 1. bootstrapping key on 1 vTPM/TPM as CA per cluster 7 | 2. bootstrapping key on per node vTPM/TPM as CA per cluster 8 | 3. bootstrapping key on per node vTPM/TPM per cluster with a central CA 9 | 10 | We will first go through how (1) is done. (2) and (3) are incremental steps on top of (1). 11 | 12 | ## 1. bootstrapping key on 1 vTPM/TPM as CA per cluster 13 | 14 | 15 | ### Provisioning a cluster 16 | 17 | The first step of bootstrapping is provisioning. We assume a secure provisioning process with measured and secure boot to ensure that the system is in a trusted initial state. This is a step controlled by the on-premise provider or cloud provider. 18 | 19 | Before installing Trusted Identity, it is important to verify that the setup is secure (ideally with an attestation of measured boot and TPM). Trust can also be established by evidence of a secure provisioning process. 
20 | 21 | ### Installing Trusted Identity 22 | 23 | We now assume that we have a cluster provisioned with a kubernetes cluster. We install the trusted identity helm chart deployment, which will create the necessary components of trusted identity in the cluster. 24 | 25 | In deploying the helm chart, we assume that there is components of the orchestration system that validates that the integrity of the deployment. 26 | 27 | Part of this process includes a key-server component of TI initializing and setting up of the vTPM/TPM. It will establish ownership of the module and create a RSA key pair. This key pair in this case will act as a CA (this is where it will differ in (3)). 28 | 29 | ### Binding trust with CA to key server 30 | 31 | In order to bind the trust of the CA to the key server, this requires a trusted operator to create the trust binding. This portion of the bootstrap process is about telling the key server which CA is trusted. 32 | 33 | Assuming a trusted key server, a trusted operator obtains the CA from the vTPM/TPM with attesting that it is from that vTPM/TPM. The trusted operator then put the CA in the key server alongside a set of claims that the cluster is required to have (i.e. this CA only can have region:US). 34 | 35 | This will allow the key server to validate the tokens that TI generates and prevent spoofing across certain established trust boundaries. 36 | 37 | 38 | ### End of bootstrap 39 | 40 | This is the end of the boostrap process, and the regular TI process and system integrity components will continue to uphold the integrity of the system. 41 | 42 | 43 | ## Other setup options (2) and (3) 44 | 45 | The other setup options talked about earlier are slight modifications of the bootstrap process. In fact, most of the steps are identitcal, besides generation of the keys in the vTPM/TPM and bootstrapping of trust. Below, we will describe the differences with the above bootstrap process for each of the setups. 46 | 47 | ### 2. 
bootstrapping key on per node vTPM/TPM as CA per cluster 48 | 49 | The difference here is that the CA is per node instead of one per cluster. The difference here is that for every node, TI will generate a RSA key pair (each of which will act as a node CA of the cluster). 50 | 51 | In the step of "Binding trust with CA to key server", the trusted operator would perform the same process for a single CA cluster with all the node CAs of the cluster. 52 | 53 | 54 | ### 3. bootstrapping key on per node vTPM/TPM per cluster with a central CA 55 | 56 | In this case, there is a central CA, instead of a CA per cluster (i.e. less CAs than the original setup in (1)). In this case, a RSA key pair will be generated per node, but will not be used as a CA. Instead, an external CA is used, which will perform a secure signing process with the vTPM/TPMs. 57 | 58 | The RSA key pairs will be used to generate a certificate that endorses the key pair that is generated by the vTPM/TPMs, and the claims that the node/cluster is required to have (as described in "Binding trust with CA to key server"). 59 | 60 | The trust model is stronger since the operator no longer needs to be trusted, and that trust is now cetralized with the certificate authority. 61 | 62 | In the step of "Binding trust with CA to key server", the trusted operator of the CA would perform the process only once with the centralized CA. Therefore, whenever new nodes/clusters are added, there is no need to perform this step again. 
63 | 64 | 65 | TODO: Add diagrams from PPT 66 | -------------------------------------------------------------------------------- /charts/spire/templates/spiffeid.spiffe.io_spiffeids.tpl: -------------------------------------------------------------------------------- 1 | {{- if not (lookup "apiextensions.k8s.io/v1" "CustomResourceDefinition" "" "spiffeids.spiffeid.spiffe.io") -}} 2 | {{/* 3 | If does not exist, generate new SPIFFEID CRD 4 | */}} 5 | 6 | apiVersion: apiextensions.k8s.io/v1 7 | kind: CustomResourceDefinition 8 | metadata: 9 | annotations: 10 | controller-gen.kubebuilder.io/version: v0.2.4 11 | "helm.sh/resource-policy": keep 12 | name: spiffeids.spiffeid.spiffe.io 13 | spec: 14 | group: spiffeid.spiffe.io 15 | names: 16 | kind: SpiffeID 17 | listKind: SpiffeIDList 18 | plural: spiffeids 19 | singular: spiffeid 20 | scope: Namespaced 21 | versions: 22 | - name: v1beta1 23 | served: true 24 | storage: true 25 | subresources: 26 | status: {} 27 | schema: 28 | openAPIV3Schema: 29 | description: SpiffeID is the Schema for the spiffeid API 30 | properties: 31 | apiVersion: 32 | description: 'APIVersion defines the versioned schema of this representation 33 | of an object. Servers should convert recognized schemas to the latest 34 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 35 | type: string 36 | kind: 37 | description: 'Kind is a string value representing the REST resource this 38 | object represents. Servers may infer this from the endpoint the client 39 | submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 40 | type: string 41 | metadata: 42 | type: object 43 | spec: 44 | description: SpiffeIDSpec defines the desired state of SpiffeID 45 | properties: 46 | dnsNames: 47 | items: 48 | type: string 49 | type: array 50 | federatesWith: 51 | items: 52 | type: string 53 | type: array 54 | parentId: 55 | type: string 56 | downstream: 57 | type: boolean 58 | selector: 59 | properties: 60 | arbitrary: 61 | description: Arbitrary selectors 62 | items: 63 | type: string 64 | type: array 65 | containerImage: 66 | description: Container image to match for this spiffe ID 67 | type: string 68 | containerName: 69 | description: Container name to match for this spiffe ID 70 | type: string 71 | namespace: 72 | description: Namespace to match for this spiffe ID 73 | type: string 74 | nodeName: 75 | description: Node name to match for this spiffe ID 76 | type: string 77 | podLabel: 78 | additionalProperties: 79 | type: string 80 | description: Pod label name/value to match for this spiffe ID 81 | type: object 82 | podName: 83 | description: Pod name to match for this spiffe ID 84 | type: string 85 | podUid: 86 | description: Pod UID to match for this spiffe ID 87 | type: string 88 | serviceAccount: 89 | description: ServiceAccount to match for this spiffe ID 90 | type: string 91 | cluster: 92 | description: The k8s_psat cluster name 93 | type: string 94 | agent_node_uid: 95 | description: UID of the node 96 | type: string 97 | type: object 98 | spiffeId: 99 | type: string 100 | required: 101 | - parentId 102 | - selector 103 | - spiffeId 104 | type: object 105 | status: 106 | description: SpiffeIDStatus defines the observed state of SpiffeID 107 | properties: 108 | entryId: 109 | description: 'INSERT ADDITIONAL STATUS FIELD - define observed state 110 | of cluster Important: Run "make" to regenerate code after modifying 111 | this file' 112 | type: string 113 | type: object 114 | 
type: object 115 | 116 | {{- end -}} 117 | -------------------------------------------------------------------------------- /utils/x509-conf/intermediate-config.txt: -------------------------------------------------------------------------------- 1 | # OpenSSL intermediate CA configuration file. 2 | # Copy to '/root/ca/intermediate/openssl.cnf'. 3 | 4 | [ ca ] 5 | # 'man ca' 6 | default_ca = CA_default 7 | 8 | [ CA_default ] 9 | # Directory and file locations. 10 | dir = /tmp/ca 11 | certs = \$dir/certs 12 | crl_dir = \$dir/crl 13 | new_certs_dir = \$dir/newcerts 14 | database = \$dir/index.txt 15 | serial = \$dir/serial 16 | RANDFILE = \$dir/private/.rand 17 | 18 | # The root key and root certificate. 19 | private_key = \$dir/intermediate.key.pem 20 | certificate = \$dir/intermediate.cert.pem 21 | 22 | # For certificate revocation lists. 23 | crlnumber = \$dir/crlnumber 24 | crl = \$dir/crl/intermediate.crl.pem 25 | crl_extensions = crl_ext 26 | default_crl_days = 30 27 | 28 | # SHA-1 is deprecated, so use SHA-2 instead. 29 | default_md = sha256 30 | 31 | name_opt = ca_default 32 | cert_opt = ca_default 33 | default_days = 375 34 | preserve = no 35 | policy = policy_loose 36 | 37 | [ policy_strict ] 38 | # The root CA should only sign intermediate certificates that match. 39 | # See the POLICY FORMAT section of 'man ca'. 40 | countryName = match 41 | stateOrProvinceName = match 42 | organizationName = match 43 | organizationalUnitName = optional 44 | commonName = supplied 45 | emailAddress = optional 46 | 47 | [ policy_loose ] 48 | # Allow the intermediate CA to sign a more diverse range of certificates. 49 | # See the POLICY FORMAT section of the 'ca' man page. 50 | countryName = optional 51 | stateOrProvinceName = optional 52 | localityName = optional 53 | organizationName = optional 54 | organizationalUnitName = optional 55 | commonName = supplied 56 | emailAddress = optional 57 | 58 | [ req ] 59 | # Options for the 'req' tool ('man req'). 
60 | default_bits = 2048 61 | distinguished_name = req_distinguished_name 62 | string_mask = utf8only 63 | 64 | # SHA-1 is deprecated, so use SHA-2 instead. 65 | default_md = sha256 66 | 67 | # Extension to add when the -x509 option is used. 68 | x509_extensions = v3_ca 69 | 70 | [ req_distinguished_name ] 71 | # See . 72 | countryName = Country Name (2 letter code) 73 | stateOrProvinceName = State or Province Name 74 | localityName = Locality Name 75 | 0.organizationName = Organization Name 76 | organizationalUnitName = Organizational Unit Name 77 | commonName = Common Name 78 | emailAddress = Email Address 79 | 80 | # Optionally, specify some defaults. 81 | countryName_default = US 82 | stateOrProvinceName_default = NY 83 | localityName_default = 84 | 0.organizationName_default = SPIRE Ltd 85 | organizationalUnitName_default = 86 | emailAddress_default = 87 | 88 | [ v3_ca ] 89 | # Extensions for a typical CA ('man x509v3_config'). 90 | subjectKeyIdentifier = hash 91 | authorityKeyIdentifier = keyid:always,issuer 92 | basicConstraints = critical, CA:true 93 | keyUsage = critical, digitalSignature, cRLSign, keyCertSign 94 | 95 | [ v3_intermediate_ca ] 96 | # Extensions for a typical intermediate CA ('man x509v3_config'). 97 | subjectKeyIdentifier = hash 98 | authorityKeyIdentifier = keyid:always,issuer 99 | basicConstraints = critical, CA:true, pathlen:0 100 | keyUsage = critical, digitalSignature, cRLSign, keyCertSign 101 | 102 | [ usr_cert ] 103 | # Extensions for client certificates ('man x509v3_config'). 104 | basicConstraints = CA:FALSE 105 | nsCertType = client, email 106 | nsComment = "OpenSSL Generated Client Certificate" 107 | subjectKeyIdentifier = hash 108 | authorityKeyIdentifier = keyid,issuer 109 | keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment 110 | extendedKeyUsage = clientAuth, emailProtection 111 | 112 | [ server_cert ] 113 | # Extensions for server certificates ('man x509v3_config'). 
114 | basicConstraints = CA:FALSE 115 | nsCertType = server 116 | nsComment = "OpenSSL Generated Server Certificate" 117 | subjectKeyIdentifier = hash 118 | authorityKeyIdentifier = keyid,issuer:always 119 | keyUsage = critical, digitalSignature, keyEncipherment 120 | extendedKeyUsage = serverAuth 121 | 122 | [ crl_ext ] 123 | # Extension for CRLs ('man x509v3_config'). 124 | authorityKeyIdentifier=keyid:always 125 | 126 | [ ocsp ] 127 | # Extension for OCSP signing certificates ('man ocsp'). 128 | basicConstraints = CA:FALSE 129 | subjectKeyIdentifier = hash 130 | authorityKeyIdentifier = keyid,issuer 131 | keyUsage = critical, digitalSignature 132 | extendedKeyUsage = critical, OCSPSigning 133 | -------------------------------------------------------------------------------- /examples/spire-sidecar/python/main.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, request, redirect 2 | from flask_cors import CORS 3 | import MySQLdb 4 | import json 5 | import configparser 6 | 7 | app = Flask(__name__) 8 | CORS(app) 9 | 10 | config = configparser.ConfigParser() 11 | config.read('./config.ini') 12 | 13 | @app.route('/') 14 | def index(): 15 | try: 16 | db=MySQLdb.connect(host=config["mysql"]["host"],port=int(config["mysql"]["port"]),user=config["mysql"]["user"], 17 | passwd=config["mysql"]["passwd"],db=config["mysql"]["db"]) 18 | try: 19 | cursor = db.cursor() 20 | output = "" 21 | # field_names = [i[0] for i in cursor.description] 22 | # for header in field_names: 23 | # output += "".format(header) 24 | output += "" 25 | output += "" 26 | output += "" 27 | output += "" 28 | output += "" 29 | output += "" 30 | cursor.execute("SELECT * FROM MOVIE") 31 | resultList = cursor.fetchall() 32 | for row in resultList: 33 | output += "" 34 | for i in range(len(row)): 35 | output += "".format(row[i]) 36 | output += "" 37 | output += "" 38 | output += "
{}Movie IDNameYearDirectorGenre
{}Delete
" 39 | 40 | output += "
"; 41 | output += """
42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 |
61 |
"""; 62 | output += "
"; 63 | except Exception as e: 64 | output = "Encountered error while retrieving data from database: {}".format(e) 65 | finally: 66 | db.close() 67 | return output 68 | except MySQLdb.Error as err: 69 | return "Something went wrong: {}".format(err) 70 | 71 | @app.route('/delete/', methods = ['GET']) 72 | def deletemovie(id): 73 | try: 74 | db=MySQLdb.connect(host=config["mysql"]["host"],port=int(config["mysql"]["port"]),user=config["mysql"]["user"], 75 | passwd=config["mysql"]["passwd"],db=config["mysql"]["db"]) 76 | try: 77 | mycursor = db.cursor() 78 | 79 | sql = "DELETE FROM MOVIE WHERE id = "+id 80 | mycursor.execute(sql) 81 | db.commit() 82 | print(mycursor.rowcount, "record(s) deleted") 83 | except Exception as e: 84 | return "Encountered error while retrieving data from database: {}".format(e) 85 | finally: 86 | db.close() 87 | return redirect("/") 88 | except MySQLdb.Error as err: 89 | return "Something went wrong: {}".format(err) 90 | 91 | @app.route('/addmovie', methods = ['POST']) 92 | def addmovie(): 93 | try: 94 | data = request.form 95 | # print(data) 96 | db=MySQLdb.connect(host=config["mysql"]["host"],port=int(config["mysql"]["port"]),user=config["mysql"]["user"], 97 | passwd=config["mysql"]["passwd"],db=config["mysql"]["db"]) 98 | try: 99 | mycursor = db.cursor() 100 | 101 | sql = "INSERT INTO MOVIE (`name`,`year`,`director`,`genre`) VALUES (%s, %s, %s, %s)" 102 | val = [(v) for k, v in data.items()] 103 | print (val) 104 | mycursor.execute(sql, val) 105 | 106 | db.commit() 107 | 108 | print(mycursor.rowcount, "record inserted.") 109 | except Exception as e: 110 | return "Encountered error while retrieving data from database: {}".format(e) 111 | finally: 112 | db.close() 113 | return redirect("/") 114 | except MySQLdb.Error as err: 115 | return "Something went wrong: {}".format(err) 116 | 117 | if __name__ == '__main__': 118 | app.run(debug=True, host='0.0.0.0') 119 | --------------------------------------------------------------------------------