├── .gitignore
├── LICENSE
├── README.md
├── jupyterhub
│   ├── Dockerfile
│   ├── etc
│   │   ├── build.sh
│   │   ├── entrypoint.sh
│   │   ├── profile
│   │   ├── profile.d
│   │   │   ├── httpd.sh
│   │   │   ├── nodejs.sh
│   │   │   ├── python.sh
│   │   │   └── sh.local
│   │   └── run.sh
│   └── src
│       ├── configs
│       │   ├── hosted-workshop.py
│       │   ├── hosted-workshop.sh
│       │   ├── jumpbox-server.py
│       │   ├── jumpbox-server.sh
│       │   ├── learning-portal.py
│       │   ├── learning-portal.sh
│       │   ├── terminal-server.py
│       │   ├── terminal-server.sh
│       │   ├── user-workspace.py
│       │   └── user-workspace.sh
│       ├── images
│       │   └── HomeroomIcon.png
│       ├── jupyterhub_config.py
│       ├── jupyterhub_config.sh
│       ├── requirements.txt
│       └── scripts
│           ├── create-terminals.sh
│           ├── cull-idle-servers.py
│           ├── cull-idle-servers.sh
│           ├── delete-projects.py
│           └── delete-projects.sh
├── keycloak
│   ├── Dockerfile
│   ├── realm.json
│   └── start-keycloak.sh
├── resources
│   ├── bases
│   │   └── spawner-resources
│   │       ├── kustomization.yaml
│   │       ├── session-envvars-config-map.yaml
│   │       ├── spawner-basic-role-binding.yaml
│   │       ├── spawner-configs-config-map.yaml
│   │       ├── spawner-deployment.yaml
│   │       ├── spawner-environ-config-map.yaml
│   │       ├── spawner-ingress.yaml
│   │       ├── spawner-service-account.yaml
│   │       └── spawner-service.yaml
│   └── overlays
│       ├── hosted-workshop
│       │   ├── kustomization.yaml
│       │   ├── spawner-console-oauth-client.yaml
│       │   ├── spawner-data-persistent-volume-claim.yaml
│       │   └── spawner-deployment-patch.yaml
│       └── learning-portal
│           ├── kustomization.yaml
│           ├── session-resources-config-map.yaml
│           ├── session-rules-cluster-role-binding.yaml
│           ├── session-rules-cluster-role.yaml
│           ├── spawner-basic-cluster-role-binding.yaml
│           ├── spawner-deployment-patch.yaml
│           ├── spawner-extra-cluster-role-binding.yaml
│           ├── spawner-extra-cluster-role.yaml
│           ├── spawner-rules-cluster-role-binding.yaml
│           └── spawner-rules-cluster-role.yaml
└── templates
    ├── hosted-workshop-development.json
    ├── hosted-workshop-production.json
    ├── jumpbox-server-development.json
    ├── jumpbox-server-production.json
    ├── learning-portal-development.json
    ├── learning-portal-production.json
    ├── terminal-server-development.json
    ├── terminal-server-production.json
    ├── user-workspace-development.json
    └── user-workspace-production.json
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
*.swp
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.
26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
Workshop Terminal/Dashboard Spawner
===================================

This repository contains an application spawner for deploying a containerised user environment in OpenShift, for the purpose of supporting workshops run on OpenShift.

The environment created for each user is pre-populated with the OpenShift and Kubernetes command line clients, along with development tools for Java, Node.js and Python. Access to the user environment is provided via an interactive terminal delivered to the user through their web browser.

Workshop base images
--------------------

Workshop content, and any additional applications required, are bundled as an image. The base image for the full workshop environment is:

* https://github.com/openshift-homeroom/workshop-dashboard

The base image for when only an interactive terminal is needed is:

* https://github.com/openshift-homeroom/workshop-terminal

A workshop image extends one of these to add its own content.

For all the configurations described below, to override the default image used, pass the `WORKSHOP_IMAGE` template parameter with a value referencing the custom image for the specific workshop.

Spawner configurations
----------------------

The spawner supports a number of different configurations, or modes, in which it can be deployed. These are:

* `learning-portal` - Used for workshops, or a permanent interactive learning portal, where users are anonymous and may do a workshop at any time. Each user is given temporary access as a service account user, with a single temporary project. When a workshop is completed, or the allowed time expires, the service account and project are automatically deleted.

* `hosted-workshop` - Used to run a supervised workshop where each user logs in with separate credentials for an existing user of the OpenShift cluster in which the workshop is being run. Users can perform any action in the cluster that the OpenShift user can, including creating multiple projects, if the cluster user quota configuration permits it.

* `terminal-server` - Similar to the hosted workshop configuration, but by default supplies only an interactive terminal in the browser, using the workshop terminal base image. If a workshop image built on the full workshop dashboard base is used with this configuration, no embedded web console is provided.

* `user-workspace` - Similar to the learning portal configuration, but users need to log in through Keycloak. Each user is given access as a service account user, with a single project. The service account and project are dedicated to the user and will still be present if the user leaves and comes back at a future time. This provides a place where users can do ongoing work, without users needing to be allocated in OpenShift itself.

* `jumpbox-server` - Users log in through Keycloak. By default it supplies only an interactive terminal in the browser, using the workshop terminal base image. The user has no access to the cluster itself; the terminal would be used to access a separate system.

Deploying the spawner
---------------------

For each spawner configuration there is a separate template.
The templates come in `production` and `development` variants. Unless you are working on the spawner itself, you can ignore the `development` variant.

The format of the command for deploying the spawner using any of the templates is:

```
oc process -f https://raw.githubusercontent.com/openshift-homeroom/workshop-spawner/master/templates/learning-portal-production.json --param SPAWNER_NAMESPACE=`oc project --short` --param CLUSTER_SUBDOMAIN=apps.openshiftcluster.com | oc apply -f -
```

In this case the `learning-portal` template is used. Replace the name with that of the configuration you want to use.

The `SPAWNER_NAMESPACE` template parameter passes in the name of the project the spawner is being deployed into. It should match the current project, or the project name passed using the `-n` or `--namespace` option if supplied.

The `CLUSTER_SUBDOMAIN` template parameter provides the name of the cluster subdomain under which hostnames created for generated routes reside. You can also supply your own custom subdomain, so long as DNS is set up to direct requests under that subdomain to the cluster.

The default name used by the deployment will be the same as the configuration. If you need to override this, use the `WORKSHOP_NAME` template parameter.

If you intend to deploy multiple instances of the spawner using the same configuration type, and with the same deployed name, in different projects, you must provide the `NAME_PREFIX` template parameter, passing in a value which, when combined with the name of the deployment, is unique for the cluster. This is necessary because the deployment creates resources which are global and not contained within the project namespace. If you don't supply `NAME_PREFIX`, the global resource names of the two deployments will clash.

Deleting the deployment
-----------------------

To delete the deployment run the command:

```
oc delete all,serviceaccount,configmap,secret,persistentvolumeclaim,rolebinding,clusterrole,clusterrolebinding,oauthclient -l app=learning-portal
```

Replace the value of the `app` label with that which was actually used for the deployment. This will be a combination of `NAME_PREFIX` and `WORKSHOP_NAME`.

Customizing configuration
-------------------------

Each template provides a range of template parameters that can be supplied to customize the deployment.

For example, to override the default image for the user environment and supply a reference to a custom image for a specific workshop, use the `WORKSHOP_IMAGE` template parameter. You can use the `WORKSHOP_NAME` template parameter to override the name used for the deployment.

```
oc new-app https://raw.githubusercontent.com/openshift-homeroom/workshop-spawner/master/templates/learning-portal-production.json --param SPAWNER_NAMESPACE=`oc project --short` --param WORKSHOP_NAME=lab-workshop-content --param WORKSHOP_IMAGE=quay.io/openshifthomeroom/lab-workshop-content:master
```

Look at the individual template files in the templates directory for the list of parameters they accept.
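You can also list the parameters a template accepts, along with their descriptions and defaults, without deploying anything, using `oc process` with the `--parameters` option. For example, for the `learning-portal` production template:

```
oc process --parameters -f https://raw.githubusercontent.com/openshift-homeroom/workshop-spawner/master/templates/learning-portal-production.json
```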
82 | 83 | Deployment scripts 84 | ------------------ 85 | 86 | For an easier way of deploying a workshop for multiple users, check out the repository: 87 | 88 | * https://github.com/openshift-homeroom/workshop-scripts 89 | 90 | This can be used in combination with the repository for a workshop, to embed deployment scripts into the workshop for deploying it, without needing any knowledge of the specific steps. 91 | -------------------------------------------------------------------------------- /jupyterhub/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos/s2i-base-centos7:latest 2 | 3 | USER root 4 | 5 | # Install required system packages. 6 | 7 | RUN HOME=/root && \ 8 | INSTALL_PKGS="sudo jq rh-python36 rh-python36-python-devel \ 9 | rh-python36-python-setuptools rh-python36-python-pip \ 10 | httpd24 httpd24-httpd-devel httpd24-mod_ssl httpd24-mod_auth_kerb \ 11 | httpd24-mod_ldap httpd24-mod_session" && \ 12 | yum install -y centos-release-scl epel-release && \ 13 | yum install -y --setopt=tsflags=nodocs \ 14 | --enablerepo=centosplus $INSTALL_PKGS && \ 15 | rpm -V $INSTALL_PKGS && \ 16 | # Remove centos-logos (httpd dependency) to keep image size smaller. 17 | rpm -e --nodeps centos-logos && \ 18 | yum -y clean all --enablerepo='*' 19 | 20 | # Fixup handling for UNIX accounts. 21 | 22 | RUN echo "auth requisite pam_deny.so" >> /etc/pam.d/su && \ 23 | sed -i.bak -e 's/^%wheel/# %wheel/' /etc/sudoers && \ 24 | chmod g+w /etc/passwd 25 | 26 | # Install configurable HTTP proxy. 27 | 28 | RUN HOME=/root && \ 29 | source scl_source enable $NODEJS_SCL && \ 30 | npm install -g configurable-http-proxy 31 | 32 | # Common environment variables. 33 | 34 | ENV HOME=/opt/app-root \ 35 | NPM_CONFIG_PREFIX=/opt/app-root \ 36 | PYTHONUNBUFFERED=1 \ 37 | PYTHONIOENCODING=UTF-8 \ 38 | LC_ALL=en_US.UTF-8 \ 39 | LANG=en_US.UTF-8 \ 40 | PIP_NO_CACHE_DIR=off 41 | 42 | # Install application software. 43 | 44 | COPY . /opt/app-root/ 45 | 46 | RUN chown -R 1001:0 /opt/app-root && \ 47 | fix-permissions /opt/app-root 48 | 49 | WORKDIR /opt/app-root/src 50 | 51 | USER 1001 52 | 53 | RUN /opt/app-root/etc/build.sh && \ 54 | fix-permissions /opt/app-root 55 | 56 | # Finish environment setup. 57 | 58 | ENV BASH_ENV=/opt/app-root/etc/profile \ 59 | ENV=/opt/app-root/etc/profile \ 60 | PROMPT_COMMAND=". /opt/app-root/etc/profile" 61 | 62 | ENTRYPOINT [ "/opt/app-root/etc/entrypoint.sh" ] 63 | 64 | CMD [ "/opt/app-root/etc/run.sh" ] 65 | -------------------------------------------------------------------------------- /jupyterhub/etc/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Enable SCL packages for build. 4 | 5 | source scl_source enable httpd24 6 | source scl_source enable rh-python36 7 | source scl_source enable $NODEJS_SCL 8 | 9 | # Create the Python virtual environment. 10 | 11 | virtualenv /opt/app-root 12 | 13 | source /opt/app-root/bin/activate 14 | 15 | pip install -U pip setuptools wheel 16 | 17 | # Install the required application packages. 18 | 19 | pip install -r requirements.txt 20 | 21 | # Create data directory for database files. 
mkdir /opt/app-root/data
--------------------------------------------------------------------------------
/jupyterhub/etc/entrypoint.sh:
--------------------------------------------------------------------------------
#!/bin/bash

exec "$@"
--------------------------------------------------------------------------------
/jupyterhub/etc/profile:
--------------------------------------------------------------------------------
unset BASH_ENV PROMPT_COMMAND ENV

# Add an entry to the /etc/passwd file for the assigned user ID if absent.

STATUS=0 && whoami &> /dev/null || STATUS=$? && true

if [[ "$STATUS" != "0" ]]; then
    cat /etc/passwd | sed -e "s/^default:/builder:/" > /tmp/passwd
    echo "default:x:$(id -u):$(id -g):,,,:/opt/app-root/src:/bin/bash" >> /tmp/passwd
    cat /tmp/passwd > /etc/passwd
    rm /tmp/passwd
fi

# Read in additional profile files.

for i in /opt/app-root/etc/profile.d/*.sh /opt/app-root/etc/profile.d/sh.local; do
    if [ -r "$i" ]; then
        . "$i" >/dev/null
    fi
done
--------------------------------------------------------------------------------
/jupyterhub/etc/profile.d/httpd.sh:
--------------------------------------------------------------------------------
source scl_source enable httpd24
--------------------------------------------------------------------------------
/jupyterhub/etc/profile.d/nodejs.sh:
--------------------------------------------------------------------------------
source scl_source enable $NODEJS_SCL
--------------------------------------------------------------------------------
/jupyterhub/etc/profile.d/python.sh:
--------------------------------------------------------------------------------
source scl_source enable rh-python36

export PYTHONUNBUFFERED=1
export PYTHONIOENCODING=UTF-8
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
export PIP_NO_CACHE_DIR=off

if [ -f /opt/app-root/bin/activate ]; then
    source /opt/app-root/bin/activate
fi
--------------------------------------------------------------------------------
/jupyterhub/etc/profile.d/sh.local: https://raw.githubusercontent.com/openshift-homeroom/workshop-spawner/2c11d6fbb62f81b7295a66976ebdc8544711f391/jupyterhub/etc/profile.d/sh.local
--------------------------------------------------------------------------------
/jupyterhub/etc/run.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -x

# Read any custom environment variables.

. /opt/app-root/src/jupyterhub_config.sh

# Start the JupyterHub instance, forwarding TERM/INT signals so it can
# shut down cleanly.

trap 'kill -TERM $PID' TERM INT

jupyterhub -f /opt/app-root/src/jupyterhub_config.py &

PID=$!
wait $PID
trap - TERM INT
wait $PID
STATUS=$?
exit $STATUS
--------------------------------------------------------------------------------
/jupyterhub/src/configs/hosted-workshop.py:
--------------------------------------------------------------------------------
# This file provides configuration specific to the 'hosted-workshop'
# deployment mode. In this mode authentication for JupyterHub is done
# against the OpenShift cluster using OAuth.

from tornado import web

# Enable the OpenShift authenticator. Environment variables have
# already been set from the hosted-workshop.sh script file.
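# For reference, hosted-workshop.sh exports the URLs the authenticator
# needs, roughly along these lines (see that file for the actual
# derivation):
#
#   OPENSHIFT_URL          - the OAuth issuer address
#   OPENSHIFT_REST_API_URL - the Kubernetes API server URL
#   OPENSHIFT_AUTH_API_URL - the OAuth issuer address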
c.JupyterHub.authenticator_class = "openshift"

from oauthenticator.openshift import OpenShiftOAuthenticator
OpenShiftOAuthenticator.scope = ['user:full']

client_id = '%s-console' % application_name
client_secret = os.environ['OAUTH_CLIENT_SECRET']

c.OpenShiftOAuthenticator.client_id = client_id
c.OpenShiftOAuthenticator.client_secret = client_secret
c.Authenticator.enable_auth_state = True

c.CryptKeeper.keys = [ client_secret.encode('utf-8') ]

c.OpenShiftOAuthenticator.oauth_callback_url = (
    '%s://%s/hub/oauth_callback' % (public_protocol, public_hostname))

c.Authenticator.auto_login = True

# Enable admin access to designated users of the OpenShift cluster.

c.JupyterHub.admin_access = True

c.Authenticator.admin_users = set(os.environ.get('ADMIN_USERS', '').split())

# Mount config map for user provided environment variables for the
# terminal and workshop.

c.KubeSpawner.volumes = [
    {
        'name': 'envvars',
        'configMap': {
            'name': '%s-session-envvars' % application_name,
            'defaultMode': 420
        }
    }
]

c.KubeSpawner.volume_mounts = [
    {
        'name': 'envvars',
        'mountPath': '/opt/workshop/envvars'
    }
]

# For workshops we provide each user with a persistent volume so they
# don't lose their work. This is mounted on /opt/app-root, so we need
# to copy the contents from the image into the persistent volume the
# first time, using an init container.

volume_size = os.environ.get('VOLUME_SIZE')

if volume_size:
    c.KubeSpawner.pvc_name_template = c.KubeSpawner.pod_name_template

    c.KubeSpawner.storage_pvc_ensure = True

    c.KubeSpawner.storage_capacity = volume_size

    c.KubeSpawner.storage_access_modes = ['ReadWriteOnce']

    c.KubeSpawner.volumes.extend([
        {
            'name': 'data',
            'persistentVolumeClaim': {
                'claimName': c.KubeSpawner.pvc_name_template
            }
        }
    ])

    c.KubeSpawner.volume_mounts.extend([
        {
            'name': 'data',
            'mountPath': '/opt/app-root',
            'subPath': 'workspace'
        }
    ])

    c.KubeSpawner.init_containers.extend([
        {
            'name': 'setup-volume',
            'image': '%s' % c.KubeSpawner.image_spec,
            'command': [
                '/opt/workshop/bin/setup-volume.sh',
                '/opt/app-root',
                '/mnt/workspace'
            ],
            "resources": {
                "limits": {
                    "memory": os.environ.get('WORKSHOP_MEMORY', '128Mi')
                },
                "requests": {
                    "memory": os.environ.get('WORKSHOP_MEMORY', '128Mi')
                }
            },
            'volumeMounts': [
                {
                    'name': 'data',
                    'mountPath': '/mnt'
                }
            ]
        }
    ])

# Deploy embedded web console as a separate container within the same
# pod as the terminal instance. Currently a fixed default version is
# used, but this needs to be tied to the specific OpenShift version of
# the cluster.
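# As an illustrative override (image tag and branding values below are
# hypothetical), a deployment could pin the console to match the cluster
# by setting the environment variables read just below:
#
#   CONSOLE_IMAGE=quay.io/openshift/origin-console:4.2
#   CONSOLE_BRANDING=ocp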
117 | 118 | console_branding = os.environ.get('CONSOLE_BRANDING', 'openshift') 119 | console_image = os.environ.get('CONSOLE_IMAGE', 'quay.io/openshift/origin-console:4.1') 120 | 121 | c.KubeSpawner.extra_containers.extend([ 122 | { 123 | "name": "console", 124 | "image": console_image, 125 | "command": [ "/opt/bridge/bin/bridge" ], 126 | "env": [ 127 | { 128 | "name": "BRIDGE_K8S_MODE", 129 | "value": "in-cluster" 130 | }, 131 | { 132 | "name": "BRIDGE_LISTEN", 133 | "value": "http://0.0.0.0:10083" 134 | }, 135 | { 136 | "name": "BRIDGE_BASE_ADDRESS", 137 | "value": "%s://%s/" % (public_protocol, public_hostname) 138 | }, 139 | { 140 | "name": "BRIDGE_BASE_PATH", 141 | "value": "/user/{unescaped_username}/console/" 142 | }, 143 | { 144 | "name": "BRIDGE_PUBLIC_DIR", 145 | "value": "/opt/bridge/static" 146 | }, 147 | { 148 | "name": "BRIDGE_USER_AUTH", 149 | "value": "disabled" 150 | }, 151 | { 152 | "name": "BRIDGE_K8S_AUTH", 153 | "value": "bearer-token" 154 | }, 155 | { 156 | "name": "BRIDGE_BRANDING", 157 | "value": console_branding 158 | } 159 | ], 160 | "resources": { 161 | "limits": { 162 | "memory": os.environ.get('CONSOLE_MEMORY', '128Mi') 163 | }, 164 | "requests": { 165 | "memory": os.environ.get('CONSOLE_MEMORY', '128Mi') 166 | } 167 | } 168 | } 169 | ]) 170 | 171 | c.Spawner.environment['CONSOLE_URL'] = 'http://localhost:10083' 172 | 173 | # Pass through environment variables with remote workshop details. 174 | 175 | c.Spawner.environment['DOWNLOAD_URL'] = os.environ.get('DOWNLOAD_URL', '') 176 | c.Spawner.environment['WORKSHOP_FILE'] = os.environ.get('WORKSHOP_FILE', '') 177 | 178 | # Make modifications to pod based on user and type of session. 179 | 180 | @gen.coroutine 181 | def modify_pod_hook(spawner, pod): 182 | short_name = spawner.user.name 183 | user_account_name = '%s-%s' % (application_name, short_name) 184 | 185 | pod.spec.service_account_name = user_account_name 186 | pod.spec.automount_service_account_token = True 187 | 188 | # Grab the OpenShift user access token from the login state. 189 | 190 | auth_state = yield spawner.user.get_auth_state() 191 | access_token = auth_state['access_token'] 192 | 193 | # Ensure that a service account exists corresponding to the user. 194 | # Need to do this as it may have been cleaned up if the session had 195 | # expired and user wasn't logged out in the browser. 196 | 197 | owner_uid = yield create_service_account(spawner, pod) 198 | 199 | # If there are any exposed ports defined for the session, create 200 | # a service object mapping to the pod for the ports, and create 201 | # routes for each port. 202 | 203 | yield expose_service_ports(spawner, pod, owner_uid) 204 | 205 | # Before can continue, need to poll looking to see if the secret for 206 | # the api token has been added to the service account. If don't do 207 | # this then pod creation will fail immediately. To do this, must get 208 | # the secrets from the service account and make sure they in turn 209 | # exist. 210 | 211 | yield wait_on_service_account(user_account_name) 212 | 213 | # Set the session access token from the OpenShift login in 214 | # both the terminal and console containers. 215 | 216 | pod.spec.containers[0].env.append( 217 | dict(name='OPENSHIFT_TOKEN', value=access_token)) 218 | 219 | pod.spec.containers[-1].env.append( 220 | dict(name='BRIDGE_K8S_AUTH_BEARER_TOKEN', value=access_token)) 221 | 222 | # See if a template for the project name has been specified. 223 | # Try expanding the name, substituting the username. 
If the
    # result differs from the template then we use it; if it is
    # unchanged it could not be unique per user, so it is ignored.

    project = os.environ.get('OPENSHIFT_PROJECT')

    if project:
        name = project.format(username=spawner.user.name)
        if name != project:
            pod.spec.containers[0].env.append(
                dict(name='PROJECT_NAMESPACE', value=name))

            # Ensure the project is created if it doesn't exist.

            pod.spec.containers[0].env.append(
                dict(name='OPENSHIFT_PROJECT', value=name))

    # Add environment variables for the namespace JupyterHub is running
    # in and its name.

    pod.spec.containers[0].env.append(
        dict(name='SPAWNER_NAMESPACE', value=namespace))
    pod.spec.containers[0].env.append(
        dict(name='SPAWNER_APPLICATION', value=application_name))

    if homeroom_link:
        pod.spec.containers[0].env.append(
            dict(name='HOMEROOM_LINK', value=homeroom_link))

    return pod

c.KubeSpawner.modify_pod_hook = modify_pod_hook

# Set up culling of terminal instances if the timeout parameter is supplied.

idle_timeout = os.environ.get('IDLE_TIMEOUT')

if idle_timeout and int(idle_timeout):
    cull_idle_servers_cmd = ['/opt/app-root/src/scripts/cull-idle-servers.sh']

    cull_idle_servers_cmd.append('--timeout=%s' % idle_timeout)

    c.JupyterHub.services.extend([
        {
            'name': 'cull-idle',
            'admin': True,
            'command': cull_idle_servers_cmd,
            'environment': dict(
                ENV="/opt/app-root/etc/profile",
                BASH_ENV="/opt/app-root/etc/profile",
                PROMPT_COMMAND=". /opt/app-root/etc/profile"
            ),
        }
    ])

# Pass through for the dashboard the URL to which it should redirect in
# order to restart a session, with a new instance created from a fresh image.

c.Spawner.environment['RESTART_URL'] = '/restart'

# Redirect handler for sending /restart back to the home page for the user.

from jupyterhub.handlers import BaseHandler

class RestartRedirectHandler(BaseHandler):

    @web.authenticated
    @gen.coroutine
    def get(self, *args):
        user = yield self.get_current_user()

        if user.running:
            status = yield user.spawner.poll_and_notify()
            if status is None:
                yield self.stop_single_user(user)
        self.clear_login_cookie()
        self.redirect(homeroom_link or '/hub/spawn')

c.JupyterHub.extra_handlers.extend([
    (r'/restart$', RestartRedirectHandler),
])
--------------------------------------------------------------------------------
/jupyterhub/src/configs/hosted-workshop.sh:
--------------------------------------------------------------------------------
# From OpenShift 4.0 onwards we need to supply separate URLs for the
# Kubernetes API server and the OAuth server.
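# The metadata endpoint queried below returns JSON of roughly this shape
# (illustrative values), from which the issuer address is extracted:
#
#   {
#     "issuer": "https://oauth-openshift.apps.example.com",
#     "authorization_endpoint": "https://oauth-openshift.apps.example.com/oauth/authorize",
#     "token_endpoint": "https://oauth-openshift.apps.example.com/oauth/token"
#   }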
KUBERNETES_SERVER_URL="https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT"
OAUTH_METADATA_URL="$KUBERNETES_SERVER_URL/.well-known/oauth-authorization-server"
OAUTH_ISSUER_ADDRESS=`curl -ks $OAUTH_METADATA_URL | jq -r '.issuer'`

export OPENSHIFT_URL=$OAUTH_ISSUER_ADDRESS
export OPENSHIFT_REST_API_URL=$KUBERNETES_SERVER_URL
export OPENSHIFT_AUTH_API_URL=$OAUTH_ISSUER_ADDRESS
--------------------------------------------------------------------------------
/jupyterhub/src/configs/jumpbox-server.py:
--------------------------------------------------------------------------------
# This file provides configuration specific to the 'jumpbox-server'
# deployment mode. In this mode authentication for JupyterHub is done
# against a KeyCloak authentication server.

# Configure standalone KeyCloak as the authentication provider for
# users. Environment variables have already been set from the
# jumpbox-server.sh script file.

c.JupyterHub.authenticator_class = "generic-oauth"

c.OAuthenticator.login_service = "KeyCloak"

c.OAuthenticator.oauth_callback_url = (
    '%s://%s/hub/oauth_callback' % (public_protocol, public_hostname))

c.OAuthenticator.client_id = 'homeroom'
c.OAuthenticator.client_secret = os.environ.get('OAUTH_CLIENT_SECRET')

c.OAuthenticator.tls_verify = False

c.Authenticator.auto_login = True

# Enable admin access to designated users of the OpenShift cluster.

c.JupyterHub.admin_access = True

c.Authenticator.admin_users = set(os.environ.get('ADMIN_USERS', '').split())

# Mount config map for user provided environment variables for the
# terminal and workshop.

c.KubeSpawner.volumes = [
    {
        'name': 'envvars',
        'configMap': {
            'name': '%s-session-envvars' % application_name,
            'defaultMode': 420
        }
    }
]

c.KubeSpawner.volume_mounts = [
    {
        'name': 'envvars',
        'mountPath': '/opt/workshop/envvars'
    }
]

# For workshops we provide each user with a persistent volume so they
# don't lose their work. This is mounted on /opt/app-root, so we need
# to copy the contents from the image into the persistent volume the
# first time, using an init container.
#
# Note that if a profiles list is used, there must still be a default
# terminal image set up which we can use to run the init container. The
# image is what contains the script which copies the files into the
# persistent volume. Perhaps we should use the JupyterHub image for the
# init container and add the script which performs the copy to that image.
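# Note that KubeSpawner expands template strings such as '{username}' in
# the PVC name and the volume definitions below, so each user should end
# up with their own persistent volume claim.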
59 | 60 | volume_size = os.environ.get('VOLUME_SIZE') 61 | 62 | if volume_size: 63 | c.KubeSpawner.pvc_name_template = c.KubeSpawner.pod_name_template 64 | 65 | c.KubeSpawner.storage_pvc_ensure = True 66 | 67 | c.KubeSpawner.storage_capacity = volume_size 68 | 69 | c.KubeSpawner.storage_access_modes = ['ReadWriteOnce'] 70 | 71 | c.KubeSpawner.volumes.extend([ 72 | { 73 | 'name': 'data', 74 | 'persistentVolumeClaim': { 75 | 'claimName': c.KubeSpawner.pvc_name_template 76 | } 77 | } 78 | ]) 79 | 80 | c.KubeSpawner.volume_mounts.extend([ 81 | { 82 | 'name': 'data', 83 | 'mountPath': '/opt/app-root', 84 | 'subPath': 'workspace' 85 | } 86 | ]) 87 | 88 | c.KubeSpawner.init_containers.extend([ 89 | { 90 | 'name': 'setup-volume', 91 | 'image': '%s' % c.KubeSpawner.image_spec, 92 | 'command': [ 93 | '/opt/workshop/bin/setup-volume.sh', 94 | '/opt/app-root', 95 | '/mnt/workspace' 96 | ], 97 | "resources": { 98 | "limits": { 99 | "memory": os.environ.get('WORKSHOP_MEMORY', '128Mi') 100 | }, 101 | "requests": { 102 | "memory": os.environ.get('WORKSHOP_MEMORY', '128Mi') 103 | } 104 | }, 105 | 'volumeMounts': [ 106 | { 107 | 'name': 'data', 108 | 'mountPath': '/mnt' 109 | } 110 | ] 111 | } 112 | ]) 113 | 114 | # Pass through environment variables with remote workshop details. 115 | 116 | c.Spawner.environment['DOWNLOAD_URL'] = os.environ.get('DOWNLOAD_URL', '') 117 | c.Spawner.environment['WORKSHOP_FILE'] = os.environ.get('WORKSHOP_FILE', '') 118 | 119 | # Run as our own service account which doesn't have any access rights. 120 | 121 | c.KubeSpawner.service_account = '%s-session' % application_name 122 | 123 | # Setup culling of terminal instances if timeout parameter is supplied. 124 | 125 | idle_timeout = os.environ.get('IDLE_TIMEOUT') 126 | 127 | if idle_timeout and int(idle_timeout): 128 | cull_idle_servers_cmd = ['/opt/app-root/src/scripts/cull-idle-servers.sh'] 129 | 130 | cull_idle_servers_cmd.append('--timeout=%s' % idle_timeout) 131 | 132 | c.JupyterHub.services.extend([ 133 | { 134 | 'name': 'cull-idle', 135 | 'admin': True, 136 | 'command': cull_idle_servers_cmd, 137 | 'environment': dict( 138 | ENV="/opt/app-root/etc/profile", 139 | BASH_ENV="/opt/app-root/etc/profile", 140 | PROMPT_COMMAND=". /opt/app-root/etc/profile" 141 | ), 142 | } 143 | ]) 144 | 145 | # Pass through for dashboard the URL where should be redirected in order 146 | # to restart a session, with a new instance created with fresh image. 147 | 148 | c.Spawner.environment['RESTART_URL'] = '/restart' 149 | 150 | # Redirect handler for sending /restart back to home page for user. 
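# The dashboard uses the RESTART_URL value set above: requesting /restart
# stops the user's running server (if any) and redirects back to
# /hub/spawn so a fresh instance is created.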
from tornado import web, gen

from jupyterhub.handlers import BaseHandler

class RestartRedirectHandler(BaseHandler):

    @web.authenticated
    @gen.coroutine
    def get(self, *args):
        user = yield self.get_current_user()
        if user.running:
            status = yield user.spawner.poll_and_notify()
            if status is None:
                yield self.stop_single_user(user)
        self.redirect('/hub/spawn')

c.JupyterHub.extra_handlers.extend([
    (r'/restart$', RestartRedirectHandler),
])
--------------------------------------------------------------------------------
/jupyterhub/src/configs/jumpbox-server.sh:
--------------------------------------------------------------------------------
SERVER_URL="https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT"
ACCESS_TOKEN=`cat /var/run/secrets/kubernetes.io/serviceaccount/token`
NAMESPACE=`cat /var/run/secrets/kubernetes.io/serviceaccount/namespace`

NAMESPACE_URL="$SERVER_URL/apis/route.openshift.io/v1/namespaces/$NAMESPACE"
ROUTES_URL="$NAMESPACE_URL/routes/$APPLICATION_NAME-keycloak"

KEYCLOAK_NAME="$APPLICATION_NAME-keycloak"

KEYCLOAK_HOSTNAME=`curl -s -k -H "Authorization: Bearer $ACCESS_TOKEN" \
    $ROUTES_URL | jq -r '.spec.host'`

KEYCLOAK_REALM="homeroom"

export OAUTH2_TOKEN_URL="https://$KEYCLOAK_HOSTNAME/auth/realms/$KEYCLOAK_REALM/protocol/openid-connect/token"
export OAUTH2_AUTHORIZE_URL="https://$KEYCLOAK_HOSTNAME/auth/realms/$KEYCLOAK_REALM/protocol/openid-connect/auth"
export OAUTH2_USERDATA_URL="https://$KEYCLOAK_HOSTNAME/auth/realms/$KEYCLOAK_REALM/protocol/openid-connect/userinfo"

export OAUTH2_TLS_VERIFY="0"
export OAUTH_TLS_VERIFY="0"

export OAUTH2_USERNAME_KEY="preferred_username"
--------------------------------------------------------------------------------
/jupyterhub/src/configs/learning-portal.py:
--------------------------------------------------------------------------------
# This file provides configuration specific to the 'learning-portal'
# deployment mode. In this mode, anonymous authentication is used, with
# users being given their own unique service account and project to work
# in. The project and service account will be deleted when the session
# goes idle or the time limit for the session has expired.

# Uses an anonymous authenticator. Users will be automatically assigned a
# user name. If a spawner password is provided, users log in with an email
# address as the user name, plus the spawner password. The email address
# is not used as part of the user name; a user name is still automatically
# assigned. During the pseudo authentication a service account is created
# for the user, where the name of the service account is their user name.
# The special '/restart' URL handler causes any existing session to be
# stopped, and the user is given a new instance.
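# For example, automatically assigned user names look like 'k3ftb' (five
# characters drawn from the vowel-free alphabet below, presumably to
# avoid generating recognisable words), or 'user1', 'user2', ... when the
# named-user variant further below is active.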
15 | 16 | import functools 17 | import random 18 | import weakref 19 | 20 | from tornado import web 21 | 22 | from jupyterhub.auth import Authenticator 23 | from jupyterhub.handlers import BaseHandler 24 | from jupyterhub.utils import url_path_join 25 | 26 | class AnonymousUser(object): 27 | 28 | def __init__(self, name): 29 | self.name = name 30 | self.active = False 31 | 32 | @functools.lru_cache(10000) 33 | def get_user_details(name): 34 | return AnonymousUser(name) 35 | 36 | random_userid_chars = 'bcdfghjklmnpqrstvwxyz0123456789' 37 | 38 | def generate_random_userid(n=5): 39 | return ''.join(random.choice(random_userid_chars) for _ in range(n)) 40 | 41 | class AutoAuthenticateHandler(BaseHandler): 42 | 43 | def initialize(self, force_new_server, process_user): 44 | super().initialize() 45 | self.force_new_server = force_new_server 46 | self.process_user = process_user 47 | 48 | def generate_user(self): 49 | while True: 50 | name = generate_random_userid() 51 | user = get_user_details(name) 52 | if not user.active: 53 | user.active = True 54 | return name 55 | 56 | @gen.coroutine 57 | def get(self): 58 | raw_user = yield self.get_current_user() 59 | 60 | if raw_user: 61 | if self.force_new_server and raw_user.running: 62 | # Stop the user's current terminal instance if it is 63 | # running so that they get a new one. Should hopefully 64 | # only end up here if have hit the /restart URL path. 65 | 66 | status = yield raw_user.spawner.poll_and_notify() 67 | if status is None: 68 | yield self.stop_single_user(raw_user) 69 | 70 | # Also force a new user name be generated so don't have 71 | # issues with browser caching web pages for anything 72 | # want to be able to change for a demo. Only way to do 73 | # this seems to be to clear the login cookie and force a 74 | # redirect back to the top of the site, hoping we do not 75 | # get into a loop. 
                self.clear_login_cookie()
                return self.redirect('/')

        else:
            username = self.generate_user()
            raw_user = self.user_from_username(username)
            self.set_login_cookie(raw_user)

        user = yield gen.maybe_future(self.process_user(raw_user, self))

        self.redirect(self.get_argument("next", user.url))

class AutoAuthenticator(Authenticator):

    auto_login = True
    login_service = 'auto'

    force_new_server = True

    def process_user(self, user, handler):
        return user

    def get_handlers(self, app):
        extra_settings = {
            'force_new_server': self.force_new_server,
            'process_user': self.process_user
        }
        return [
            ('/login', AutoAuthenticateHandler, extra_settings)
        ]

    def login_url(self, base_url):
        return url_path_join(base_url, 'login')

c.JupyterHub.authenticator_class = AutoAuthenticator

named_users = weakref.WeakValueDictionary()

user_count = 0

def generate_sequential_userid():
    global user_count
    user_count += 1
    return 'user%d' % user_count

class NamedUserAuthenticator(Authenticator):
    password = os.environ.get('SPAWNER_PASSWORD')

    def generate_user(self, username):
        user = named_users.get(username)

        if user:
            return user.name

        while True:
            name = generate_sequential_userid()
            user = get_user_details(name)
            if not user.active:
                user.active = True
                named_users[username] = user
                return name

    @gen.coroutine
    def authenticate(self, handler, data):
        if data['username'] and self.password:
            if data['password'] == self.password:
                return self.generate_user(data['username'])

if NamedUserAuthenticator.password:
    c.JupyterHub.authenticator_class = NamedUserAuthenticator

# Mount config map for user provided environment variables for the
# terminal and workshop.

c.KubeSpawner.volumes = [
    {
        'name': 'envvars',
        'configMap': {
            'name': '%s-session-envvars' % application_name,
            'defaultMode': 420
        }
    }
]

c.KubeSpawner.volume_mounts = [
    {
        'name': 'envvars',
        'mountPath': '/opt/workshop/envvars'
    }
]

# Deploy embedded web console as a separate container within the same
# pod as the terminal instance. This needs to be updated to match the
# specific OpenShift version of the cluster.
console_branding = os.environ.get('CONSOLE_BRANDING', 'openshift')
console_image = os.environ.get('CONSOLE_IMAGE', 'quay.io/openshift/origin-console:4.1')

c.KubeSpawner.extra_containers.extend([
    {
        "name": "console",
        "image": console_image,
        "command": [ "/opt/bridge/bin/bridge" ],
        "env": [
            {
                "name": "BRIDGE_K8S_MODE",
                "value": "in-cluster"
            },
            {
                "name": "BRIDGE_LISTEN",
                "value": "http://0.0.0.0:10083"
            },
            {
                "name": "BRIDGE_BASE_ADDRESS",
                "value": "%s://%s/" % (public_protocol, public_hostname)
            },
            {
                "name": "BRIDGE_BASE_PATH",
                "value": "/user/{unescaped_username}/console/"
            },
            {
                "name": "BRIDGE_PUBLIC_DIR",
                "value": "/opt/bridge/static"
            },
            {
                "name": "BRIDGE_USER_AUTH",
                "value": "disabled"
            },
            {
                "name": "BRIDGE_BRANDING",
                "value": console_branding
            }
        ],
        "resources": {
            "limits": {
                "memory": os.environ.get('CONSOLE_MEMORY', '128Mi')
            },
            "requests": {
                "memory": os.environ.get('CONSOLE_MEMORY', '128Mi')
            }
        }
    }
])

c.Spawner.environment['CONSOLE_URL'] = 'http://localhost:10083'

# Pass through environment variables with remote workshop details.

c.Spawner.environment['DOWNLOAD_URL'] = os.environ.get('DOWNLOAD_URL', '')
c.Spawner.environment['WORKSHOP_FILE'] = os.environ.get('WORKSHOP_FILE', '')

# Pass through for the dashboard the URL to which it should redirect in
# order to restart a session, with a new instance created from a fresh image.

c.Spawner.environment['RESTART_URL'] = '/restart'

# Intercept creation of the pod and use it to trigger our customisations.

project_owner_name = '%s-spawner-extra' % application_name

try:
    project_owner = cluster_role_resource.get(project_owner_name)

except Exception as e:
    print('ERROR: Cannot get spawner cluster role %s. %s' % (project_owner_name, e))
    raise

@gen.coroutine
def modify_pod_hook(spawner, pod):
    short_name = spawner.user.name
    user_account_name = '%s-%s' % (application_name, short_name)

    project_name = '%s-%s' % (application_name, short_name)

    pod.spec.automount_service_account_token = True
    pod.spec.service_account_name = user_account_name

    # Ensure that a service account exists corresponding to the user.
    # Need to do this as it may have been cleaned up if the session had
    # expired and the user wasn't logged out in the browser.

    owner_uid = yield create_service_account(spawner, pod)

    # If there are any exposed ports defined for the session, create
    # a service object mapping to the pod for the ports, and create
    # routes for each port.

    yield expose_service_ports(spawner, pod, owner_uid)

    # Create a project for just this user. Poll to make sure it is
    # created before continuing.

    yield create_project_namespace(spawner, pod, project_name)

    # Now set up the project permissions and resource budget.
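    # Note: helpers such as create_service_account, expose_service_ports,
    # create_project_namespace and setup_project_namespace are not defined
    # in this file; like the 'c', 'application_name' and resource client
    # globals, they are presumably provided by the shared
    # jupyterhub_config.py which loads this configuration fragment.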
272 | 273 | resource_budget = os.environ.get('RESOURCE_BUDGET', 'default') 274 | 275 | project_uid = yield setup_project_namespace(spawner, pod, project_name, 276 | 'admin', resource_budget) 277 | 278 | # Before can continue, need to poll looking to see if the secret for 279 | # the api token has been added to the service account. If don't do 280 | # this then pod creation will fail immediately. To do this, must get 281 | # the secrets from the service account and make sure they in turn 282 | # exist. 283 | 284 | yield wait_on_service_account(user_account_name) 285 | 286 | # Create any extra resources in the project required for a workshop. 287 | 288 | yield create_extra_resources(spawner, pod, project_name, project_uid, 289 | user_account_name, short_name) 290 | 291 | # Add environment variable for the project namespace for use in any 292 | # workshop content. 293 | 294 | pod.spec.containers[0].env.append( 295 | dict(name='PROJECT_NAMESPACE', value=project_name)) 296 | 297 | # Add environment variables for the namespace JupyterHub is running 298 | # in and its name. 299 | 300 | pod.spec.containers[0].env.append( 301 | dict(name='SPAWNER_NAMESPACE', value=namespace)) 302 | pod.spec.containers[0].env.append( 303 | dict(name='SPAWNER_APPLICATION', value=application_name)) 304 | 305 | if homeroom_link: 306 | pod.spec.containers[0].env.append( 307 | dict(name='HOMEROOM_LINK', value=homeroom_link)) 308 | 309 | return pod 310 | 311 | c.KubeSpawner.modify_pod_hook = modify_pod_hook 312 | 313 | # Setup culling of terminal instances when idle or session expires, as 314 | # well as setup service to clean up service accounts and projects 315 | # related to old sessions. If a server limit is defined, also cap how 316 | # many can be run. 317 | 318 | server_limit = os.environ.get('SERVER_LIMIT') 319 | 320 | if server_limit: 321 | c.JupyterHub.active_server_limit = int(server_limit) 322 | 323 | idle_timeout = os.environ.get('IDLE_TIMEOUT', '600') 324 | max_session_age = os.environ.get('MAX_SESSION_AGE') 325 | 326 | if idle_timeout and int(idle_timeout): 327 | cull_idle_servers_cmd = ['/opt/app-root/src/scripts/cull-idle-servers.sh'] 328 | 329 | cull_idle_servers_cmd.append('--cull-every=60') 330 | cull_idle_servers_cmd.append('--timeout=%s' % idle_timeout) 331 | cull_idle_servers_cmd.append('--cull-users') 332 | 333 | if max_session_age: 334 | cull_idle_servers_cmd.append('--max-age=%s' % max_session_age) 335 | 336 | c.JupyterHub.services.extend([ 337 | { 338 | 'name': 'cull-idle', 339 | 'admin': True, 340 | 'command': cull_idle_servers_cmd, 341 | 'environment': dict( 342 | ENV="/opt/app-root/etc/profile", 343 | BASH_ENV="/opt/app-root/etc/profile", 344 | PROMPT_COMMAND=". /opt/app-root/etc/profile" 345 | ), 346 | } 347 | ]) 348 | 349 | delete_projects_cmd = ['/opt/app-root/src/scripts/delete-projects.sh'] 350 | 351 | c.JupyterHub.services.extend([ 352 | { 353 | 'name': 'delete-projects', 354 | 'command': delete_projects_cmd, 355 | 'environment': dict( 356 | ENV="/opt/app-root/etc/profile", 357 | BASH_ENV="/opt/app-root/etc/profile", 358 | PROMPT_COMMAND=". /opt/app-root/etc/profile", 359 | APPLICATION_NAME=application_name, 360 | KUBERNETES_SERVICE_HOST=kubernetes_service_host, 361 | KUBERNETES_SERVICE_PORT=kubernetes_service_port 362 | ), 363 | } 364 | ]) 365 | 366 | # Redirect handler for sending /restart back to home page for user. 
from jupyterhub.handlers import BaseHandler

class RestartRedirectHandler(BaseHandler):

    @web.authenticated
    @gen.coroutine
    def get(self, *args):
        user = yield self.get_current_user()

        if user.running:
            status = yield user.spawner.poll_and_notify()
            if status is None:
                yield self.stop_single_user(user)
        self.clear_login_cookie()
        self.redirect(homeroom_link or '/hub/spawn')

c.JupyterHub.extra_handlers.extend([
    (r'/restart$', RestartRedirectHandler),
])
--------------------------------------------------------------------------------
/jupyterhub/src/configs/learning-portal.sh: https://raw.githubusercontent.com/openshift-homeroom/workshop-spawner/2c11d6fbb62f81b7295a66976ebdc8544711f391/jupyterhub/src/configs/learning-portal.sh
--------------------------------------------------------------------------------
/jupyterhub/src/configs/terminal-server.py:
--------------------------------------------------------------------------------
# This file provides configuration specific to the 'terminal-server'
# deployment mode. In this mode authentication for JupyterHub is done
# against the OpenShift cluster using OAuth.

from tornado import web

# Enable the OpenShift authenticator. Environment variables have
# already been set from the terminal-server.sh script file.

c.JupyterHub.authenticator_class = "openshift"

from oauthenticator.openshift import OpenShiftOAuthenticator
OpenShiftOAuthenticator.scope = ['user:full']

client_id = '%s-console' % application_name
client_secret = os.environ['OAUTH_CLIENT_SECRET']

c.OpenShiftOAuthenticator.client_id = client_id
c.OpenShiftOAuthenticator.client_secret = client_secret
c.Authenticator.enable_auth_state = True

c.CryptKeeper.keys = [ client_secret.encode('utf-8') ]

c.OpenShiftOAuthenticator.oauth_callback_url = (
    '%s://%s/hub/oauth_callback' % (public_protocol, public_hostname))

c.Authenticator.auto_login = True

# Enable admin access to designated users of the OpenShift cluster.

c.JupyterHub.admin_access = True

c.Authenticator.admin_users = set(os.environ.get('ADMIN_USERS', '').split())

# Mount config map for user provided environment variables for the
# terminal and workshop.

c.KubeSpawner.volumes = [
    {
        'name': 'envvars',
        'configMap': {
            'name': '%s-session-envvars' % application_name,
            'defaultMode': 420
        }
    }
]

c.KubeSpawner.volume_mounts = [
    {
        'name': 'envvars',
        'mountPath': '/opt/workshop/envvars'
    }
]

# For workshops we provide each user with a persistent volume so they
# don't lose their work. This is mounted on /opt/app-root, so we need
# to copy the contents from the image into the persistent volume the
# first time, using an init container.
59 | 60 | volume_size = os.environ.get('VOLUME_SIZE') 61 | 62 | if volume_size: 63 | c.KubeSpawner.pvc_name_template = c.KubeSpawner.pod_name_template 64 | 65 | c.KubeSpawner.storage_pvc_ensure = True 66 | 67 | c.KubeSpawner.storage_capacity = volume_size 68 | 69 | c.KubeSpawner.storage_access_modes = ['ReadWriteOnce'] 70 | 71 | c.KubeSpawner.volumes.extend([ 72 | { 73 | 'name': 'data', 74 | 'persistentVolumeClaim': { 75 | 'claimName': c.KubeSpawner.pvc_name_template 76 | } 77 | } 78 | ]) 79 | 80 | c.KubeSpawner.volume_mounts.extend([ 81 | { 82 | 'name': 'data', 83 | 'mountPath': '/opt/app-root', 84 | 'subPath': 'workspace' 85 | } 86 | ]) 87 | 88 | c.KubeSpawner.init_containers.extend([ 89 | { 90 | 'name': 'setup-volume', 91 | 'image': '%s' % c.KubeSpawner.image_spec, 92 | 'command': [ 93 | '/opt/workshop/bin/setup-volume.sh', 94 | '/opt/app-root', 95 | '/mnt/workspace' 96 | ], 97 | "resources": { 98 | "limits": { 99 | "memory": os.environ.get('WORKSHOP_MEMORY', '128Mi') 100 | }, 101 | "requests": { 102 | "memory": os.environ.get('WORKSHOP_MEMORY', '128Mi') 103 | } 104 | }, 105 | 'volumeMounts': [ 106 | { 107 | 'name': 'data', 108 | 'mountPath': '/mnt' 109 | } 110 | ] 111 | } 112 | ]) 113 | 114 | # Pass through environment variables with remote workshop details. 115 | 116 | c.Spawner.environment['DOWNLOAD_URL'] = os.environ.get('DOWNLOAD_URL', '') 117 | c.Spawner.environment['WORKSHOP_FILE'] = os.environ.get('WORKSHOP_FILE', '') 118 | 119 | # Make modifications to pod based on user and type of session. 120 | 121 | @gen.coroutine 122 | def modify_pod_hook(spawner, pod): 123 | short_name = spawner.user.name 124 | user_account_name = '%s-%s' % (application_name, short_name) 125 | 126 | pod.spec.service_account_name = user_account_name 127 | pod.spec.automount_service_account_token = True 128 | 129 | # Grab the OpenShift user access token from the login state. 130 | 131 | auth_state = yield spawner.user.get_auth_state() 132 | access_token = auth_state['access_token'] 133 | 134 | # Ensure that a service account exists corresponding to the user. 135 | # We need to do this as it may have been cleaned up if the session had 136 | # expired and the user wasn't logged out in the browser. 137 | 138 | owner_uid = yield create_service_account(spawner, pod) 139 | 140 | # If there are any exposed ports defined for the session, create 141 | # a service object mapping to the pod for the ports, and create 142 | # routes for each port. 143 | 144 | yield expose_service_ports(spawner, pod, owner_uid) 145 | 146 | # Before we can continue, we need to poll to check that the secret for 147 | # the API token has been added to the service account. If we don't do 148 | # this then pod creation will fail immediately. To do this, we must get 149 | # the secrets from the service account and make sure they in turn 150 | # exist. 151 | 152 | yield wait_on_service_account(user_account_name) 153 | 154 | # Set the session access token from the OpenShift login in 155 | # both the terminal and console containers. 156 | 157 | pod.spec.containers[0].env.append( 158 | dict(name='OPENSHIFT_TOKEN', value=access_token)) 159 | 160 | # See if a template for the project name has been specified. 161 | # Try expanding the name, substituting the username. If the 162 | # result is different then we use it; if it is the same, the 163 | # template has no placeholder and would not be unique per user.
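# As a minimal illustration, with hypothetical values, a template that
# contains the placeholder is used, while one that does not is ignored:
#
#     >>> 'workshop-{username}'.format(username='user1')
#     'workshop-user1'
#     >>> 'workshop'.format(username='user1')
#     'workshop'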
164 | 165 | project = os.environ.get('OPENSHIFT_PROJECT') 166 | 167 | if project: 168 | name = project.format(username=spawner.user.name) 169 | if name != project: 170 | pod.spec.containers[0].env.append( 171 | dict(name='PROJECT_NAMESPACE', value=name)) 172 | 173 | # Ensure project is created if it doesn't exist. 174 | 175 | pod.spec.containers[0].env.append( 176 | dict(name='OPENSHIFT_PROJECT', value=name)) 177 | 178 | # Add environment variables for the namespace JupyterHub is running 179 | # in and its name. 180 | 181 | pod.spec.containers[0].env.append( 182 | dict(name='SPAWNER_NAMESPACE', value=namespace)) 183 | pod.spec.containers[0].env.append( 184 | dict(name='SPAWNER_APPLICATION', value=application_name)) 185 | 186 | if homeroom_link: 187 | pod.spec.containers[0].env.append( 188 | dict(name='HOMEROOM_LINK', value=homeroom_link)) 189 | 190 | return pod 191 | 192 | c.KubeSpawner.modify_pod_hook = modify_pod_hook 193 | 194 | # Set up culling of terminal instances if the timeout parameter is supplied. 195 | 196 | idle_timeout = os.environ.get('IDLE_TIMEOUT') 197 | 198 | if idle_timeout and int(idle_timeout): 199 | cull_idle_servers_cmd = ['/opt/app-root/src/scripts/cull-idle-servers.sh'] 200 | 201 | cull_idle_servers_cmd.append('--timeout=%s' % idle_timeout) 202 | 203 | c.JupyterHub.services.extend([ 204 | { 205 | 'name': 'cull-idle', 206 | 'admin': True, 207 | 'command': cull_idle_servers_cmd, 208 | 'environment': dict( 209 | ENV="/opt/app-root/etc/profile", 210 | BASH_ENV="/opt/app-root/etc/profile", 211 | PROMPT_COMMAND=". /opt/app-root/etc/profile" 212 | ), 213 | } 214 | ]) 215 | 216 | # Pass through to the dashboard the URL it should redirect to in order 217 | # to restart a session, with a new instance created from a fresh image. 218 | 219 | c.Spawner.environment['RESTART_URL'] = '/restart' 220 | 221 | # Redirect handler for sending /restart back to home page for user. 222 | 223 | from jupyterhub.handlers import BaseHandler 224 | 225 | class RestartRedirectHandler(BaseHandler): 226 | 227 | @web.authenticated 228 | @gen.coroutine 229 | def get(self, *args): 230 | user = yield self.get_current_user() 231 | if user.running: 232 | status = yield user.spawner.poll_and_notify() 233 | if status is None: 234 | yield self.stop_single_user(user) 235 | self.redirect('/hub/spawn') 236 | 237 | c.JupyterHub.extra_handlers.extend([ 238 | (r'/restart$', RestartRedirectHandler), 239 | ]) 240 | -------------------------------------------------------------------------------- /jupyterhub/src/configs/terminal-server.sh: -------------------------------------------------------------------------------- 1 | # From OpenShift 4.0 we need to supply separate URLs for Kubernetes 2 | # server and OAuth server. 3 | 4 | KUBERNETES_SERVER_URL="https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT" 5 | OAUTH_METADATA_URL="$KUBERNETES_SERVER_URL/.well-known/oauth-authorization-server" 6 | OAUTH_ISSUER_ADDRESS=`curl -ks $OAUTH_METADATA_URL | jq -r '.issuer'` 7 | 8 | export OPENSHIFT_URL=$OAUTH_ISSUER_ADDRESS 9 | export OPENSHIFT_REST_API_URL=$KUBERNETES_SERVER_URL 10 | export OPENSHIFT_AUTH_API_URL=$OAUTH_ISSUER_ADDRESS 11 | -------------------------------------------------------------------------------- /jupyterhub/src/configs/user-workspace.py: -------------------------------------------------------------------------------- 1 | # This file provides configuration specific to the 'user-workspace' 2 | # deployment mode.
In this mode authentication for JupyterHub is done 3 | # against a KeyCloak authentication server. 4 | 5 | from tornado import web 6 | 7 | # Configure standalone KeyCloak as the authentication provider for 8 | # users. Environment variables have already been set from the 9 | # user-workspace.sh script file. 10 | 11 | c.JupyterHub.authenticator_class = "generic-oauth" 12 | 13 | c.OAuthenticator.login_service = "KeyCloak" 14 | 15 | c.OAuthenticator.oauth_callback_url = ( 16 | '%s://%s/hub/oauth_callback' % (public_protocol, public_hostname)) 17 | 18 | c.OAuthenticator.client_id = 'homeroom' 19 | c.OAuthenticator.client_secret = os.environ.get('OAUTH_CLIENT_SECRET') 20 | 21 | c.OAuthenticator.tls_verify = False 22 | 23 | c.Authenticator.auto_login = True 24 | 25 | # Enable admin access to designated users of the OpenShift cluster. 26 | 27 | c.JupyterHub.admin_access = True 28 | 29 | c.Authenticator.admin_users = set(os.environ.get('ADMIN_USERS', '').split()) 30 | 31 | # Mount config map for user-provided environment variables for the 32 | # terminal and workshop. 33 | 34 | c.KubeSpawner.volumes = [ 35 | { 36 | 'name': 'envvars', 37 | 'configMap': { 38 | 'name': '%s-session-envvars' % application_name, 39 | 'defaultMode': 420 40 | } 41 | } 42 | ] 43 | 44 | c.KubeSpawner.volume_mounts = [ 45 | { 46 | 'name': 'envvars', 47 | 'mountPath': '/opt/workshop/envvars' 48 | } 49 | ] 50 | 51 | # For workshops we provide each user with a persistent volume so they 52 | # don't lose their work. This is mounted on /opt/app-root, so we need 53 | # to copy the contents from the image into the persistent volume the 54 | # first time using an init container. 55 | # 56 | # Note that if a profiles list is used, there must still be a default 57 | # terminal image setup we can use to run the init container. The image 58 | # is what contains the script which copies the files into the persistent 59 | # volume. Perhaps we should use the JupyterHub image for the init container 60 | # and add the script which performs the copy to this image. 61 | 62 | volume_size = os.environ.get('VOLUME_SIZE') 63 | 64 | if volume_size: 65 | c.KubeSpawner.pvc_name_template = c.KubeSpawner.pod_name_template 66 | 67 | c.KubeSpawner.storage_pvc_ensure = True 68 | 69 | c.KubeSpawner.storage_capacity = volume_size 70 | 71 | c.KubeSpawner.storage_access_modes = ['ReadWriteOnce'] 72 | 73 | c.KubeSpawner.volumes.extend([ 74 | { 75 | 'name': 'data', 76 | 'persistentVolumeClaim': { 77 | 'claimName': c.KubeSpawner.pvc_name_template 78 | } 79 | } 80 | ]) 81 | 82 | c.KubeSpawner.volume_mounts.extend([ 83 | { 84 | 'name': 'data', 85 | 'mountPath': '/opt/app-root', 86 | 'subPath': 'workspace' 87 | } 88 | ]) 89 | 90 | c.KubeSpawner.init_containers.extend([ 91 | { 92 | 'name': 'setup-volume', 93 | 'image': '%s' % c.KubeSpawner.image_spec, 94 | 'command': [ 95 | '/opt/workshop/bin/setup-volume.sh', 96 | '/opt/app-root', 97 | '/mnt/workspace' 98 | ], 99 | "resources": { 100 | "limits": { 101 | "memory": os.environ.get('WORKSHOP_MEMORY', '128Mi') 102 | }, 103 | "requests": { 104 | "memory": os.environ.get('WORKSHOP_MEMORY', '128Mi') 105 | } 106 | }, 107 | 'volumeMounts': [ 108 | { 109 | 'name': 'data', 110 | 'mountPath': '/mnt' 111 | } 112 | ] 113 | } 114 | ]) 115 | 116 | # Deploy embedded web console as a separate container within the same 117 | # pod as the terminal instance. The console image version used should 118 | # ideally be tied to the OpenShift version of the cluster in use.
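# For illustration, with a hypothetical public hostname of
# workshop.example.com and a user named user1, the BRIDGE_BASE_ADDRESS
# and BRIDGE_BASE_PATH settings below result in the console being
# served beneath the session at:
#
#     https://workshop.example.com/user/user1/console/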
119 | 120 | console_branding = os.environ.get('CONSOLE_BRANDING', 'openshift') 121 | console_image = os.environ.get('CONSOLE_IMAGE', 'quay.io/openshift/origin-console:4.1') 122 | 123 | c.KubeSpawner.extra_containers.extend([ 124 | { 125 | "name": "console", 126 | "image": console_image, 127 | "command": [ "/opt/bridge/bin/bridge" ], 128 | "env": [ 129 | { 130 | "name": "BRIDGE_K8S_MODE", 131 | "value": "in-cluster" 132 | }, 133 | { 134 | "name": "BRIDGE_LISTEN", 135 | "value": "http://0.0.0.0:10083" 136 | }, 137 | { 138 | "name": "BRIDGE_BASE_ADDRESS", 139 | "value": "%s://%s/" % (public_protocol, public_hostname) 140 | }, 141 | { 142 | "name": "BRIDGE_BASE_PATH", 143 | "value": "/user/{unescaped_username}/console/" 144 | }, 145 | { 146 | "name": "BRIDGE_PUBLIC_DIR", 147 | "value": "/opt/bridge/static" 148 | }, 149 | { 150 | "name": "BRIDGE_USER_AUTH", 151 | "value": "disabled" 152 | }, 153 | { 154 | "name": "BRIDGE_BRANDING", 155 | "value": console_branding 156 | } 157 | ], 158 | "resources": { 159 | "limits": { 160 | "memory": os.environ.get('CONSOLE_MEMORY', '128Mi') 161 | }, 162 | "requests": { 163 | "memory": os.environ.get('CONSOLE_MEMORY', '128Mi') 164 | } 165 | } 166 | } 167 | ]) 168 | 169 | c.Spawner.environment['CONSOLE_URL'] = 'http://localhost:10083' 170 | 171 | # Pass through environment variables with remote workshop details. 172 | 173 | c.Spawner.environment['DOWNLOAD_URL'] = os.environ.get('DOWNLOAD_URL', '') 174 | c.Spawner.environment['WORKSHOP_FILE'] = os.environ.get('WORKSHOP_FILE', '') 175 | 176 | project_owner_name = '%s-spawner-extra' % application_name 177 | 178 | try: 179 | project_owner = cluster_role_resource.get(project_owner_name) 180 | 181 | except Exception as e: 182 | print('ERROR: Cannot get spawner cluster role %s. %s' % (project_owner_name, e)) 183 | raise 184 | 185 | @gen.coroutine 186 | def modify_pod_hook(spawner, pod): 187 | short_name = spawner.user.name 188 | user_account_name = '%s-%s' % (application_name, short_name) 189 | 190 | project_name = '%s-%s' % (application_name, short_name) 191 | 192 | pod.spec.automount_service_account_token = True 193 | pod.spec.service_account_name = user_account_name 194 | 195 | # Ensure that a service account exists corresponding to the user. 196 | # We need to do this as it may have been cleaned up if the session had 197 | # expired and the user wasn't logged out in the browser. 198 | 199 | owner_uid = yield create_service_account(spawner, pod) 200 | 201 | # If there are any exposed ports defined for the session, create 202 | # a service object mapping to the pod for the ports, and create 203 | # routes for each port. 204 | 205 | yield expose_service_ports(spawner, pod, owner_uid) 206 | 207 | # Create a project for just this user. Poll to make sure it is 208 | # created before continuing. 209 | 210 | yield create_project_namespace(spawner, pod, project_name) 211 | 212 | # Now set up the project permissions and resource budget. 213 | 214 | resource_budget = os.environ.get('RESOURCE_BUDGET', 'default') 215 | 216 | project_uid = yield setup_project_namespace(spawner, pod, project_name, 217 | 'admin', resource_budget) 218 | 219 | # Before we can continue, we need to poll to check that the secret for 220 | # the API token has been added to the service account. If we don't do 221 | # this then pod creation will fail immediately. To do this, we must get 222 | # the secrets from the service account and make sure they in turn 223 | # exist.
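# A rough sketch of the polling just described; the real helper,
# wait_on_service_account(), lives in jupyterhub_config.py rather than
# this file, and its details may differ:
#
#     @gen.coroutine
#     def wait_on_service_account(name):
#         while True:
#             account = service_account_resource.get(
#                     namespace=namespace, name=name)
#             # Wait until the account lists secrets and each listed
#             # secret actually exists before allowing the pod spawn.
#             if account.secrets and all(
#                     secret_exists(s.name) for s in account.secrets):
#                 break
#             yield gen.sleep(0.1)
#
# (service_account_resource and secret_exists() above are hypothetical
# stand-ins for the lookups the real helper performs.)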
224 | 225 | yield wait_on_service_account(user_account_name) 226 | 227 | # Create any extra resources in the project required for a workshop. 228 | 229 | yield create_extra_resources(spawner, pod, project_name, project_uid, 230 | user_account_name, short_name) 231 | 232 | # Add environment variable for the project namespace for use in any 233 | # workshop content. 234 | 235 | pod.spec.containers[0].env.append( 236 | dict(name='PROJECT_NAMESPACE', value=project_name)) 237 | 238 | # Add environment variables for the namespace JupyterHub is running 239 | # in and its name. 240 | 241 | pod.spec.containers[0].env.append( 242 | dict(name='SPAWNER_NAMESPACE', value=namespace)) 243 | pod.spec.containers[0].env.append( 244 | dict(name='SPAWNER_APPLICATION', value=application_name)) 245 | 246 | if homeroom_link: 247 | pod.spec.containers[0].env.append( 248 | dict(name='HOMEROOM_LINK', value=homeroom_link)) 249 | 250 | return pod 251 | 252 | c.KubeSpawner.modify_pod_hook = modify_pod_hook 253 | 254 | # Set up culling of terminal instances if the timeout parameter is supplied. 255 | 256 | idle_timeout = os.environ.get('IDLE_TIMEOUT') 257 | 258 | if idle_timeout and int(idle_timeout): 259 | cull_idle_servers_cmd = ['/opt/app-root/src/scripts/cull-idle-servers.sh'] 260 | 261 | cull_idle_servers_cmd.append('--timeout=%s' % idle_timeout) 262 | 263 | c.JupyterHub.services.extend([ 264 | { 265 | 'name': 'cull-idle', 266 | 'admin': True, 267 | 'command': cull_idle_servers_cmd, 268 | 'environment': dict( 269 | ENV="/opt/app-root/etc/profile", 270 | BASH_ENV="/opt/app-root/etc/profile", 271 | PROMPT_COMMAND=". /opt/app-root/etc/profile" 272 | ), 273 | } 274 | ]) 275 | 276 | # Pass through to the dashboard the URL it should redirect to in order 277 | # to restart a session, with a new instance created from a fresh image. 278 | 279 | c.Spawner.environment['RESTART_URL'] = '/restart' 280 | 281 | # Redirect handler for sending /restart back to home page for user.
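# Workshop content can therefore trigger a reset simply by sending the
# browser to the RESTART_URL value exposed above, e.g. with a
# hypothetical hostname:
#
#     https://workshop.example.com/restart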
282 | 283 | from jupyterhub.handlers import BaseHandler 284 | 285 | class RestartRedirectHandler(BaseHandler): 286 | 287 | @web.authenticated 288 | @gen.coroutine 289 | def get(self, *args): 290 | user = yield self.get_current_user() 291 | 292 | if user.running: 293 | status = yield user.spawner.poll_and_notify() 294 | if status is None: 295 | yield self.stop_single_user(user) 296 | self.redirect(homeroom_link or '/hub/spawn') 297 | 298 | c.JupyterHub.extra_handlers.extend([ 299 | (r'/restart$', RestartRedirectHandler), 300 | ]) 301 | -------------------------------------------------------------------------------- /jupyterhub/src/configs/user-workspace.sh: -------------------------------------------------------------------------------- 1 | SERVER_URL="https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT" 2 | ACCESS_TOKEN=`cat /var/run/secrets/kubernetes.io/serviceaccount/token` 3 | NAMESPACE=`cat /var/run/secrets/kubernetes.io/serviceaccount/namespace` 4 | 5 | NAMESPACE_URL="$SERVER_URL/apis/route.openshift.io/v1/namespaces/$NAMESPACE" 6 | ROUTES_URL="$NAMESPACE_URL/routes/$APPLICATION_NAME-keycloak" 7 | 8 | KEYCLOAK_NAME="$APPLICATION_NAME-keycloak" 9 | 10 | KEYCLOAK_HOSTNAME=`curl -s -k -H "Authorization: Bearer $ACCESS_TOKEN" \ 11 | $ROUTES_URL | jq -r '.spec.host'` 12 | 13 | KEYCLOAK_REALM="homeroom" 14 | 15 | export OAUTH2_TOKEN_URL="https://$KEYCLOAK_HOSTNAME/auth/realms/$KEYCLOAK_REALM/protocol/openid-connect/token" 16 | export OAUTH2_AUTHORIZE_URL="https://$KEYCLOAK_HOSTNAME/auth/realms/$KEYCLOAK_REALM/protocol/openid-connect/auth" 17 | export OAUTH2_USERDATA_URL="https://$KEYCLOAK_HOSTNAME/auth/realms/$KEYCLOAK_REALM/protocol/openid-connect/userinfo" 18 | 19 | export OAUTH2_TLS_VERIFY="0" 20 | export OAUTH_TLS_VERIFY="0" 21 | 22 | export OAUTH2_USERNAME_KEY="preferred_username" 23 | -------------------------------------------------------------------------------- /jupyterhub/src/images/HomeroomIcon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openshift-homeroom/workshop-spawner/2c11d6fbb62f81b7295a66976ebdc8544711f391/jupyterhub/src/images/HomeroomIcon.png -------------------------------------------------------------------------------- /jupyterhub/src/jupyterhub_config.sh: -------------------------------------------------------------------------------- 1 | CONFIGURATION_TYPE=${CONFIGURATION_TYPE:-hosted-workshop} 2 | 3 | if [ -f /opt/app-root/src/configs/${CONFIGURATION_TYPE}.sh ]; then 4 | . /opt/app-root/src/configs/${CONFIGURATION_TYPE}.sh 5 | fi 6 | 7 | if [ -f /opt/app-root/configs/jupyterhub_config.sh ]; then 8 | . /opt/app-root/configs/jupyterhub_config.sh 9 | fi 10 | -------------------------------------------------------------------------------- /jupyterhub/src/requirements.txt: -------------------------------------------------------------------------------- 1 | #kubernetes==9.0.1 2 | kubernetes==11.0.0 3 | jupyterhub==1.1.0 4 | jupyterhub-kubespawner==0.13.0 5 | jupyterhub-tmpauthenticator==0.6 6 | oauthenticator==0.10.0 7 | #openshift==0.9.2 8 | openshift==0.11.2 9 | wrapt==1.12.1 10 | -------------------------------------------------------------------------------- /jupyterhub/src/scripts/create-terminals.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Some bash functions for common tasks. 4 | 5 | trim() 6 | { 7 | local trimmed="$1" 8 | 9 | # Strip leading space. 10 | trimmed="${trimmed## }" 11 | # Strip trailing space. 
12 | trimmed="${trimmed%% }" 13 | 14 | echo "$trimmed" 15 | } 16 | 17 | # The script can optionally be passed its arguments. If not supplied, the 18 | # user will be prompted to supply them. 19 | 20 | if [ "$#" -ge 1 ]; then 21 | NUMBER_OF_USERS=$1 22 | shift 23 | else 24 | read -p "Number of users: " NUMBER_OF_USERS 25 | fi 26 | 27 | NUMBER_OF_USERS=$(trim `echo $NUMBER_OF_USERS`) 28 | 29 | if [ "$NUMBER_OF_USERS" == "" ]; then 30 | echo "ERROR: number of users cannot be empty." 31 | exit 1 32 | fi 33 | 34 | if ! [[ $NUMBER_OF_USERS =~ ^[0-9]*$ ]]; then 35 | echo "ERROR: Invalid value $NUMBER_OF_USERS." 36 | exit 1 37 | fi 38 | 39 | if [ "$#" -ge 1 ]; then 40 | ACCESS_TOKEN=$1 41 | shift 42 | else 43 | read -p "Access token: " ACCESS_TOKEN 44 | fi 45 | 46 | ACCESS_TOKEN=$(trim `echo $ACCESS_TOKEN`) 47 | 48 | if [ "$ACCESS_TOKEN" == "" ]; then 49 | echo "ERROR: access token cannot be empty." 50 | exit 1 51 | fi 52 | 53 | if [ "$#" -ge 1 ]; then 54 | APPLICATION_NAME=$1 55 | shift 56 | else 57 | read -p "Application name: " APPLICATION_NAME 58 | fi 59 | 60 | APPLICATION_NAME=$(trim `echo $APPLICATION_NAME`) 61 | 62 | if [ "$APPLICATION_NAME" == "" ]; then 63 | echo "ERROR: application name cannot be empty." 64 | exit 1 65 | fi 66 | 67 | if [ "$#" -ge 1 ]; then 68 | DO_UPDATE=y 69 | CONTINUE_PROMPT=n 70 | fi 71 | 72 | if [ x"$CONTINUE_PROMPT" != x"n" ]; then 73 | read -p "Continue? [y/N] " DO_UPDATE 74 | fi 75 | 76 | if ! [[ $DO_UPDATE =~ ^[Yy]$ ]]; then 77 | exit 1 78 | fi 79 | 80 | # It is assumed that 'oc' is in the current path and that the user is 81 | # logged into the cluster. First check that we can find the deployment in the project. 82 | 83 | oc get "dc/$APPLICATION_NAME" > /dev/null 2>&1 84 | 85 | if [ "$?" != "0" ]; then 86 | echo "ERROR: Cannot find JupyterHub deployment." 87 | exit 1 88 | fi 89 | 90 | # Look up the URL endpoint for the JupyterHub instance. 91 | 92 | REST_API_HOST=`oc get "route/$APPLICATION_NAME" --template='{{.spec.host}}'` 93 | 94 | if [ "$?" != "0" ]; then 95 | echo "ERROR: Cannot retrieve REST API host for $APPLICATION_NAME." 96 | exit 1 97 | fi 98 | 99 | REST_API_URL="https://$REST_API_HOST/hub/api" 100 | 101 | # Create the terminals via the REST API.
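# For illustration, with hypothetical values NUMBER_OF_USERS=1 and the
# route host already resolved into REST_API_URL, one pass of the loop
# below is equivalent to:
#
#   curl -k -H "Authorization: token $ACCESS_TOKEN" -X POST \
#       -d '{"usernames": ["user1"]}' "$REST_API_URL/users"
#   curl -k -H "Authorization: token $ACCESS_TOKEN" -X POST \
#       "$REST_API_URL/users/user1/server"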
102 | 103 | for i in `seq 1 $NUMBER_OF_USERS`; do 104 | 105 | echo "Creating user user$i" 106 | 107 | python -c "import json; \ 108 | print(json.dumps({'usernames':['user$i']}))" > /tmp/user$$.json 109 | 110 | curl -k -H "Authorization: token $ACCESS_TOKEN" -X POST \ 111 | -d @/tmp/user$$.json "$REST_API_URL/users" 112 | 113 | rm -f /tmp/user$$.json 114 | 115 | echo "Spawn terminal for user$i" 116 | 117 | curl -k -H "Authorization: token $ACCESS_TOKEN" -X POST \ 118 | "$REST_API_URL/users/user$i/server" 119 | 120 | echo "Sleeping 3 seconds" 121 | 122 | sleep 3 123 | done 124 | 125 | echo 126 | -------------------------------------------------------------------------------- /jupyterhub/src/scripts/cull-idle-servers.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """script to monitor and cull idle single-user servers 3 | 4 | Caveats: 5 | 6 | last_activity is not updated with high frequency, 7 | so cull timeout should be greater than the sum of: 8 | 9 | - single-user websocket ping interval (default: 30s) 10 | - JupyterHub.last_activity_interval (default: 5 minutes) 11 | 12 | You can run this as a service managed by JupyterHub with this in your config:: 13 | 14 | 15 | c.JupyterHub.services = [ 16 | { 17 | 'name': 'cull-idle', 18 | 'admin': True, 19 | 'command': 'python3 cull_idle_servers.py --timeout=3600'.split(), 20 | } 21 | ] 22 | 23 | Or run it manually by generating an API token and storing it in `JUPYTERHUB_API_TOKEN`: 24 | 25 | export JUPYTERHUB_API_TOKEN=`jupyterhub token` 26 | python3 cull_idle_servers.py [--timeout=900] [--url=http://127.0.0.1:8081/hub/api] 27 | 28 | This script uses the same ``--timeout`` and ``--max-age`` values for 29 | culling users and users' servers. If you want a different value for 30 | users and servers, you should add this script to the services list 31 | twice, just with different ``name``s, different values, and one with 32 | the ``--cull-users`` option.
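For example, to cull idle servers after an hour but only cull the users
themselves after four idle hours, something like the following could be
used (illustrative values only)::

    c.JupyterHub.services = [
        {
            'name': 'cull-idle-servers',
            'admin': True,
            'command': 'python3 cull_idle_servers.py --timeout=3600'.split(),
        },
        {
            'name': 'cull-idle-users',
            'admin': True,
            'command': ('python3 cull_idle_servers.py '
                        '--timeout=14400 --cull-users').split(),
        },
    ]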
33 | """ 34 | 35 | from datetime import datetime, timezone 36 | from functools import partial 37 | import json 38 | import os 39 | 40 | try: 41 | from urllib.parse import quote 42 | except ImportError: 43 | from urllib import quote 44 | 45 | import dateutil.parser 46 | 47 | from tornado.gen import coroutine, multi 48 | from tornado.locks import Semaphore 49 | from tornado.log import app_log 50 | from tornado.httpclient import AsyncHTTPClient, HTTPRequest 51 | from tornado.ioloop import IOLoop, PeriodicCallback 52 | from tornado.options import define, options, parse_command_line 53 | 54 | 55 | def parse_date(date_string): 56 | """Parse a timestamp 57 | 58 | If it doesn't have a timezone, assume utc 59 | 60 | Returned datetime object will always be timezone-aware 61 | """ 62 | dt = dateutil.parser.parse(date_string) 63 | if not dt.tzinfo: 64 | # assume naïve timestamps are UTC 65 | dt = dt.replace(tzinfo=timezone.utc) 66 | return dt 67 | 68 | 69 | def format_td(td): 70 | """ 71 | Nicely format a timedelta object 72 | 73 | as HH:MM:SS 74 | """ 75 | if td is None: 76 | return "unknown" 77 | if isinstance(td, str): 78 | return td 79 | seconds = int(td.total_seconds()) 80 | h = seconds // 3600 81 | seconds = seconds % 3600 82 | m = seconds // 60 83 | seconds = seconds % 60 84 | return "{h:02}:{m:02}:{seconds:02}".format(h=h, m=m, seconds=seconds) 85 | 86 | 87 | @coroutine 88 | def cull_idle(url, api_token, inactive_limit, cull_users=False, max_age=0, concurrency=10): 89 | """Shutdown idle single-user servers 90 | 91 | If cull_users, inactive *users* will be deleted as well. 92 | """ 93 | auth_header = { 94 | 'Authorization': 'token %s' % api_token, 95 | } 96 | req = HTTPRequest( 97 | url=url + '/users', 98 | headers=auth_header, 99 | ) 100 | now = datetime.now(timezone.utc) 101 | client = AsyncHTTPClient() 102 | 103 | if concurrency: 104 | semaphore = Semaphore(concurrency) 105 | @coroutine 106 | def fetch(req): 107 | """client.fetch wrapped in a semaphore to limit concurrency""" 108 | yield semaphore.acquire() 109 | try: 110 | return (yield client.fetch(req)) 111 | finally: 112 | yield semaphore.release() 113 | else: 114 | fetch = client.fetch 115 | 116 | resp = yield fetch(req) 117 | users = json.loads(resp.body.decode('utf8', 'replace')) 118 | futures = [] 119 | 120 | @coroutine 121 | def handle_server(user, server_name, server): 122 | """Handle (maybe) culling a single server 123 | 124 | Returns True if server is now stopped (user removable), 125 | False otherwise. 126 | """ 127 | log_name = user['name'] 128 | if server_name: 129 | log_name = '%s/%s' % (user['name'], server_name) 130 | if server.get('pending'): 131 | app_log.warning( 132 | "Not culling server %s with pending %s", 133 | log_name, server['pending']) 134 | return False 135 | 136 | # jupyterhub < 0.9 defined 'server.url' once the server was ready 137 | # as an *implicit* signal that the server was ready. 138 | # 0.9 adds a dedicated, explicit 'ready' field. 139 | # By current (0.9) definitions, servers that have no pending 140 | # events and are not ready shouldn't be in the model, 141 | # but let's check just to be safe. 
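# For reference, a ready server in the user model returned by the Hub
# API looks roughly like this (hypothetical values, other fields
# omitted):
#
#     {'name': '', 'ready': True, 'pending': None,
#      'url': '/user/user1/', 'started': '2019-01-01T00:00:00Z',
#      'last_activity': '2019-01-01T01:00:00Z'}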
142 | 143 | if not server.get('ready', bool(server['url'])): 144 | app_log.warning( 145 | "Not culling not-ready not-pending server %s: %s", 146 | log_name, server) 147 | return False 148 | 149 | if server.get('started'): 150 | age = now - parse_date(server['started']) 151 | else: 152 | # started may be undefined on jupyterhub < 0.9 153 | age = None 154 | 155 | # check last activity 156 | # last_activity can be None in 0.9 157 | if server['last_activity']: 158 | inactive = now - parse_date(server['last_activity']) 159 | else: 160 | # no activity yet, use start date 161 | # last_activity may be None with jupyterhub 0.9, 162 | # which introduces the 'started' field which is never None 163 | # for running servers 164 | inactive = age 165 | 166 | should_cull = (inactive is not None and 167 | inactive.total_seconds() >= inactive_limit) 168 | if should_cull: 169 | app_log.info( 170 | "Culling server %s (inactive for %s)", 171 | log_name, format_td(inactive)) 172 | 173 | if max_age and not should_cull: 174 | # only check started if max_age is specified 175 | # so that we can still be compatible with jupyterhub 0.8 176 | # which doesn't define the 'started' field 177 | if age is not None and age.total_seconds() >= max_age: 178 | app_log.info( 179 | "Culling server %s (age: %s, inactive for %s)", 180 | log_name, format_td(age), format_td(inactive)) 181 | should_cull = True 182 | 183 | if not should_cull: 184 | app_log.debug( 185 | "Not culling server %s (age: %s, inactive for %s)", 186 | log_name, format_td(age), format_td(inactive)) 187 | return False 188 | 189 | req = HTTPRequest( 190 | url=url + '/users/%s/server' % quote(user['name']), 191 | method='DELETE', 192 | headers=auth_header, 193 | ) 194 | resp = yield fetch(req) 195 | if resp.code == 202: 196 | app_log.warning( 197 | "Server %s is slow to stop", 198 | log_name, 199 | ) 200 | # return False to prevent culling user with pending shutdowns 201 | return False 202 | return True 203 | 204 | @coroutine 205 | def handle_user(user): 206 | """Handle one user. 207 | 208 | Create a list of their servers, and async exec them. Wait for 209 | that to be done, and if all servers are stopped, possibly cull 210 | the user. 211 | """ 212 | # shutdown servers first. 213 | # Hub doesn't allow deleting users with running servers. 214 | # jupyterhub 0.9 always provides a 'servers' model. 215 | # 0.8 only does this when named servers are enabled. 216 | if 'servers' in user: 217 | servers = user['servers'] 218 | else: 219 | # jupyterhub < 0.9 without named servers enabled. 220 | # create servers dict with one entry for the default server 221 | # from the user model. 222 | # only if the server is running. 
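# For example, a pre-0.9 user model carries its default server inline
# (hypothetical values), which the code below converts into the same
# servers dict shape used by newer releases:
#
#     {'name': 'user1', 'server': '/user/user1/', 'pending': None,
#      'last_activity': '2019-01-01T00:00:00Z'}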
223 | servers = {} 224 | if user['server']: 225 | servers[''] = { 226 | 'last_activity': user['last_activity'], 227 | 'pending': user['pending'], 228 | 'url': user['server'], 229 | } 230 | server_futures = [ 231 | handle_server(user, server_name, server) 232 | for server_name, server in servers.items() 233 | ] 234 | results = yield multi(server_futures) 235 | if not cull_users: 236 | return 237 | # some servers are still running, cannot cull users 238 | still_alive = len(results) - sum(results) 239 | if still_alive: 240 | app_log.debug( 241 | "Not culling user %s with %i servers still alive", 242 | user['name'], still_alive) 243 | return False 244 | 245 | should_cull = False 246 | if user.get('created'): 247 | age = now - parse_date(user['created']) 248 | else: 249 | # created may be undefined on jupyterhub < 0.9 250 | age = None 251 | 252 | # check last activity 253 | # last_activity can be None in 0.9 254 | if user['last_activity']: 255 | inactive = now - parse_date(user['last_activity']) 256 | else: 257 | # no activity yet, use start date 258 | # last_activity may be None with jupyterhub 0.9, 259 | # which introduces the 'created' field which is never None 260 | inactive = age 261 | 262 | should_cull = (inactive is not None and 263 | inactive.total_seconds() >= inactive_limit) 264 | if should_cull: 265 | app_log.info( 266 | "Culling user %s (inactive for %s)", 267 | user['name'], inactive) 268 | 269 | if max_age and not should_cull: 270 | # only check created if max_age is specified 271 | # so that we can still be compatible with jupyterhub 0.8 272 | # which doesn't define the 'started' field 273 | if age is not None and age.total_seconds() >= max_age: 274 | app_log.info( 275 | "Culling user %s (age: %s, inactive for %s)", 276 | user['name'], format_td(age), format_td(inactive)) 277 | should_cull = True 278 | 279 | if not should_cull: 280 | app_log.debug( 281 | "Not culling user %s (created: %s, last active: %s)", 282 | user['name'], format_td(age), format_td(inactive)) 283 | return False 284 | 285 | req = HTTPRequest( 286 | url=url + '/users/%s' % user['name'], 287 | method='DELETE', 288 | headers=auth_header, 289 | ) 290 | yield fetch(req) 291 | return True 292 | 293 | for user in users: 294 | futures.append((user['name'], handle_user(user))) 295 | 296 | for (name, f) in futures: 297 | try: 298 | result = yield f 299 | except Exception: 300 | app_log.exception("Error processing %s", name) 301 | else: 302 | if result: 303 | app_log.debug("Finished culling %s", name) 304 | 305 | 306 | if __name__ == '__main__': 307 | define( 308 | 'url', 309 | default=os.environ.get('JUPYTERHUB_API_URL'), 310 | help="The JupyterHub API URL", 311 | ) 312 | define('timeout', default=600, help="The idle timeout (in seconds)") 313 | define('cull_every', default=0, 314 | help="The interval (in seconds) for checking for idle servers to cull") 315 | define('max_age', default=0, 316 | help="The maximum age (in seconds) of servers that should be culled even if they are active") 317 | define('cull_users', default=False, 318 | help="""Cull users in addition to servers. 319 | This is for use in temporary-user cases such as tmpnb.""", 320 | ) 321 | define('concurrency', default=10, 322 | help="""Limit the number of concurrent requests made to the Hub. 323 | 324 | Deleting a lot of users at the same time can slow down the Hub, 325 | so limit the number of API requests we have outstanding at any given time. 
326 | """ 327 | ) 328 | 329 | parse_command_line() 330 | if not options.cull_every: 331 | options.cull_every = options.timeout // 2 332 | api_token = os.environ['JUPYTERHUB_API_TOKEN'] 333 | 334 | try: 335 | AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient") 336 | except ImportError as e: 337 | app_log.warning( 338 | "Could not load pycurl: %s\n" 339 | "pycurl is recommended if you have a large number of users.", 340 | e) 341 | 342 | loop = IOLoop.current() 343 | cull = partial( 344 | cull_idle, 345 | url=options.url, 346 | api_token=api_token, 347 | inactive_limit=options.timeout, 348 | cull_users=options.cull_users, 349 | max_age=options.max_age, 350 | concurrency=options.concurrency, 351 | ) 352 | # schedule first cull immediately 353 | # because PeriodicCallback doesn't start until the end of the first interval 354 | loop.add_callback(cull) 355 | # schedule periodic cull 356 | pc = PeriodicCallback(cull, 1e3 * options.cull_every) 357 | pc.start() 358 | try: 359 | loop.start() 360 | except KeyboardInterrupt: 361 | pass 362 | -------------------------------------------------------------------------------- /jupyterhub/src/scripts/cull-idle-servers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | exec python `dirname $0`/cull-idle-servers.py "$@" 4 | -------------------------------------------------------------------------------- /jupyterhub/src/scripts/delete-projects.py: -------------------------------------------------------------------------------- 1 | import threading 2 | import time 3 | import os 4 | 5 | from collections import namedtuple 6 | 7 | from kubernetes.client.rest import ApiException 8 | 9 | from kubernetes.client.configuration import Configuration 10 | from kubernetes.config.incluster_config import load_incluster_config 11 | from kubernetes.client.api_client import ApiClient 12 | from openshift.dynamic import DynamicClient, Resource 13 | 14 | service_account_path = '/var/run/secrets/kubernetes.io/serviceaccount' 15 | 16 | with open(os.path.join(service_account_path, 'namespace')) as fp: 17 | namespace = fp.read().strip() 18 | 19 | with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace') as fp: 20 | namespace = fp.read().strip() 21 | 22 | workshop_name = os.environ.get('WORKSHOP_NAME') 23 | 24 | application_name = os.environ.get('APPLICATION_NAME') 25 | 26 | if not application_name: 27 | workshop_name = application_name 28 | 29 | if not workshop_name: 30 | workshop_name = 'homeroom' 31 | 32 | service_account_name = '%s-spawner' % application_name 33 | 34 | full_service_account_name = 'system:serviceaccount:%s:%s' % (namespace, 35 | service_account_name) 36 | 37 | load_incluster_config() 38 | 39 | import urllib3 40 | urllib3.disable_warnings() 41 | instance = Configuration() 42 | instance.verify_ssl = False 43 | Configuration.set_default(instance) 44 | 45 | api_client = DynamicClient(ApiClient()) 46 | 47 | pod_resource = api_client.resources.get( 48 | api_version='v1', kind='Pod') 49 | 50 | service_account_resource = api_client.resources.get( 51 | api_version='v1', kind='ServiceAccount') 52 | 53 | namespace_resource = api_client.resources.get( 54 | api_version='v1', kind='Namespace') 55 | 56 | role_binding_resource = api_client.resources.get( 57 | api_version='rbac.authorization.k8s.io/v1', kind='RoleBinding') 58 | 59 | project_cache = {} 60 | account_cache = {} 61 | orphan_cache = {} 62 | 63 | Namespace = namedtuple('Namespace', ['name', 'account', 'pod']) 64 | 65 | def 
get_projects(): 66 | project_details = [] 67 | 68 | try: 69 | projects = namespace_resource.get(namespace=namespace) 70 | 71 | for project in projects.items: 72 | annotations = project.metadata.annotations 73 | if annotations: 74 | if (annotations['spawner/requestor'] == full_service_account_name and 75 | annotations['spawner/namespace'] == namespace and 76 | annotations['spawner/deployment'] == application_name): 77 | project_details.append(Namespace(project.metadata.name, 78 | annotations['spawner/account'], 79 | annotations['spawner/session'])) 80 | 81 | except Exception as e: 82 | print('ERROR: failed to list projects:', e) 83 | 84 | return project_details 85 | 86 | def get_accounts(): 87 | account_details = [] 88 | 89 | try: 90 | accounts = service_account_resource.get(namespace=namespace) 91 | 92 | for account in accounts.items: 93 | labels = account.metadata.labels 94 | application_label = labels and labels['app'] 95 | if application_label == application_name and labels['user']: 96 | account_details.append(account) 97 | 98 | except Exception as e: 99 | print('ERROR: failed to list accounts:', e) 100 | 101 | return account_details 102 | 103 | def pod_exists(name): 104 | try: 105 | pod_resource.get(namespace=namespace, name=name) 106 | 107 | return True 108 | 109 | except ApiException as e: 110 | if e.status != 404: 111 | print('ERROR: failed to lookup pod %s:' % name, e) 112 | 113 | except Exception as e: 114 | print('ERROR: failed to lookup pod %s:' % name, e) 115 | 116 | return False 117 | 118 | def namespaced_resources(): 119 | api_groups = api_client.resources.parse_api_groups() 120 | 121 | for api in api_groups.values(): 122 | for domain, items in api.items(): 123 | for version, group in items.items(): 124 | try: 125 | api_version = '%s/%s' % (domain, version) if domain else version 126 | for kind in group.resources: 127 | resource = api_client.resources.get( 128 | api_version=api_version, kind=kind) 129 | if type(resource) == Resource and resource.namespaced: 130 | yield resource 131 | except Exception: 132 | pass 133 | 134 | def purge_project(name): 135 | for resource_type in namespaced_resources(): 136 | try: 137 | objects = resource_type.get(namespace=name) 138 | for obj in objects.items: 139 | if obj.metadata.deletionTimestamp and obj.metadata.finalizers: 140 | # Since the project is stuck in terminating, we 141 | # remove any finalizers which might be blocking 142 | # it. Finalizers can be left around with nothing 143 | # to remove them because there is no guarantee of 144 | # the order in which resources are deleted when a 145 | # project is deleted. Thus an application, for 146 | # example an operator which would remove the 147 | # finalizer when a CRD is deleted, might get 148 | # deleted before the objects with the finalizer, 149 | # and so the objects can't then be deleted.
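# Rendered as JSON for a hypothetical stuck ConfigMap, the merge patch
# constructed below would look like this; a null value for 'finalizers'
# removes the field, unblocking deletion:
#
#     {"kind": "ConfigMap", "apiVersion": "v1",
#      "metadata": {"name": "example", "finalizers": null}}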
150 | 151 | body = { 152 | 'kind': obj.kind, 153 | 'apiVersion': obj.apiVersion, 154 | 'metadata': { 155 | 'name': obj.metadata.name, 156 | 'finalizers': None 157 | } 158 | } 159 | 160 | print('WARNING: deleting finalizers on resource: %s' % body) 161 | 162 | try: 163 | resource_type.patch(namespace=name, body=body, 164 | content_type='application/merge-patch+json') 165 | 166 | except ApiException as e: 167 | print('ERROR: failed to delete finalizers: %s' % body, e) 168 | 169 | except Exception as e: 170 | print('ERROR: failed to delete finalizers: %s' % body, e) 171 | 172 | except ApiException as e: 173 | if e.status not in (403, 404, 405): 174 | print('ERROR: failed to query resources %s' % resource_type, e) 175 | 176 | except Exception as e: 177 | print('ERROR: failed to query resources %s' % resource_type, e) 178 | 179 | 180 | 181 | def delete_project(name): 182 | try: 183 | namespace_resource.delete(name=name) 184 | 185 | print('INFO: deleted project %s' % name) 186 | 187 | except ApiException as e: 188 | if e.status == 409: 189 | print('WARNING: project %s is still terminating' % name) 190 | purge_project(name) 191 | elif e.status != 404: 192 | print('ERROR: failed to delete project %s:' % name, e) 193 | else: 194 | print('INFO: project %s already deleted' % name) 195 | 196 | except Exception as e: 197 | print('ERROR: failed to delete project %s:' % name, e) 198 | 199 | def delete_account(name): 200 | try: 201 | service_account_resource.delete(namespace=namespace, name=name) 202 | print('INFO: deleted account %s' % name) 203 | 204 | except ApiException as e: 205 | if e.status != 404: 206 | print('ERROR: failed to delete account %s:' % name, e) 207 | else: 208 | print('INFO: account %s already deleted' % name) 209 | 210 | except Exception as e: 211 | print('ERROR: failed to delete account %s:' % name, e) 212 | 213 | def purge(): 214 | now = time.time() 215 | 216 | projects = get_projects() 217 | 218 | if projects: 219 | print('INFO: checking for projects to be deleted: %s' % projects) 220 | 221 | for project in projects: 222 | if project not in project_cache: 223 | project_cache[project] = now 224 | 225 | account_cache.setdefault(project.account, set()).add(project) 226 | 227 | for project in projects: 228 | if pod_exists(project.pod): 229 | project_cache[project] = now 230 | 231 | for project, last_seen in list(project_cache.items()): 232 | if now - last_seen > 90.0: 233 | account_cache[project.account].remove(project) 234 | 235 | if not account_cache[project.account]: 236 | delete_account(project.account) 237 | 238 | del account_cache[project.account] 239 | 240 | delete_project(project.name) 241 | 242 | del project_cache[project] 243 | 244 | accounts = get_accounts() 245 | 246 | for account in accounts: 247 | name = account.metadata.name 248 | if name not in account_cache: 249 | if name not in orphan_cache: 250 | orphan_cache[name] = now 251 | 252 | for name, last_seen in list(orphan_cache.items()): 253 | if name in account_cache: 254 | del orphan_cache[name] 255 | 256 | elif now - last_seen > 90.0: 257 | delete_account(name) 258 | 259 | del orphan_cache[name] 260 | 261 | def loop(): 262 | while True: 263 | try: 264 | purge() 265 | except Exception as e: 266 | print('ERROR: unexpected exception:', e) 267 | 268 | 269 | time.sleep(60.0) 270 | 271 | thread = threading.Thread(target=loop) 272 | thread.daemon = True 273 | thread.start() 274 | 275 | thread.join() 276 | --------------------------------------------------------------------------------
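A note on delete-projects.py above: the purge loop only deletes a project or service account once its session pod has been absent for a full grace period across successive polls (a 60 second poll interval with a 90 second grace). Reduced to its essentials, that pattern is the following, an illustrative sketch only, with a hypothetical should_delete() helper standing in for the script's real caches:

    import time

    last_seen = {}  # resource name -> time its pod was last observed

    def should_delete(name, pod_is_running, grace=90.0):
        # Seeing the pod resets the clock; a resource only becomes
        # deletable once its pod has been gone longer than the grace
        # period, which guards against transient lookup failures.
        now = time.time()
        if pod_is_running:
            last_seen[name] = now
            return False
        return now - last_seen.setdefault(name, now) > grace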
/jupyterhub/src/scripts/delete-projects.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | exec python `dirname $0`/delete-projects.py 4 | -------------------------------------------------------------------------------- /keycloak/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM jboss/keycloak:7.0.1 2 | 3 | COPY realm.json start-keycloak.sh / 4 | 5 | ENTRYPOINT [ "/start-keycloak.sh" ] 6 | -------------------------------------------------------------------------------- /keycloak/start-keycloak.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x 4 | 5 | SERVER_URL="https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT" 6 | TOKEN=`cat /var/run/secrets/kubernetes.io/serviceaccount/token` 7 | NAMESPACE=`cat /var/run/secrets/kubernetes.io/serviceaccount/namespace` 8 | 9 | NAMESPACE_URL="$SERVER_URL/apis/route.openshift.io/v1/namespaces/$NAMESPACE" 10 | ROUTES_URL="$NAMESPACE_URL/routes/$SPAWNER_APPLICATION" 11 | 12 | SPAWNER_HOSTNAME=`curl -s -k -H "Authorization: Bearer $TOKEN" $ROUTES_URL | \ 13 | grep '"host"' | head -1 | sed -e 's/^.*host": "//' -e 's/".*//'` 14 | 15 | KEYCLOAK_ARGS= 16 | 17 | cat /realm.json | sed \ 18 | -e "s/{{ *SPAWNER_HOSTNAME *}}/$SPAWNER_HOSTNAME/g" \ 19 | -e "s/{{ *OAUTH_CLIENT_SECRET *}}/$OAUTH_CLIENT_SECRET/g" > /tmp/realm.json 20 | 21 | KEYCLOAK_ARGS="$KEYCLOAK_ARGS -Dkeycloak.import=/tmp/realm.json" 22 | KEYCLOAK_ARGS="$KEYCLOAK_ARGS -Dkeycloak.migration.strategy=IGNORE_EXISTING" 23 | 24 | exec /opt/jboss/tools/docker-entrypoint.sh $KEYCLOAK_ARGS "$@" 25 | -------------------------------------------------------------------------------- /resources/bases/spawner-resources/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | commonLabels: 5 | class: spawner 6 | 7 | resources: 8 | - spawner-service-account.yaml 9 | - spawner-basic-role-binding.yaml 10 | - spawner-environ-config-map.yaml 11 | - spawner-configs-config-map.yaml 12 | - session-envvars-config-map.yaml 13 | - spawner-deployment.yaml 14 | - spawner-service.yaml 15 | - spawner-ingress.yaml 16 | 17 | images: 18 | - name: workshop-spawner 19 | newName: quay.io/openshifthomeroom/workshop-spawner 20 | newTag: latest 21 | -------------------------------------------------------------------------------- /resources/bases/spawner-resources/session-envvars-config-map.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: session-envvars 5 | data: 6 | gateway.sh: "" 7 | terminal.sh: "" 8 | workshop.sh: "" 9 | -------------------------------------------------------------------------------- /resources/bases/spawner-resources/spawner-basic-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: spawner-basic 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: edit 9 | subjects: 10 | - kind: ServiceAccount 11 | name: spawner 12 | -------------------------------------------------------------------------------- /resources/bases/spawner-resources/spawner-configs-config-map.yaml: -------------------------------------------------------------------------------- 1 | 
apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: spawner-configs 5 | data: 6 | jupyterhub_config.sh: "" 7 | jupyterhub_config.py: "" 8 | -------------------------------------------------------------------------------- /resources/bases/spawner-resources/spawner-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: spawner 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | deployment: spawner 10 | strategy: 11 | type: Recreate 12 | template: 13 | metadata: 14 | labels: 15 | deployment: spawner 16 | spec: 17 | serviceAccountName: spawner 18 | containers: 19 | - name: spawner 20 | image: workshop-spawner 21 | envFrom: 22 | - configMapRef: 23 | name: spawner-environ 24 | ports: 25 | - containerPort: 8080 26 | protocol: TCP 27 | resources: 28 | limits: 29 | memory: 768Mi 30 | requests: 31 | memory: 768Mi 32 | volumeMounts: 33 | - mountPath: /opt/app-root/configs 34 | name: configs 35 | volumes: 36 | - name: configs 37 | configMap: 38 | defaultMode: 420 39 | name: spawner-configs 40 | -------------------------------------------------------------------------------- /resources/bases/spawner-resources/spawner-environ-config-map.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: spawner-environ 5 | data: 6 | TERMINAL_IMAGE: quay.io/openshifthomeroom/workshop-terminal:latest 7 | -------------------------------------------------------------------------------- /resources/bases/spawner-resources/spawner-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: spawner 5 | annotations: 6 | homeroom/group: homeroom 7 | spec: 8 | rules: 9 | - host: spawner.svc 10 | http: 11 | paths: 12 | - path: "/" 13 | backend: 14 | serviceName: spawner 15 | servicePort: 8080 16 | -------------------------------------------------------------------------------- /resources/bases/spawner-resources/spawner-service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: spawner 5 | -------------------------------------------------------------------------------- /resources/bases/spawner-resources/spawner-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: spawner 5 | spec: 6 | ports: 7 | - name: 8080-tcp 8 | port: 8080 9 | protocol: TCP 10 | targetPort: 8080 11 | - name: 8081-tcp 12 | port: 8081 13 | protocol: TCP 14 | targetPort: 8081 15 | selector: 16 | deployment: spawner 17 | -------------------------------------------------------------------------------- /resources/overlays/hosted-workshop/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | bases: 5 | - ../../bases/spawner-resources 6 | 7 | commonLabels: 8 | app: hosted-workshop 9 | spawner: hosted-workshop 10 | 11 | resources: 12 | - spawner-console-oauth-client.yaml 13 | - spawner-data-persistent-volume-claim.yaml 14 | 15 | patchesJson6902: 16 | - path: spawner-deployment-patch.yaml 17 | target: 18 | group: apps 19 | version: v1 20 | kind: Deployment 21 | name: spawner 22 | 23 | configMapGenerator: 
24 | - name: spawner-environ 25 | behavior: merge 26 | literals: 27 | - CONFIGURATION_TYPE="hosted-workshop" 28 | - OAUTH_CLIENT_SECRET="0123456789abcdef0123456789abcdef" 29 | - RESOURCE_BUDGET="default" 30 | - IDLE_TIMEOUT="7200" 31 | -------------------------------------------------------------------------------- /resources/overlays/hosted-workshop/spawner-console-oauth-client.yaml: -------------------------------------------------------------------------------- 1 | kind: OAuthClient 2 | apiVersion: oauth.openshift.io/v1 3 | metadata: 4 | name: console 5 | secret: 0123456789abcdef0123456789abcdef 6 | grantMethod: auto 7 | redirectURIs: 8 | - http://spawner.svc/hub/oauth_callback 9 | -------------------------------------------------------------------------------- /resources/overlays/hosted-workshop/spawner-data-persistent-volume-claim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: spawner-data 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | resources: 9 | requests: 10 | storage: 1Gi 11 | -------------------------------------------------------------------------------- /resources/overlays/hosted-workshop/spawner-deployment-patch.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /spec/template/spec/containers/0/volumeMounts/- 3 | value: 4 | mountPath: /opt/app-root/data 5 | name: data 6 | - op: add 7 | path: /spec/template/spec/volumes/- 8 | value: 9 | name: data 10 | persistentVolumeClaim: 11 | claimName: spawner-data 12 | -------------------------------------------------------------------------------- /resources/overlays/learning-portal/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | bases: 5 | - ../../bases/spawner-resources 6 | 7 | commonLabels: 8 | app: learning-portal 9 | spawner: learning-portal 10 | 11 | resources: 12 | - spawner-basic-cluster-role-binding.yaml 13 | - spawner-extra-cluster-role.yaml 14 | - spawner-extra-cluster-role-binding.yaml 15 | - spawner-rules-cluster-role.yaml 16 | - spawner-rules-cluster-role-binding.yaml 17 | - session-rules-cluster-role.yaml 18 | - session-rules-cluster-role-binding.yaml 19 | - session-resources-config-map.yaml 20 | 21 | patchesJson6902: 22 | - path: spawner-deployment-patch.yaml 23 | target: 24 | group: apps 25 | version: v1 26 | kind: Deployment 27 | name: spawner 28 | 29 | configMapGenerator: 30 | - name: spawner-environ 31 | behavior: merge 32 | literals: 33 | - CONFIGURATION_TYPE="learning-portal" 34 | - SERVER_LIMIT="8" 35 | - RESOURCE_BUDGET="default" 36 | -------------------------------------------------------------------------------- /resources/overlays/learning-portal/session-resources-config-map.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: session-resources 5 | data: 6 | extra_resources.yaml: "" 7 | -------------------------------------------------------------------------------- /resources/overlays/learning-portal/session-rules-cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: session-rules 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | 
name: session-rules 9 | subjects: 10 | - kind: ServiceAccount 11 | name: spawner 12 | -------------------------------------------------------------------------------- /resources/overlays/learning-portal/session-rules-cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: session-rules 5 | rules: [] 6 | -------------------------------------------------------------------------------- /resources/overlays/learning-portal/spawner-basic-cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: spawner-basic 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: admin 9 | subjects: 10 | - kind: ServiceAccount 11 | name: spawner 12 | -------------------------------------------------------------------------------- /resources/overlays/learning-portal/spawner-deployment-patch.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /spec/template/spec/containers/0/volumeMounts/- 3 | value: 4 | mountPath: /opt/app-root/resources 5 | name: resources 6 | - op: add 7 | path: /spec/template/spec/volumes/- 8 | value: 9 | name: resources 10 | configMap: 11 | defaultMode: 420 12 | name: session-resources 13 | -------------------------------------------------------------------------------- /resources/overlays/learning-portal/spawner-extra-cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: spawner-extra 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: spawner-extra 9 | subjects: 10 | - kind: ServiceAccount 11 | name: spawner 12 | -------------------------------------------------------------------------------- /resources/overlays/learning-portal/spawner-extra-cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: spawner-extra 5 | rules: 6 | - apiGroups: 7 | - rbac.authorization.k8s.io 8 | resourceNames: 9 | - view 10 | - edit 11 | - admin 12 | resources: 13 | - clusterroles 14 | - roles 15 | verbs: 16 | - bind 17 | - apiGroups: 18 | - rbac.authorization.k8s.io 19 | resources: 20 | - clusterroles 21 | - clusterrolebindings 22 | - rolebindings 23 | verbs: 24 | - create 25 | - delete 26 | - deletecollection 27 | - get 28 | - list 29 | - patch 30 | - update 31 | - watch 32 | - apiGroups: 33 | - "" 34 | resources: 35 | - limitranges 36 | - resourcequotas 37 | verbs: 38 | - create 39 | - delete 40 | - deletecollection 41 | - get 42 | - list 43 | - patch 44 | - update 45 | - watch 46 | - apiGroups: 47 | - "" 48 | resources: 49 | - namespaces 50 | verbs: 51 | - create 52 | - delete 53 | - deletecollection 54 | - get 55 | - list 56 | - patch 57 | - update 58 | - watch 59 | -------------------------------------------------------------------------------- /resources/overlays/learning-portal/spawner-rules-cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: spawner-rules 5 | roleRef: 6 | 
apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: spawner-rules 9 | subjects: 10 | - kind: ServiceAccount 11 | name: spawner 12 | -------------------------------------------------------------------------------- /resources/overlays/learning-portal/spawner-rules-cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: spawner-rules 5 | rules: [] 6 | -------------------------------------------------------------------------------- /templates/hosted-workshop-development.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "Template", 3 | "apiVersion": "v1", 4 | "metadata": { 5 | "name": "hosted-workshop-development", 6 | "annotations": { 7 | "openshift.io/display-name": "Hosted Workshop (Development)" 8 | } 9 | }, 10 | "parameters": [ 11 | { 12 | "name": "SPAWNER_NAMESPACE", 13 | "value": "", 14 | "required": true 15 | }, 16 | { 17 | "name": "CLUSTER_SUBDOMAIN", 18 | "value": "", 19 | "required": true 20 | }, 21 | { 22 | "name": "WORKSHOP_NAME", 23 | "value": "hosted-workshop", 24 | "required": true 25 | }, 26 | { 27 | "name": "NAME_PREFIX", 28 | "value": "", 29 | "required": false 30 | }, 31 | { 32 | "name": "HOMEROOM_NAME", 33 | "value": "" 34 | }, 35 | { 36 | "name": "HOMEROOM_LINK", 37 | "value": "" 38 | }, 39 | { 40 | "name": "WORKSHOP_TITLE", 41 | "value": "" 42 | }, 43 | { 44 | "name": "WORKSHOP_DESCRIPTION", 45 | "value": "" 46 | }, 47 | { 48 | "name": "GIT_REPOSITORY_URL", 49 | "value": "https://github.com/openshift-homeroom/workshop-spawner.git", 50 | "required": true 51 | }, 52 | { 53 | "name": "GIT_REPOSITORY_REF", 54 | "value": "develop", 55 | "required": true 56 | }, 57 | { 58 | "name": "SPAWNER_MEMORY", 59 | "value": "768Mi" 60 | }, 61 | { 62 | "name": "WORKSHOP_MEMORY", 63 | "value": "512Mi" 64 | }, 65 | { 66 | "name": "CONSOLE_MEMORY", 67 | "value": "128Mi" 68 | }, 69 | { 70 | "name": "CONSOLE_BRANDING", 71 | "value": "openshift" 72 | }, 73 | { 74 | "name": "CONSOLE_IMAGE", 75 | "value": "quay.io/openshift/origin-console:4.2", 76 | "required": true 77 | }, 78 | { 79 | "name": "VOLUME_SIZE", 80 | "value": "" 81 | }, 82 | { 83 | "name": "ADMIN_USERS", 84 | "value": "" 85 | }, 86 | { 87 | "name": "OPENSHIFT_PROJECT", 88 | "value": "" 89 | }, 90 | { 91 | "name": "IDLE_TIMEOUT", 92 | "value": "7200" 93 | }, 94 | { 95 | "name": "WORKSHOP_IMAGE", 96 | "value": "", 97 | "required": false 98 | }, 99 | { 100 | "name": "TERMINAL_ENVVARS", 101 | "value": "" 102 | }, 103 | { 104 | "name": "WORKSHOP_ENVVARS", 105 | "value": "" 106 | }, 107 | { 108 | "name": "GATEWAY_ENVVARS", 109 | "value": "" 110 | }, 111 | { 112 | "name": "DOWNLOAD_URL", 113 | "value": "" 114 | }, 115 | { 116 | "name": "WORKSHOP_FILE", 117 | "value": "" 118 | }, 119 | { 120 | "name": "OC_VERSION", 121 | "value": "" 122 | }, 123 | { 124 | "name": "ODO_VERSION", 125 | "value": "" 126 | }, 127 | { 128 | "name": "KUBECTL_VERSION", 129 | "value": "" 130 | }, 131 | { 132 | "name": "JUPYTERHUB_CONFIG", 133 | "value": "", 134 | "required": false 135 | }, 136 | { 137 | "name": "JUPYTERHUB_ENVVARS", 138 | "value": "", 139 | "required": false 140 | }, 141 | { 142 | "name": "LETS_ENCRYPT", 143 | "value": "false", 144 | "required": true 145 | }, 146 | { 147 | "name": "OAUTH_CLIENT_SECRET", 148 | "generate": "expression", 149 | "from": "[a-zA-Z0-9]{32}" 150 | } 151 | ], 152 | "objects": [ 153 | { 154 | "kind": "ServiceAccount", 155 | 
"apiVersion": "v1", 156 | "metadata": { 157 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 158 | "labels": { 159 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 160 | "spawner": "hosted-workshop", 161 | "class": "spawner" 162 | } 163 | } 164 | }, 165 | { 166 | "kind": "OAuthClient", 167 | "apiVersion": "oauth.openshift.io/v1", 168 | "metadata": { 169 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-console", 170 | "labels": { 171 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 172 | "spawner": "hosted-workshop", 173 | "class": "spawner" 174 | } 175 | }, 176 | "secret": "${OAUTH_CLIENT_SECRET}", 177 | "grantMethod": "auto", 178 | "redirectURIs": [ 179 | "https://${WORKSHOP_NAME}-${SPAWNER_NAMESPACE}.${CLUSTER_SUBDOMAIN}/hub/oauth_callback" 180 | ] 181 | }, 182 | { 183 | "kind": "RoleBinding", 184 | "apiVersion": "v1", 185 | "metadata": { 186 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-basic", 187 | "labels": { 188 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 189 | "spawner": "hosted-workshop", 190 | "class": "spawner" 191 | } 192 | }, 193 | "subjects": [ 194 | { 195 | "kind": "ServiceAccount", 196 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner" 197 | } 198 | ], 199 | "roleRef": { 200 | "name": "edit" 201 | } 202 | }, 203 | { 204 | "kind": "ImageStream", 205 | "apiVersion": "v1", 206 | "metadata": { 207 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 208 | "labels": { 209 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 210 | "spawner": "hosted-workshop", 211 | "class": "spawner" 212 | } 213 | } 214 | }, 215 | { 216 | "kind": "ConfigMap", 217 | "apiVersion": "v1", 218 | "metadata": { 219 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner-configs", 220 | "labels": { 221 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 222 | "spawner": "hosted-workshop", 223 | "class": "spawner" 224 | } 225 | }, 226 | "data": { 227 | "jupyterhub_config.py": "${JUPYTERHUB_CONFIG}", 228 | "jupyterhub_config.sh": "${JUPYTERHUB_ENVVARS}" 229 | } 230 | }, 231 | { 232 | "kind": "ConfigMap", 233 | "apiVersion": "v1", 234 | "metadata": { 235 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-session-envvars", 236 | "labels": { 237 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 238 | "spawner": "hosted-workshop", 239 | "class": "spawner" 240 | } 241 | }, 242 | "data": { 243 | "terminal.sh": "${TERMINAL_ENVVARS}", 244 | "workshop.sh": "${WORKSHOP_ENVVARS}", 245 | "gateway.sh": "${GATEWAY_ENVVARS}" 246 | } 247 | }, 248 | { 249 | "kind": "BuildConfig", 250 | "apiVersion": "v1", 251 | "metadata": { 252 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 253 | "labels": { 254 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 255 | "spawner": "hosted-workshop", 256 | "class": "spawner" 257 | } 258 | }, 259 | "spec": { 260 | "triggers": [ 261 | { 262 | "type": "ConfigChange" 263 | }, 264 | { 265 | "type": "ImageChange" 266 | } 267 | ], 268 | "source": { 269 | "type": "Git", 270 | "git": { 271 | "uri": "${GIT_REPOSITORY_URL}", 272 | "ref": "${GIT_REPOSITORY_REF}" 273 | }, 274 | "contextDir": "jupyterhub" 275 | }, 276 | "strategy": { 277 | "type": "Docker", 278 | "dockerStrategy": { 279 | "from": { 280 | "kind": "DockerImage", 281 | "name": "centos/s2i-base-centos7:latest" 282 | } 283 | } 284 | }, 285 | "output": { 286 | "to": { 287 | "kind": "ImageStreamTag", 288 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner:latest" 289 | } 290 | } 291 | } 292 | }, 293 | { 294 | "kind": "DeploymentConfig", 295 | "apiVersion": "v1", 296 | "metadata": { 297 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 298 | "labels": { 299 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 300 | 
"spawner": "hosted-workshop", 301 | "class": "spawner" 302 | } 303 | }, 304 | "spec": { 305 | "strategy": { 306 | "type": "Recreate" 307 | }, 308 | "triggers": [ 309 | { 310 | "type": "ConfigChange" 311 | }, 312 | { 313 | "type": "ImageChange", 314 | "imageChangeParams": { 315 | "automatic": true, 316 | "containerNames": [ 317 | "spawner" 318 | ], 319 | "from": { 320 | "kind": "ImageStreamTag", 321 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner:latest" 322 | } 323 | } 324 | } 325 | ], 326 | "replicas": 1, 327 | "selector": { 328 | "deploymentconfig": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner" 329 | }, 330 | "template": { 331 | "metadata": { 332 | "labels": { 333 | "deploymentconfig": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 334 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 335 | "spawner": "hosted-workshop", 336 | "class": "spawner" 337 | } 338 | }, 339 | "spec": { 340 | "serviceAccountName": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 341 | "containers": [ 342 | { 343 | "name": "spawner", 344 | "image": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner:latest", 345 | "ports": [ 346 | { 347 | "containerPort": 8080, 348 | "protocol": "TCP" 349 | } 350 | ], 351 | "resources": { 352 | "limits": { 353 | "memory": "${SPAWNER_MEMORY}" 354 | }, 355 | "requests": { 356 | "memory": "${SPAWNER_MEMORY}" 357 | } 358 | }, 359 | "readinessProbe": { 360 | "httpGet": { 361 | "path": "/hub/api", 362 | "port": 8080, 363 | "scheme": "HTTP" 364 | }, 365 | "periodSeconds": 10, 366 | "successThreshold": 1, 367 | "failureThreshold": 3, 368 | "timeoutSeconds": 1 369 | }, 370 | "livenessProbe": { 371 | "httpGet": { 372 | "path": "/hub/api", 373 | "port": 8080, 374 | "scheme": "HTTP" 375 | }, 376 | "periodSeconds": 10, 377 | "successThreshold": 1, 378 | "failureThreshold": 3, 379 | "timeoutSeconds": 1 380 | }, 381 | "env": [ 382 | { 383 | "name": "CONFIGURATION_TYPE", 384 | "value": "hosted-workshop" 385 | }, 386 | { 387 | "name": "APPLICATION_NAME", 388 | "value": "${NAME_PREFIX}${WORKSHOP_NAME}" 389 | }, 390 | { 391 | "name": "WORKSHOP_NAME", 392 | "value": "${WORKSHOP_NAME}" 393 | }, 394 | { 395 | "name": "HOMEROOM_NAME", 396 | "value": "${HOMEROOM_NAME}" 397 | }, 398 | { 399 | "name": "HOMEROOM_LINK", 400 | "value": "${HOMEROOM_LINK}" 401 | }, 402 | { 403 | "name": "WORKSHOP_IMAGE", 404 | "value": "${WORKSHOP_IMAGE}" 405 | }, 406 | { 407 | "name": "WORKSHOP_MEMORY", 408 | "value": "${WORKSHOP_MEMORY}" 409 | }, 410 | { 411 | "name": "CONSOLE_MEMORY", 412 | "value": "${CONSOLE_MEMORY}" 413 | }, 414 | { 415 | "name": "CONSOLE_BRANDING", 416 | "value": "${CONSOLE_BRANDING}" 417 | }, 418 | { 419 | "name": "CONSOLE_IMAGE", 420 | "value": "${CONSOLE_IMAGE}" 421 | }, 422 | { 423 | "name": "VOLUME_SIZE", 424 | "value": "${VOLUME_SIZE}" 425 | }, 426 | { 427 | "name": "ADMIN_USERS", 428 | "value": "${ADMIN_USERS}" 429 | }, 430 | { 431 | "name": "OPENSHIFT_PROJECT", 432 | "value": "${OPENSHIFT_PROJECT}" 433 | }, 434 | { 435 | "name": "IDLE_TIMEOUT", 436 | "value": "${IDLE_TIMEOUT}" 437 | }, 438 | { 439 | "name": "DOWNLOAD_URL", 440 | "value": "${DOWNLOAD_URL}" 441 | }, 442 | { 443 | "name": "WORKSHOP_FILE", 444 | "value": "${WORKSHOP_FILE}" 445 | }, 446 | { 447 | "name": "OC_VERSION", 448 | "value": "${OC_VERSION}" 449 | }, 450 | { 451 | "name": "ODO_VERSION", 452 | "value": "${ODO_VERSION}" 453 | }, 454 | { 455 | "name": "KUBECTL_VERSION", 456 | "value": "${KUBECTL_VERSION}" 457 | }, 458 | { 459 | "name": "OAUTH_CLIENT_SECRET", 460 | "value": "${OAUTH_CLIENT_SECRET}" 461 | } 462 | ], 463 | "volumeMounts": [ 464 | { 465 | "mountPath": 
"/opt/app-root/data", 466 | "name": "data" 467 | }, 468 | { 469 | "name": "configs", 470 | "mountPath": "/opt/app-root/configs" 471 | } 472 | ] 473 | } 474 | ], 475 | "volumes": [ 476 | { 477 | "name": "data", 478 | "persistentVolumeClaim": { 479 | "claimName": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner-data" 480 | } 481 | }, 482 | { 483 | "name": "configs", 484 | "configMap": { 485 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner-configs", 486 | "defaultMode": 420 487 | } 488 | } 489 | ] 490 | } 491 | } 492 | } 493 | }, 494 | { 495 | "apiVersion": "v1", 496 | "kind": "PersistentVolumeClaim", 497 | "metadata": { 498 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner-data", 499 | "labels": { 500 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 501 | "spawner": "hosted-workshop", 502 | "class": "spawner" 503 | } 504 | }, 505 | "spec": { 506 | "accessModes": [ 507 | "ReadWriteOnce" 508 | ], 509 | "resources": { 510 | "requests": { 511 | "storage": "1Gi" 512 | } 513 | } 514 | } 515 | }, 516 | { 517 | "kind": "Service", 518 | "apiVersion": "v1", 519 | "metadata": { 520 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 521 | "labels": { 522 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 523 | "spawner": "hosted-workshop", 524 | "class": "spawner" 525 | } 526 | }, 527 | "spec": { 528 | "ports": [ 529 | { 530 | "name": "8080-tcp", 531 | "protocol": "TCP", 532 | "port": 8080, 533 | "targetPort": 8080 534 | }, 535 | { 536 | "name": "8081-tcp", 537 | "protocol": "TCP", 538 | "port": 8081, 539 | "targetPort": 8081 540 | } 541 | ], 542 | "selector": { 543 | "deploymentconfig": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner" 544 | } 545 | } 546 | }, 547 | { 548 | "kind": "Route", 549 | "apiVersion": "v1", 550 | "metadata": { 551 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 552 | "labels": { 553 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 554 | "spawner": "hosted-workshop", 555 | "class": "spawner" 556 | }, 557 | "annotations": { 558 | "kubernetes.io/tls-acme": "${LETS_ENCRYPT}", 559 | "homeroom/group": "${HOMEROOM_NAME}", 560 | "homeroom/title": "${WORKSHOP_TITLE}", 561 | "homeroom/description": "${WORKSHOP_DESCRIPTION}" 562 | } 563 | }, 564 | "spec": { 565 | "host": "${WORKSHOP_NAME}-${SPAWNER_NAMESPACE}.${CLUSTER_SUBDOMAIN}", 566 | "to": { 567 | "kind": "Service", 568 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 569 | "weight": 100 570 | }, 571 | "port": { 572 | "targetPort": "8080-tcp" 573 | }, 574 | "tls": { 575 | "termination": "edge", 576 | "insecureEdgeTerminationPolicy": "Redirect" 577 | } 578 | } 579 | }, 580 | { 581 | "kind": "ImageStream", 582 | "apiVersion": "v1", 583 | "metadata": { 584 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-session", 585 | "labels": { 586 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 587 | "spawner": "hosted-workshop", 588 | "class": "spawner" 589 | } 590 | }, 591 | "spec": { 592 | "lookupPolicy": { 593 | "local": true 594 | }, 595 | "tags": [ 596 | { 597 | "name": "latest", 598 | "from": { 599 | "kind": "DockerImage", 600 | "name": "quay.io/openshifthomeroom/workshop-dashboard:5.0.0" 601 | } 602 | } 603 | ] 604 | } 605 | } 606 | ] 607 | } 608 | -------------------------------------------------------------------------------- /templates/hosted-workshop-production.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "Template", 3 | "apiVersion": "v1", 4 | "metadata": { 5 | "name": "hosted-workshop-production", 6 | "annotations": { 7 | "openshift.io/display-name": "Hosted Workshop (Production)" 8 | } 9 | }, 10 | "parameters": [ 
11 | { 12 | "name": "SPAWNER_NAMESPACE", 13 | "value": "", 14 | "required": true 15 | }, 16 | { 17 | "name": "CLUSTER_SUBDOMAIN", 18 | "value": "", 19 | "required": true 20 | }, 21 | { 22 | "name": "WORKSHOP_NAME", 23 | "value": "hosted-workshop", 24 | "required": true 25 | }, 26 | { 27 | "name": "NAME_PREFIX", 28 | "value": "", 29 | "required": false 30 | }, 31 | { 32 | "name": "HOMEROOM_NAME", 33 | "value": "" 34 | }, 35 | { 36 | "name": "HOMEROOM_LINK", 37 | "value": "" 38 | }, 39 | { 40 | "name": "WORKSHOP_TITLE", 41 | "value": "" 42 | }, 43 | { 44 | "name": "WORKSHOP_DESCRIPTION", 45 | "value": "" 46 | }, 47 | { 48 | "name": "SPAWNER_MEMORY", 49 | "value": "768Mi" 50 | }, 51 | { 52 | "name": "WORKSHOP_MEMORY", 53 | "value": "512Mi" 54 | }, 55 | { 56 | "name": "CONSOLE_MEMORY", 57 | "value": "128Mi" 58 | }, 59 | { 60 | "name": "CONSOLE_BRANDING", 61 | "value": "openshift" 62 | }, 63 | { 64 | "name": "CONSOLE_IMAGE", 65 | "value": "quay.io/openshift/origin-console:4.2", 66 | "required": true 67 | }, 68 | { 69 | "name": "VOLUME_SIZE", 70 | "value": "" 71 | }, 72 | { 73 | "name": "ADMIN_USERS", 74 | "value": "" 75 | }, 76 | { 77 | "name": "OPENSHIFT_PROJECT", 78 | "value": "" 79 | }, 80 | { 81 | "name": "IDLE_TIMEOUT", 82 | "value": "7200" 83 | }, 84 | { 85 | "name": "WORKSHOP_IMAGE", 86 | "value": "", 87 | "required": false 88 | }, 89 | { 90 | "name": "SPAWNER_IMAGE", 91 | "value": "quay.io/openshifthomeroom/workshop-spawner:7.1.0", 92 | "required": true 93 | }, 94 | { 95 | "name": "TERMINAL_ENVVARS", 96 | "value": "" 97 | }, 98 | { 99 | "name": "WORKSHOP_ENVVARS", 100 | "value": "" 101 | }, 102 | { 103 | "name": "GATEWAY_ENVVARS", 104 | "value": "" 105 | }, 106 | { 107 | "name": "DOWNLOAD_URL", 108 | "value": "" 109 | }, 110 | { 111 | "name": "WORKSHOP_FILE", 112 | "value": "" 113 | }, 114 | { 115 | "name": "OC_VERSION", 116 | "value": "" 117 | }, 118 | { 119 | "name": "ODO_VERSION", 120 | "value": "" 121 | }, 122 | { 123 | "name": "KUBECTL_VERSION", 124 | "value": "" 125 | }, 126 | { 127 | "name": "JUPYTERHUB_CONFIG", 128 | "value": "", 129 | "required": false 130 | }, 131 | { 132 | "name": "JUPYTERHUB_ENVVARS", 133 | "value": "", 134 | "required": false 135 | }, 136 | { 137 | "name": "LETS_ENCRYPT", 138 | "value": "false", 139 | "required": true 140 | }, 141 | { 142 | "name": "OAUTH_CLIENT_SECRET", 143 | "generate": "expression", 144 | "from": "[a-zA-Z0-9]{32}" 145 | } 146 | ], 147 | "objects": [ 148 | { 149 | "kind": "ServiceAccount", 150 | "apiVersion": "v1", 151 | "metadata": { 152 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 153 | "labels": { 154 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 155 | "spawner": "hosted-workshop", 156 | "class": "spawner" 157 | } 158 | } 159 | }, 160 | { 161 | "kind": "OAuthClient", 162 | "apiVersion": "oauth.openshift.io/v1", 163 | "metadata": { 164 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-console", 165 | "labels": { 166 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 167 | "spawner": "hosted-workshop", 168 | "class": "spawner" 169 | } 170 | }, 171 | "secret": "${OAUTH_CLIENT_SECRET}", 172 | "grantMethod": "auto", 173 | "redirectURIs": [ 174 | "https://${WORKSHOP_NAME}-${SPAWNER_NAMESPACE}.${CLUSTER_SUBDOMAIN}/hub/oauth_callback" 175 | ] 176 | }, 177 | { 178 | "kind": "RoleBinding", 179 | "apiVersion": "v1", 180 | "metadata": { 181 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-basic", 182 | "labels": { 183 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 184 | "spawner": "hosted-workshop", 185 | "class": "spawner" 186 | } 187 | }, 188 | "subjects": [ 189 | { 
190 | "kind": "ServiceAccount", 191 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner" 192 | } 193 | ], 194 | "roleRef": { 195 | "name": "edit" 196 | } 197 | }, 198 | { 199 | "kind": "ImageStream", 200 | "apiVersion": "v1", 201 | "metadata": { 202 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 203 | "labels": { 204 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 205 | "spawner": "hosted-workshop", 206 | "class": "spawner" 207 | } 208 | }, 209 | "spec": { 210 | "tags": [ 211 | { 212 | "name": "latest", 213 | "from": { 214 | "kind": "DockerImage", 215 | "name": "${SPAWNER_IMAGE}" 216 | } 217 | } 218 | ] 219 | } 220 | }, 221 | { 222 | "kind": "ConfigMap", 223 | "apiVersion": "v1", 224 | "metadata": { 225 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner-configs", 226 | "labels": { 227 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 228 | "spawner": "hosted-workshop", 229 | "class": "spawner" 230 | } 231 | }, 232 | "data": { 233 | "jupyterhub_config.py": "${JUPYTERHUB_CONFIG}", 234 | "jupyterhub_config.sh": "${JUPYTERHUB_ENVVARS}" 235 | } 236 | }, 237 | { 238 | "kind": "ConfigMap", 239 | "apiVersion": "v1", 240 | "metadata": { 241 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-session-envvars", 242 | "labels": { 243 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 244 | "spawner": "hosted-workshop", 245 | "class": "spawner" 246 | } 247 | }, 248 | "data": { 249 | "terminal.sh": "${TERMINAL_ENVVARS}", 250 | "workshop.sh": "${WORKSHOP_ENVVARS}", 251 | "gateway.sh": "${GATEWAY_ENVVARS}" 252 | } 253 | }, 254 | { 255 | "kind": "DeploymentConfig", 256 | "apiVersion": "v1", 257 | "metadata": { 258 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 259 | "labels": { 260 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 261 | "spawner": "hosted-workshop", 262 | "class": "spawner" 263 | } 264 | }, 265 | "spec": { 266 | "strategy": { 267 | "type": "Recreate" 268 | }, 269 | "triggers": [ 270 | { 271 | "type": "ConfigChange" 272 | }, 273 | { 274 | "type": "ImageChange", 275 | "imageChangeParams": { 276 | "automatic": true, 277 | "containerNames": [ 278 | "spawner" 279 | ], 280 | "from": { 281 | "kind": "ImageStreamTag", 282 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner:latest" 283 | } 284 | } 285 | } 286 | ], 287 | "replicas": 1, 288 | "selector": { 289 | "deploymentconfig": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner" 290 | }, 291 | "template": { 292 | "metadata": { 293 | "labels": { 294 | "deploymentconfig": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 295 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 296 | "spawner": "hosted-workshop", 297 | "class": "spawner" 298 | } 299 | }, 300 | "spec": { 301 | "serviceAccountName": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 302 | "containers": [ 303 | { 304 | "name": "spawner", 305 | "image": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner:latest", 306 | "ports": [ 307 | { 308 | "containerPort": 8080, 309 | "protocol": "TCP" 310 | } 311 | ], 312 | "resources": { 313 | "limits": { 314 | "memory": "${SPAWNER_MEMORY}" 315 | }, 316 | "requests": { 317 | "memory": "${SPAWNER_MEMORY}" 318 | } 319 | }, 320 | "readinessProbe": { 321 | "httpGet": { 322 | "path": "/hub/api", 323 | "port": 8080, 324 | "scheme": "HTTP" 325 | }, 326 | "periodSeconds": 10, 327 | "successThreshold": 1, 328 | "failureThreshold": 3, 329 | "timeoutSeconds": 1 330 | }, 331 | "livenessProbe": { 332 | "httpGet": { 333 | "path": "/hub/api", 334 | "port": 8080, 335 | "scheme": "HTTP" 336 | }, 337 | "periodSeconds": 10, 338 | "successThreshold": 1, 339 | "failureThreshold": 3, 340 | "timeoutSeconds": 1 341 | }, 342 | "env": [ 343 | { 344 | 
"name": "CONFIGURATION_TYPE", 345 | "value": "hosted-workshop" 346 | }, 347 | { 348 | "name": "APPLICATION_NAME", 349 | "value": "${NAME_PREFIX}${WORKSHOP_NAME}" 350 | }, 351 | { 352 | "name": "WORKSHOP_NAME", 353 | "value": "${WORKSHOP_NAME}" 354 | }, 355 | { 356 | "name": "HOMEROOM_NAME", 357 | "value": "${HOMEROOM_NAME}" 358 | }, 359 | { 360 | "name": "HOMEROOM_LINK", 361 | "value": "${HOMEROOM_LINK}" 362 | }, 363 | { 364 | "name": "WORKSHOP_IMAGE", 365 | "value": "${WORKSHOP_IMAGE}" 366 | }, 367 | { 368 | "name": "WORKSHOP_MEMORY", 369 | "value": "${WORKSHOP_MEMORY}" 370 | }, 371 | { 372 | "name": "CONSOLE_MEMORY", 373 | "value": "${CONSOLE_MEMORY}" 374 | }, 375 | { 376 | "name": "CONSOLE_BRANDING", 377 | "value": "${CONSOLE_BRANDING}" 378 | }, 379 | { 380 | "name": "CONSOLE_IMAGE", 381 | "value": "${CONSOLE_IMAGE}" 382 | }, 383 | { 384 | "name": "VOLUME_SIZE", 385 | "value": "${VOLUME_SIZE}" 386 | }, 387 | { 388 | "name": "ADMIN_USERS", 389 | "value": "${ADMIN_USERS}" 390 | }, 391 | { 392 | "name": "OPENSHIFT_PROJECT", 393 | "value": "${OPENSHIFT_PROJECT}" 394 | }, 395 | { 396 | "name": "IDLE_TIMEOUT", 397 | "value": "${IDLE_TIMEOUT}" 398 | }, 399 | { 400 | "name": "DOWNLOAD_URL", 401 | "value": "${DOWNLOAD_URL}" 402 | }, 403 | { 404 | "name": "WORKSHOP_FILE", 405 | "value": "${WORKSHOP_FILE}" 406 | }, 407 | { 408 | "name": "OC_VERSION", 409 | "value": "${OC_VERSION}" 410 | }, 411 | { 412 | "name": "ODO_VERSION", 413 | "value": "${ODO_VERSION}" 414 | }, 415 | { 416 | "name": "KUBECTL_VERSION", 417 | "value": "${KUBECTL_VERSION}" 418 | }, 419 | { 420 | "name": "OAUTH_CLIENT_SECRET", 421 | "value": "${OAUTH_CLIENT_SECRET}" 422 | } 423 | ], 424 | "volumeMounts": [ 425 | { 426 | "mountPath": "/opt/app-root/data", 427 | "name": "data" 428 | }, 429 | { 430 | "name": "configs", 431 | "mountPath": "/opt/app-root/configs" 432 | } 433 | ] 434 | } 435 | ], 436 | "volumes": [ 437 | { 438 | "name": "data", 439 | "persistentVolumeClaim": { 440 | "claimName": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner-data" 441 | } 442 | }, 443 | { 444 | "name": "configs", 445 | "configMap": { 446 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner-configs", 447 | "defaultMode": 420 448 | } 449 | } 450 | ] 451 | } 452 | } 453 | } 454 | }, 455 | { 456 | "apiVersion": "v1", 457 | "kind": "PersistentVolumeClaim", 458 | "metadata": { 459 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner-data", 460 | "labels": { 461 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 462 | "spawner": "hosted-workshop", 463 | "class": "spawner" 464 | } 465 | }, 466 | "spec": { 467 | "accessModes": [ 468 | "ReadWriteOnce" 469 | ], 470 | "resources": { 471 | "requests": { 472 | "storage": "1Gi" 473 | } 474 | } 475 | } 476 | }, 477 | { 478 | "kind": "Service", 479 | "apiVersion": "v1", 480 | "metadata": { 481 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 482 | "labels": { 483 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 484 | "spawner": "hosted-workshop", 485 | "class": "spawner" 486 | } 487 | }, 488 | "spec": { 489 | "ports": [ 490 | { 491 | "name": "8080-tcp", 492 | "protocol": "TCP", 493 | "port": 8080, 494 | "targetPort": 8080 495 | }, 496 | { 497 | "name": "8081-tcp", 498 | "protocol": "TCP", 499 | "port": 8081, 500 | "targetPort": 8081 501 | } 502 | ], 503 | "selector": { 504 | "deploymentconfig": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner" 505 | } 506 | } 507 | }, 508 | { 509 | "kind": "Route", 510 | "apiVersion": "v1", 511 | "metadata": { 512 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 513 | "labels": { 514 | "app": 
"${NAME_PREFIX}${WORKSHOP_NAME}", 515 | "spawner": "hosted-workshop", 516 | "class": "spawner" 517 | }, 518 | "annotations": { 519 | "kubernetes.io/tls-acme": "${LETS_ENCRYPT}", 520 | "homeroom/group": "${HOMEROOM_NAME}", 521 | "homeroom/title": "${WORKSHOP_TITLE}", 522 | "homeroom/description": "${WORKSHOP_DESCRIPTION}" 523 | } 524 | }, 525 | "spec": { 526 | "host": "${WORKSHOP_NAME}-${SPAWNER_NAMESPACE}.${CLUSTER_SUBDOMAIN}", 527 | "to": { 528 | "kind": "Service", 529 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 530 | "weight": 100 531 | }, 532 | "port": { 533 | "targetPort": "8080-tcp" 534 | }, 535 | "tls": { 536 | "termination": "edge", 537 | "insecureEdgeTerminationPolicy": "Redirect" 538 | } 539 | } 540 | }, 541 | { 542 | "kind": "ImageStream", 543 | "apiVersion": "v1", 544 | "metadata": { 545 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-session", 546 | "labels": { 547 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 548 | "spawner": "hosted-workshop", 549 | "class": "spawner" 550 | } 551 | }, 552 | "spec": { 553 | "lookupPolicy": { 554 | "local": true 555 | }, 556 | "tags": [ 557 | { 558 | "name": "latest", 559 | "from": { 560 | "kind": "DockerImage", 561 | "name": "quay.io/openshifthomeroom/workshop-dashboard:5.0.0" 562 | } 563 | } 564 | ] 565 | } 566 | } 567 | ] 568 | } 569 | -------------------------------------------------------------------------------- /templates/terminal-server-development.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "Template", 3 | "apiVersion": "v1", 4 | "metadata": { 5 | "name": "terminal-server-development", 6 | "annotations": { 7 | "openshift.io/display-name": "Terminal Server (Development)" 8 | } 9 | }, 10 | "parameters": [ 11 | { 12 | "name": "SPAWNER_NAMESPACE", 13 | "value": "", 14 | "required": true 15 | }, 16 | { 17 | "name": "CLUSTER_SUBDOMAIN", 18 | "value": "", 19 | "required": true 20 | }, 21 | { 22 | "name": "WORKSHOP_NAME", 23 | "value": "terminal-server", 24 | "required": true 25 | }, 26 | { 27 | "name": "NAME_PREFIX", 28 | "value": "", 29 | "required": false 30 | }, 31 | { 32 | "name": "HOMEROOM_NAME", 33 | "value": "" 34 | }, 35 | { 36 | "name": "HOMEROOM_LINK", 37 | "value": "" 38 | }, 39 | { 40 | "name": "WORKSHOP_TITLE", 41 | "value": "" 42 | }, 43 | { 44 | "name": "WORKSHOP_DESCRIPTION", 45 | "value": "" 46 | }, 47 | { 48 | "name": "GIT_REPOSITORY_URL", 49 | "value": "https://github.com/openshift-homeroom/workshop-spawner.git", 50 | "required": true 51 | }, 52 | { 53 | "name": "GIT_REPOSITORY_REF", 54 | "value": "develop", 55 | "required": true 56 | }, 57 | { 58 | "name": "SPAWNER_MEMORY", 59 | "value": "768Mi" 60 | }, 61 | { 62 | "name": "WORKSHOP_MEMORY", 63 | "value": "512Mi" 64 | }, 65 | { 66 | "name": "CONSOLE_MEMORY", 67 | "value": "128Mi" 68 | }, 69 | { 70 | "name": "CONSOLE_BRANDING", 71 | "value": "openshift" 72 | }, 73 | { 74 | "name": "CONSOLE_IMAGE", 75 | "value": "quay.io/openshift/origin-console:4.2", 76 | "required": true 77 | }, 78 | { 79 | "name": "VOLUME_SIZE", 80 | "value": "" 81 | }, 82 | { 83 | "name": "ADMIN_USERS", 84 | "value": "" 85 | }, 86 | { 87 | "name": "OPENSHIFT_PROJECT", 88 | "value": "" 89 | }, 90 | { 91 | "name": "IDLE_TIMEOUT", 92 | "value": "7200" 93 | }, 94 | { 95 | "name": "WORKSHOP_IMAGE", 96 | "value": "", 97 | "required": false 98 | }, 99 | { 100 | "name": "TERMINAL_ENVVARS", 101 | "value": "" 102 | }, 103 | { 104 | "name": "WORKSHOP_ENVVARS", 105 | "value": "" 106 | }, 107 | { 108 | "name": "GATEWAY_ENVVARS", 109 | "value": "" 110 | }, 
111 | { 112 | "name": "DOWNLOAD_URL", 113 | "value": "" 114 | }, 115 | { 116 | "name": "WORKSHOP_FILE", 117 | "value": "" 118 | }, 119 | { 120 | "name": "OC_VERSION", 121 | "value": "" 122 | }, 123 | { 124 | "name": "ODO_VERSION", 125 | "value": "" 126 | }, 127 | { 128 | "name": "KUBECTL_VERSION", 129 | "value": "" 130 | }, 131 | { 132 | "name": "JUPYTERHUB_CONFIG", 133 | "value": "", 134 | "required": false 135 | }, 136 | { 137 | "name": "JUPYTERHUB_ENVVARS", 138 | "value": "", 139 | "required": false 140 | }, 141 | { 142 | "name": "LETS_ENCRYPT", 143 | "value": "false", 144 | "required": true 145 | }, 146 | { 147 | "name": "OAUTH_CLIENT_SECRET", 148 | "generate": "expression", 149 | "from": "[a-zA-Z0-9]{32}" 150 | } 151 | ], 152 | "objects": [ 153 | { 154 | "kind": "ServiceAccount", 155 | "apiVersion": "v1", 156 | "metadata": { 157 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 158 | "labels": { 159 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 160 | "spawner": "terminal-server", 161 | "class": "spawner" 162 | } 163 | } 164 | }, 165 | { 166 | "kind": "OAuthClient", 167 | "apiVersion": "oauth.openshift.io/v1", 168 | "metadata": { 169 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-console", 170 | "labels": { 171 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 172 | "spawner": "terminal-server", 173 | "class": "spawner" 174 | } 175 | }, 176 | "secret": "${OAUTH_CLIENT_SECRET}", 177 | "grantMethod": "auto", 178 | "redirectURIs": [ 179 | "https://${WORKSHOP_NAME}-${SPAWNER_NAMESPACE}.${CLUSTER_SUBDOMAIN}/hub/oauth_callback" 180 | ] 181 | }, 182 | { 183 | "kind": "RoleBinding", 184 | "apiVersion": "v1", 185 | "metadata": { 186 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-basic", 187 | "labels": { 188 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 189 | "spawner": "terminal-server", 190 | "class": "spawner" 191 | } 192 | }, 193 | "subjects": [ 194 | { 195 | "kind": "ServiceAccount", 196 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner" 197 | } 198 | ], 199 | "roleRef": { 200 | "name": "edit" 201 | } 202 | }, 203 | { 204 | "kind": "ImageStream", 205 | "apiVersion": "v1", 206 | "metadata": { 207 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 208 | "labels": { 209 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 210 | "spawner": "terminal-server", 211 | "class": "spawner" 212 | } 213 | } 214 | }, 215 | { 216 | "kind": "ConfigMap", 217 | "apiVersion": "v1", 218 | "metadata": { 219 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner-configs", 220 | "labels": { 221 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 222 | "spawner": "terminal-server", 223 | "class": "spawner" 224 | } 225 | }, 226 | "data": { 227 | "jupyterhub_config.py": "${JUPYTERHUB_CONFIG}", 228 | "jupyterhub_config.sh": "${JUPYTERHUB_ENVVARS}" 229 | } 230 | }, 231 | { 232 | "kind": "ConfigMap", 233 | "apiVersion": "v1", 234 | "metadata": { 235 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-session-envvars", 236 | "labels": { 237 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 238 | "spawner": "terminal-server", 239 | "class": "spawner" 240 | } 241 | }, 242 | "data": { 243 | "terminal.sh": "${TERMINAL_ENVVARS}", 244 | "workshop.sh": "${WORKSHOP_ENVVARS}", 245 | "gateway.sh": "${GATEWAY_ENVVARS}" 246 | } 247 | }, 248 | { 249 | "kind": "BuildConfig", 250 | "apiVersion": "v1", 251 | "metadata": { 252 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 253 | "labels": { 254 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 255 | "spawner": "terminal-server", 256 | "class": "spawner" 257 | } 258 | }, 259 | "spec": { 260 | "triggers": [ 261 | { 262 | "type": "ConfigChange" 263 | 
}, 264 | { 265 | "type": "ImageChange" 266 | } 267 | ], 268 | "source": { 269 | "type": "Git", 270 | "git": { 271 | "uri": "${GIT_REPOSITORY_URL}", 272 | "ref": "${GIT_REPOSITORY_REF}" 273 | }, 274 | "contextDir": "jupyterhub" 275 | }, 276 | "strategy": { 277 | "type": "Docker", 278 | "dockerStrategy": { 279 | "from": { 280 | "kind": "DockerImage", 281 | "name": "centos/s2i-base-centos7:latest" 282 | } 283 | } 284 | }, 285 | "output": { 286 | "to": { 287 | "kind": "ImageStreamTag", 288 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner:latest" 289 | } 290 | } 291 | } 292 | }, 293 | { 294 | "kind": "DeploymentConfig", 295 | "apiVersion": "v1", 296 | "metadata": { 297 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 298 | "labels": { 299 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 300 | "spawner": "terminal-server", 301 | "class": "spawner" 302 | } 303 | }, 304 | "spec": { 305 | "strategy": { 306 | "type": "Recreate" 307 | }, 308 | "triggers": [ 309 | { 310 | "type": "ConfigChange" 311 | }, 312 | { 313 | "type": "ImageChange", 314 | "imageChangeParams": { 315 | "automatic": true, 316 | "containerNames": [ 317 | "spawner" 318 | ], 319 | "from": { 320 | "kind": "ImageStreamTag", 321 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner:latest" 322 | } 323 | } 324 | } 325 | ], 326 | "replicas": 1, 327 | "selector": { 328 | "deploymentconfig": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner" 329 | }, 330 | "template": { 331 | "metadata": { 332 | "labels": { 333 | "deploymentconfig": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 334 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 335 | "spawner": "terminal-server", 336 | "class": "spawner" 337 | } 338 | }, 339 | "spec": { 340 | "serviceAccountName": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 341 | "containers": [ 342 | { 343 | "name": "spawner", 344 | "image": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner:latest", 345 | "ports": [ 346 | { 347 | "containerPort": 8080, 348 | "protocol": "TCP" 349 | } 350 | ], 351 | "resources": { 352 | "limits": { 353 | "memory": "${SPAWNER_MEMORY}" 354 | }, 355 | "requests": { 356 | "memory": "${SPAWNER_MEMORY}" 357 | } 358 | }, 359 | "readinessProbe": { 360 | "httpGet": { 361 | "path": "/hub/api", 362 | "port": 8080, 363 | "scheme": "HTTP" 364 | }, 365 | "periodSeconds": 10, 366 | "successThreshold": 1, 367 | "failureThreshold": 3, 368 | "timeoutSeconds": 1 369 | }, 370 | "livenessProbe": { 371 | "httpGet": { 372 | "path": "/hub/api", 373 | "port": 8080, 374 | "scheme": "HTTP" 375 | }, 376 | "periodSeconds": 10, 377 | "successThreshold": 1, 378 | "failureThreshold": 3, 379 | "timeoutSeconds": 1 380 | }, 381 | "env": [ 382 | { 383 | "name": "CONFIGURATION_TYPE", 384 | "value": "terminal-server" 385 | }, 386 | { 387 | "name": "APPLICATION_NAME", 388 | "value": "${NAME_PREFIX}${WORKSHOP_NAME}" 389 | }, 390 | { 391 | "name": "WORKSHOP_NAME", 392 | "value": "${WORKSHOP_NAME}" 393 | }, 394 | { 395 | "name": "HOMEROOM_NAME", 396 | "value": "${HOMEROOM_NAME}" 397 | }, 398 | { 399 | "name": "HOMEROOM_LINK", 400 | "value": "${HOMEROOM_LINK}" 401 | }, 402 | { 403 | "name": "WORKSHOP_IMAGE", 404 | "value": "${WORKSHOP_IMAGE}" 405 | }, 406 | { 407 | "name": "WORKSHOP_MEMORY", 408 | "value": "${WORKSHOP_MEMORY}" 409 | }, 410 | { 411 | "name": "CONSOLE_MEMORY", 412 | "value": "${CONSOLE_MEMORY}" 413 | }, 414 | { 415 | "name": "CONSOLE_BRANDING", 416 | "value": "${CONSOLE_BRANDING}" 417 | }, 418 | { 419 | "name": "CONSOLE_IMAGE", 420 | "value": "${CONSOLE_IMAGE}" 421 | }, 422 | { 423 | "name": "VOLUME_SIZE", 424 | "value": "${VOLUME_SIZE}" 425 | }, 426 | 
{ 427 | "name": "ADMIN_USERS", 428 | "value": "${ADMIN_USERS}" 429 | }, 430 | { 431 | "name": "OPENSHIFT_PROJECT", 432 | "value": "${OPENSHIFT_PROJECT}" 433 | }, 434 | { 435 | "name": "IDLE_TIMEOUT", 436 | "value": "${IDLE_TIMEOUT}" 437 | }, 438 | { 439 | "name": "DOWNLOAD_URL", 440 | "value": "${DOWNLOAD_URL}" 441 | }, 442 | { 443 | "name": "WORKSHOP_FILE", 444 | "value": "${WORKSHOP_FILE}" 445 | }, 446 | { 447 | "name": "OC_VERSION", 448 | "value": "${OC_VERSION}" 449 | }, 450 | { 451 | "name": "ODO_VERSION", 452 | "value": "${ODO_VERSION}" 453 | }, 454 | { 455 | "name": "KUBECTL_VERSION", 456 | "value": "${KUBECTL_VERSION}" 457 | }, 458 | { 459 | "name": "OAUTH_CLIENT_SECRET", 460 | "value": "${OAUTH_CLIENT_SECRET}" 461 | } 462 | ], 463 | "volumeMounts": [ 464 | { 465 | "mountPath": "/opt/app-root/data", 466 | "name": "data" 467 | }, 468 | { 469 | "name": "configs", 470 | "mountPath": "/opt/app-root/configs" 471 | } 472 | ] 473 | } 474 | ], 475 | "volumes": [ 476 | { 477 | "name": "data", 478 | "persistentVolumeClaim": { 479 | "claimName": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner-data" 480 | } 481 | }, 482 | { 483 | "name": "configs", 484 | "configMap": { 485 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner-configs", 486 | "defaultMode": 420 487 | } 488 | } 489 | ] 490 | } 491 | } 492 | } 493 | }, 494 | { 495 | "apiVersion": "v1", 496 | "kind": "PersistentVolumeClaim", 497 | "metadata": { 498 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner-data", 499 | "labels": { 500 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 501 | "spawner": "terminal-server", 502 | "class": "spawner" 503 | } 504 | }, 505 | "spec": { 506 | "accessModes": [ 507 | "ReadWriteOnce" 508 | ], 509 | "resources": { 510 | "requests": { 511 | "storage": "1Gi" 512 | } 513 | } 514 | } 515 | }, 516 | { 517 | "kind": "Service", 518 | "apiVersion": "v1", 519 | "metadata": { 520 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 521 | "labels": { 522 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 523 | "spawner": "terminal-server", 524 | "class": "spawner" 525 | } 526 | }, 527 | "spec": { 528 | "ports": [ 529 | { 530 | "name": "8080-tcp", 531 | "protocol": "TCP", 532 | "port": 8080, 533 | "targetPort": 8080 534 | }, 535 | { 536 | "name": "8081-tcp", 537 | "protocol": "TCP", 538 | "port": 8081, 539 | "targetPort": 8081 540 | } 541 | ], 542 | "selector": { 543 | "deploymentconfig": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner" 544 | } 545 | } 546 | }, 547 | { 548 | "kind": "Route", 549 | "apiVersion": "v1", 550 | "metadata": { 551 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 552 | "labels": { 553 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 554 | "spawner": "terminal-server", 555 | "class": "spawner" 556 | }, 557 | "annotations": { 558 | "kubernetes.io/tls-acme": "${LETS_ENCRYPT}", 559 | "homeroom/group": "${HOMEROOM_NAME}", 560 | "homeroom/title": "${WORKSHOP_TITLE}", 561 | "homeroom/description": "${WORKSHOP_DESCRIPTION}" 562 | } 563 | }, 564 | "spec": { 565 | "host": "${WORKSHOP_NAME}-${SPAWNER_NAMESPACE}.${CLUSTER_SUBDOMAIN}", 566 | "to": { 567 | "kind": "Service", 568 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 569 | "weight": 100 570 | }, 571 | "port": { 572 | "targetPort": "8080-tcp" 573 | }, 574 | "tls": { 575 | "termination": "edge", 576 | "insecureEdgeTerminationPolicy": "Redirect" 577 | } 578 | } 579 | }, 580 | { 581 | "kind": "ImageStream", 582 | "apiVersion": "v1", 583 | "metadata": { 584 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-session", 585 | "labels": { 586 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 587 | 
"spawner": "terminal-server", 588 | "class": "spawner" 589 | } 590 | }, 591 | "spec": { 592 | "lookupPolicy": { 593 | "local": true 594 | }, 595 | "tags": [ 596 | { 597 | "name": "latest", 598 | "from": { 599 | "kind": "DockerImage", 600 | "name": "quay.io/openshifthomeroom/workshop-terminal:3.4.3" 601 | } 602 | } 603 | ] 604 | } 605 | } 606 | ] 607 | } 608 | -------------------------------------------------------------------------------- /templates/terminal-server-production.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "Template", 3 | "apiVersion": "v1", 4 | "metadata": { 5 | "name": "terminal-server-production", 6 | "annotations": { 7 | "openshift.io/display-name": "Terminal Server (Production)" 8 | } 9 | }, 10 | "parameters": [ 11 | { 12 | "name": "SPAWNER_NAMESPACE", 13 | "value": "", 14 | "required": true 15 | }, 16 | { 17 | "name": "CLUSTER_SUBDOMAIN", 18 | "value": "", 19 | "required": true 20 | }, 21 | { 22 | "name": "WORKSHOP_NAME", 23 | "value": "terminal-server", 24 | "required": true 25 | }, 26 | { 27 | "name": "NAME_PREFIX", 28 | "value": "", 29 | "required": false 30 | }, 31 | { 32 | "name": "HOMEROOM_NAME", 33 | "value": "" 34 | }, 35 | { 36 | "name": "HOMEROOM_LINK", 37 | "value": "" 38 | }, 39 | { 40 | "name": "WORKSHOP_TITLE", 41 | "value": "" 42 | }, 43 | { 44 | "name": "WORKSHOP_DESCRIPTION", 45 | "value": "" 46 | }, 47 | { 48 | "name": "SPAWNER_MEMORY", 49 | "value": "768Mi" 50 | }, 51 | { 52 | "name": "WORKSHOP_MEMORY", 53 | "value": "512Mi" 54 | }, 55 | { 56 | "name": "CONSOLE_MEMORY", 57 | "value": "128Mi" 58 | }, 59 | { 60 | "name": "CONSOLE_BRANDING", 61 | "value": "openshift" 62 | }, 63 | { 64 | "name": "CONSOLE_IMAGE", 65 | "value": "quay.io/openshift/origin-console:4.2", 66 | "required": true 67 | }, 68 | { 69 | "name": "VOLUME_SIZE", 70 | "value": "" 71 | }, 72 | { 73 | "name": "ADMIN_USERS", 74 | "value": "" 75 | }, 76 | { 77 | "name": "OPENSHIFT_PROJECT", 78 | "value": "" 79 | }, 80 | { 81 | "name": "IDLE_TIMEOUT", 82 | "value": "7200" 83 | }, 84 | { 85 | "name": "WORKSHOP_IMAGE", 86 | "value": "", 87 | "required": false 88 | }, 89 | { 90 | "name": "SPAWNER_IMAGE", 91 | "value": "quay.io/openshifthomeroom/workshop-spawner:7.1.0", 92 | "required": true 93 | }, 94 | { 95 | "name": "TERMINAL_ENVVARS", 96 | "value": "" 97 | }, 98 | { 99 | "name": "WORKSHOP_ENVVARS", 100 | "value": "" 101 | }, 102 | { 103 | "name": "GATEWAY_ENVVARS", 104 | "value": "" 105 | }, 106 | { 107 | "name": "DOWNLOAD_URL", 108 | "value": "" 109 | }, 110 | { 111 | "name": "WORKSHOP_FILE", 112 | "value": "" 113 | }, 114 | { 115 | "name": "OC_VERSION", 116 | "value": "" 117 | }, 118 | { 119 | "name": "ODO_VERSION", 120 | "value": "" 121 | }, 122 | { 123 | "name": "KUBECTL_VERSION", 124 | "value": "" 125 | }, 126 | { 127 | "name": "JUPYTERHUB_CONFIG", 128 | "value": "", 129 | "required": false 130 | }, 131 | { 132 | "name": "JUPYTERHUB_ENVVARS", 133 | "value": "", 134 | "required": false 135 | }, 136 | { 137 | "name": "LETS_ENCRYPT", 138 | "value": "false", 139 | "required": true 140 | }, 141 | { 142 | "name": "OAUTH_CLIENT_SECRET", 143 | "generate": "expression", 144 | "from": "[a-zA-Z0-9]{32}" 145 | } 146 | ], 147 | "objects": [ 148 | { 149 | "kind": "ServiceAccount", 150 | "apiVersion": "v1", 151 | "metadata": { 152 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 153 | "labels": { 154 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 155 | "spawner": "terminal-server", 156 | "class": "spawner" 157 | } 158 | } 159 | }, 160 | { 161 | 
"kind": "OAuthClient", 162 | "apiVersion": "oauth.openshift.io/v1", 163 | "metadata": { 164 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-console", 165 | "labels": { 166 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 167 | "spawner": "terminal-server", 168 | "class": "spawner" 169 | } 170 | }, 171 | "secret": "${OAUTH_CLIENT_SECRET}", 172 | "grantMethod": "auto", 173 | "redirectURIs": [ 174 | "https://${WORKSHOP_NAME}-${SPAWNER_NAMESPACE}.${CLUSTER_SUBDOMAIN}/hub/oauth_callback" 175 | ] 176 | }, 177 | { 178 | "kind": "RoleBinding", 179 | "apiVersion": "v1", 180 | "metadata": { 181 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-basic", 182 | "labels": { 183 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 184 | "spawner": "terminal-server", 185 | "class": "spawner" 186 | } 187 | }, 188 | "subjects": [ 189 | { 190 | "kind": "ServiceAccount", 191 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner" 192 | } 193 | ], 194 | "roleRef": { 195 | "name": "edit" 196 | } 197 | }, 198 | { 199 | "kind": "ImageStream", 200 | "apiVersion": "v1", 201 | "metadata": { 202 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 203 | "labels": { 204 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 205 | "spawner": "terminal-server", 206 | "class": "spawner" 207 | } 208 | }, 209 | "spec": { 210 | "tags": [ 211 | { 212 | "name": "latest", 213 | "from": { 214 | "kind": "DockerImage", 215 | "name": "${SPAWNER_IMAGE}" 216 | } 217 | } 218 | ] 219 | } 220 | }, 221 | { 222 | "kind": "ConfigMap", 223 | "apiVersion": "v1", 224 | "metadata": { 225 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner-configs", 226 | "labels": { 227 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 228 | "spawner": "terminal-server", 229 | "class": "spawner" 230 | } 231 | }, 232 | "data": { 233 | "jupyterhub_config.py": "${JUPYTERHUB_CONFIG}", 234 | "jupyterhub_config.sh": "${JUPYTERHUB_ENVVARS}" 235 | } 236 | }, 237 | { 238 | "kind": "ConfigMap", 239 | "apiVersion": "v1", 240 | "metadata": { 241 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-session-envvars", 242 | "labels": { 243 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 244 | "spawner": "terminal-server", 245 | "class": "spawner" 246 | } 247 | }, 248 | "data": { 249 | "terminal.sh": "${TERMINAL_ENVVARS}", 250 | "workshop.sh": "${WORKSHOP_ENVVARS}", 251 | "gateway.sh": "${GATEWAY_ENVVARS}" 252 | } 253 | }, 254 | { 255 | "kind": "DeploymentConfig", 256 | "apiVersion": "v1", 257 | "metadata": { 258 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 259 | "labels": { 260 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 261 | "spawner": "terminal-server", 262 | "class": "spawner" 263 | } 264 | }, 265 | "spec": { 266 | "strategy": { 267 | "type": "Recreate" 268 | }, 269 | "triggers": [ 270 | { 271 | "type": "ConfigChange" 272 | }, 273 | { 274 | "type": "ImageChange", 275 | "imageChangeParams": { 276 | "automatic": true, 277 | "containerNames": [ 278 | "spawner" 279 | ], 280 | "from": { 281 | "kind": "ImageStreamTag", 282 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner:latest" 283 | } 284 | } 285 | } 286 | ], 287 | "replicas": 1, 288 | "selector": { 289 | "deploymentconfig": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner" 290 | }, 291 | "template": { 292 | "metadata": { 293 | "labels": { 294 | "deploymentconfig": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 295 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 296 | "spawner": "terminal-server", 297 | "class": "spawner" 298 | } 299 | }, 300 | "spec": { 301 | "serviceAccountName": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 302 | "containers": [ 303 | { 304 | "name": "spawner", 305 | "image": 
"${NAME_PREFIX}${WORKSHOP_NAME}-spawner:latest", 306 | "ports": [ 307 | { 308 | "containerPort": 8080, 309 | "protocol": "TCP" 310 | } 311 | ], 312 | "resources": { 313 | "limits": { 314 | "memory": "${SPAWNER_MEMORY}" 315 | }, 316 | "requests": { 317 | "memory": "${SPAWNER_MEMORY}" 318 | } 319 | }, 320 | "readinessProbe": { 321 | "httpGet": { 322 | "path": "/hub/api", 323 | "port": 8080, 324 | "scheme": "HTTP" 325 | }, 326 | "periodSeconds": 10, 327 | "successThreshold": 1, 328 | "failureThreshold": 3, 329 | "timeoutSeconds": 1 330 | }, 331 | "livenessProbe": { 332 | "httpGet": { 333 | "path": "/hub/api", 334 | "port": 8080, 335 | "scheme": "HTTP" 336 | }, 337 | "periodSeconds": 10, 338 | "successThreshold": 1, 339 | "failureThreshold": 3, 340 | "timeoutSeconds": 1 341 | }, 342 | "env": [ 343 | { 344 | "name": "CONFIGURATION_TYPE", 345 | "value": "terminal-server" 346 | }, 347 | { 348 | "name": "APPLICATION_NAME", 349 | "value": "${NAME_PREFIX}${WORKSHOP_NAME}" 350 | }, 351 | { 352 | "name": "WORKSHOP_NAME", 353 | "value": "${WORKSHOP_NAME}" 354 | }, 355 | { 356 | "name": "HOMEROOM_NAME", 357 | "value": "${HOMEROOM_NAME}" 358 | }, 359 | { 360 | "name": "HOMEROOM_LINK", 361 | "value": "${HOMEROOM_LINK}" 362 | }, 363 | { 364 | "name": "WORKSHOP_IMAGE", 365 | "value": "${WORKSHOP_IMAGE}" 366 | }, 367 | { 368 | "name": "WORKSHOP_MEMORY", 369 | "value": "${WORKSHOP_MEMORY}" 370 | }, 371 | { 372 | "name": "CONSOLE_MEMORY", 373 | "value": "${CONSOLE_MEMORY}" 374 | }, 375 | { 376 | "name": "CONSOLE_BRANDING", 377 | "value": "${CONSOLE_BRANDING}" 378 | }, 379 | { 380 | "name": "CONSOLE_IMAGE", 381 | "value": "${CONSOLE_IMAGE}" 382 | }, 383 | { 384 | "name": "VOLUME_SIZE", 385 | "value": "${VOLUME_SIZE}" 386 | }, 387 | { 388 | "name": "ADMIN_USERS", 389 | "value": "${ADMIN_USERS}" 390 | }, 391 | { 392 | "name": "OPENSHIFT_PROJECT", 393 | "value": "${OPENSHIFT_PROJECT}" 394 | }, 395 | { 396 | "name": "IDLE_TIMEOUT", 397 | "value": "${IDLE_TIMEOUT}" 398 | }, 399 | { 400 | "name": "DOWNLOAD_URL", 401 | "value": "${DOWNLOAD_URL}" 402 | }, 403 | { 404 | "name": "WORKSHOP_FILE", 405 | "value": "${WORKSHOP_FILE}" 406 | }, 407 | { 408 | "name": "OC_VERSION", 409 | "value": "${OC_VERSION}" 410 | }, 411 | { 412 | "name": "ODO_VERSION", 413 | "value": "${ODO_VERSION}" 414 | }, 415 | { 416 | "name": "KUBECTL_VERSION", 417 | "value": "${KUBECTL_VERSION}" 418 | }, 419 | { 420 | "name": "OAUTH_CLIENT_SECRET", 421 | "value": "${OAUTH_CLIENT_SECRET}" 422 | } 423 | ], 424 | "volumeMounts": [ 425 | { 426 | "mountPath": "/opt/app-root/data", 427 | "name": "data" 428 | }, 429 | { 430 | "name": "configs", 431 | "mountPath": "/opt/app-root/configs" 432 | } 433 | ] 434 | } 435 | ], 436 | "volumes": [ 437 | { 438 | "name": "data", 439 | "persistentVolumeClaim": { 440 | "claimName": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner-data" 441 | } 442 | }, 443 | { 444 | "name": "configs", 445 | "configMap": { 446 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner-configs", 447 | "defaultMode": 420 448 | } 449 | } 450 | ] 451 | } 452 | } 453 | } 454 | }, 455 | { 456 | "apiVersion": "v1", 457 | "kind": "PersistentVolumeClaim", 458 | "metadata": { 459 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner-data", 460 | "labels": { 461 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 462 | "spawner": "terminal-server", 463 | "class": "spawner" 464 | } 465 | }, 466 | "spec": { 467 | "accessModes": [ 468 | "ReadWriteOnce" 469 | ], 470 | "resources": { 471 | "requests": { 472 | "storage": "1Gi" 473 | } 474 | } 475 | } 476 | }, 477 | { 478 | "kind": 
"Service", 479 | "apiVersion": "v1", 480 | "metadata": { 481 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 482 | "labels": { 483 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 484 | "spawner": "terminal-server", 485 | "class": "spawner" 486 | } 487 | }, 488 | "spec": { 489 | "ports": [ 490 | { 491 | "name": "8080-tcp", 492 | "protocol": "TCP", 493 | "port": 8080, 494 | "targetPort": 8080 495 | }, 496 | { 497 | "name": "8081-tcp", 498 | "protocol": "TCP", 499 | "port": 8081, 500 | "targetPort": 8081 501 | } 502 | ], 503 | "selector": { 504 | "deploymentconfig": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner" 505 | } 506 | } 507 | }, 508 | { 509 | "kind": "Route", 510 | "apiVersion": "v1", 511 | "metadata": { 512 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 513 | "labels": { 514 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 515 | "spawner": "terminal-server", 516 | "class": "spawner" 517 | }, 518 | "annotations": { 519 | "kubernetes.io/tls-acme": "${LETS_ENCRYPT}", 520 | "homeroom/group": "${HOMEROOM_NAME}", 521 | "homeroom/title": "${WORKSHOP_TITLE}", 522 | "homeroom/description": "${WORKSHOP_DESCRIPTION}" 523 | } 524 | }, 525 | "spec": { 526 | "host": "${WORKSHOP_NAME}-${SPAWNER_NAMESPACE}.${CLUSTER_SUBDOMAIN}", 527 | "to": { 528 | "kind": "Service", 529 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-spawner", 530 | "weight": 100 531 | }, 532 | "port": { 533 | "targetPort": "8080-tcp" 534 | }, 535 | "tls": { 536 | "termination": "edge", 537 | "insecureEdgeTerminationPolicy": "Redirect" 538 | } 539 | } 540 | }, 541 | { 542 | "kind": "ImageStream", 543 | "apiVersion": "v1", 544 | "metadata": { 545 | "name": "${NAME_PREFIX}${WORKSHOP_NAME}-session", 546 | "labels": { 547 | "app": "${NAME_PREFIX}${WORKSHOP_NAME}", 548 | "spawner": "terminal-server", 549 | "class": "spawner" 550 | } 551 | }, 552 | "spec": { 553 | "lookupPolicy": { 554 | "local": true 555 | }, 556 | "tags": [ 557 | { 558 | "name": "latest", 559 | "from": { 560 | "kind": "DockerImage", 561 | "name": "quay.io/openshifthomeroom/workshop-terminal:3.4.3" 562 | } 563 | } 564 | ] 565 | } 566 | } 567 | ] 568 | } 569 | --------------------------------------------------------------------------------