├── .s2i ├── bin │ ├── assemble │ └── run └── environment ├── Dockerfile ├── LICENSE ├── README.md ├── build-configs └── jupyterhub.json ├── builder ├── assemble ├── image_metadata.json ├── run └── save-artifacts ├── image-streams └── jupyterhub.json ├── jupyterhub_config-workspace.py ├── jupyterhub_config-workspace.sh ├── jupyterhub_config.py ├── jupyterhub_config.sh ├── requirements.txt ├── scripts ├── backup-user-details ├── backup-user-details.py ├── cull-idle-servers ├── cull-idle-servers.py └── wait-for-database ├── start-jupyterhub.sh └── templates ├── jupyterhub-builder.json ├── jupyterhub-deployer.json ├── jupyterhub-quickstart.json └── jupyterhub-workspace.json /.s2i/bin/assemble: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eo pipefail 4 | 5 | set -x 6 | 7 | # Ensure we are using the latest pip, setuptools and wheel packages. 8 | 9 | pip install -U pip setuptools wheel 10 | 11 | # Install base packages needed for running JupyterHub. 12 | 13 | pip install -r /tmp/src/requirements.txt 14 | 15 | # Install npm packages required by JupyterHub. 16 | 17 | echo " -----> Installing npm packages." 18 | 19 | npm install -g configurable-http-proxy 20 | 21 | # Copy the global JupyterHub config files into place. 22 | 23 | cp /tmp/src/jupyterhub_config.py /opt/app-root/etc/ 24 | cp /tmp/src/jupyterhub_config.sh /opt/app-root/etc/ 25 | 26 | cp /tmp/src/jupyterhub_config-*.py /opt/app-root/etc/ 27 | cp /tmp/src/jupyterhub_config-*.sh /opt/app-root/etc/ 28 | 29 | # This S2I assemble script is only used when creating the custom image. 30 | # When the image is run, or used as an S2I builder, a second set of 31 | # custom S2I scripts is used instead. We now need to move these into the 32 | # correct location and have the custom image use them by dropping in an 33 | # image metadata file which overrides the labels of the base image. 34 | 35 | mkdir -p /tmp/.s2i 36 | 37 | mv /tmp/src/builder/image_metadata.json /tmp/.s2i/image_metadata.json 38 | 39 | mv /tmp/src/builder /opt/app-root/builder 40 | 41 | # Also move into place the helper scripts and the script to start JupyterHub. 42 | 43 | mv /tmp/src/scripts/* /opt/app-root/bin/ 44 | 45 | mv /tmp/src/start-jupyterhub.sh /opt/app-root/bin/ 46 | 47 | rm -rf /tmp/src 48 | 49 | # Scripts used to be kept in /opt/app-root/scripts but are now in the 50 | # directory /opt/app-root/bin. Create a symlink for wait-for-database 51 | # for now, until any templates running the script from the old location 52 | # have been purged. 53 | 54 | mkdir -p /opt/app-root/scripts 55 | 56 | ln -s /opt/app-root/bin/wait-for-database /opt/app-root/scripts/wait-for-database 57 | 58 | # Ensure the passwd/group file intercept happens for any shell environment. 59 | 60 | echo "source /opt/app-root/etc/generate_container_user" >> /opt/app-root/etc/scl_enable 61 | 62 | # Create additional directories. 63 | 64 | echo " -----> Creating additional directories." 65 | 66 | mkdir -p /opt/app-root/data 67 | 68 | # Fix up permissions on directories and files. 69 | 70 | fix-permissions /opt/app-root 71 | -------------------------------------------------------------------------------- /.s2i/bin/run: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eo pipefail 4 | 5 | # Execute the run script from the customised builder.
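# Using "exec" replaces this shell process with the run script, so signals
# such as TERM from the container runtime are delivered to it directly.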
6 | 7 | exec /opt/app-root/builder/run 8 | -------------------------------------------------------------------------------- /.s2i/environment: -------------------------------------------------------------------------------- 1 | NPM_CONFIG_PREFIX=/opt/app-root 2 | PYTHONPATH=/opt/app-root/src 3 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos/python-36-centos7:latest 2 | 3 | LABEL io.k8s.display-name="JupyterHub" \ 4 | io.k8s.description="JupyterHub." \ 5 | io.openshift.tags="builder,python,jupyterhub" \ 6 | io.openshift.s2i.scripts-url="image:///opt/app-root/builder" 7 | 8 | USER root 9 | 10 | COPY . /tmp/src 11 | 12 | RUN rm -rf /tmp/src/.git* && \ 13 | chown -R 1001 /tmp/src && \ 14 | chgrp -R 0 /tmp/src && \ 15 | chmod -R g+w /tmp/src && \ 16 | mv /tmp/src/.s2i/bin /tmp/scripts 17 | 18 | USER 1001 19 | 20 | ENV NPM_CONFIG_PREFIX=/opt/app-root \ 21 | PYTHONPATH=/opt/app-root/src 22 | 23 | RUN /tmp/scripts/assemble 24 | 25 | CMD [ "/opt/app-root/builder/run" ] 26 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. 
For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | JupyterHub for OpenShift 2 | ======================== 3 | 4 | This repository contains software to make it easier to host Jupyter Notebooks on OpenShift using JupyterHub. 5 | 6 | Because OpenShift is a Kubernetes distribution, you can use the [JupyterHub deployment method for Kubernetes](http://zero-to-jupyterhub.readthedocs.io/) created by the Jupyter project team.
That deployment method relies on using Helm to manage the deployment. The use of Helm, and the fact that Kubernetes is a platform aimed at IT operations, means it isn't as easy for end users to deploy as it could be. This repository aims to provide a much easier way of deploying JupyterHub to OpenShift, one which makes better use of OpenShift-specific features, including OpenShift templates and Source-to-Image (S2I) builders. The result is a method for deploying JupyterHub to OpenShift which doesn't require any special admin privileges for the underlying Kubernetes cluster or OpenShift. As long as a user has the necessary quotas for memory, CPU and persistent storage, they can deploy JupyterHub themselves. 7 | 8 | Use a stable version of this repository 9 | --------------------------------------- 10 | 11 | When using this repository, and the parallel ``jupyter-notebooks`` repository, unless you are participating in the development and testing of the images produced from these repositories, always use a tagged version. Do not use master or development branches, as your builds or deployments could break across versions. 12 | 13 | You should therefore always take any files for creating images or templates from the required tagged version. These will reference the appropriate version. If you have created your own resource definitions to build from the repository, ensure that the ref field of the Git settings for the build refers to the desired version. 14 | 15 | Preparing the Jupyter Images 16 | ---------------------------- 17 | 18 | The first step in deploying JupyterHub is to prepare a notebook image and the image for JupyterHub. 19 | 20 | You can use the official Jupyter project ``docker-stacks`` images, but some extra configuration is required as they will not work out of the box with OpenShift. Details on how to use the Jupyter project images are given later. 21 | 22 | To load an image stream definition for a minimal Jupyter notebook image designed to run in OpenShift, run: 23 | 24 | ``` 25 | oc apply -f https://raw.githubusercontent.com/jupyter-on-openshift/jupyter-notebooks/master/image-streams/s2i-minimal-notebook.json 26 | ``` 27 | 28 | An image stream named ``s2i-minimal-notebook`` should be created in your project, with tags ``3.5`` and ``3.6``, corresponding to the Python 3.5 and 3.6 variants of the notebook image. This image is based on CentOS. 29 | 30 | For more detailed instructions on creating the minimal notebook image, including how to build it from source code or using a RHEL base image, as well as how to create custom notebook images, read: 31 | 32 | * https://github.com/jupyter-on-openshift/jupyter-notebooks 33 | 34 | To load the JupyterHub image, next run: 35 | 36 | ``` 37 | oc apply -f https://raw.githubusercontent.com/jupyter-on-openshift/jupyterhub-quickstart/master/image-streams/jupyterhub.json 38 | ``` 39 | 40 | An image stream named ``jupyterhub`` should be created in your project, with a tag corresponding to the latest version. This image is also based on CentOS. 41 | 42 | If you are using OpenShift Container Platform, and instead need to build a RHEL-based version of the JupyterHub image, you can use the command: 43 | 44 | ``` 45 | oc apply -f https://raw.githubusercontent.com/jupyter-on-openshift/jupyterhub-quickstart/master/build-configs/jupyterhub.json 46 | ``` 47 | 48 | Use one method or the other. Do not load the image stream and, at the same time, create a build config which builds the same image.
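Whichever method you used, it can help to confirm the image streams exist before continuing. As a minimal check, assuming the image stream names used above:

```
oc describe imagestream s2i-minimal-notebook jupyterhub
```

If the expected tags are not listed, re-check the output of the earlier ``oc apply`` commands before moving on.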
49 | 50 | Loading the JupyterHub Templates 51 | -------------------------------- 52 | 53 | To make it easier to deploy JupyterHub in OpenShift, templates are provided. To load the templates, run: 54 | 55 | ``` 56 | oc apply -f https://raw.githubusercontent.com/jupyter-on-openshift/jupyterhub-quickstart/master/templates/jupyterhub-builder.json 57 | oc apply -f https://raw.githubusercontent.com/jupyter-on-openshift/jupyterhub-quickstart/master/templates/jupyterhub-deployer.json 58 | oc apply -f https://raw.githubusercontent.com/jupyter-on-openshift/jupyterhub-quickstart/master/templates/jupyterhub-quickstart.json 59 | oc apply -f https://raw.githubusercontent.com/jupyter-on-openshift/jupyterhub-quickstart/master/templates/jupyterhub-workspace.json 60 | ``` 61 | 62 | This should result in the creation of the templates ``jupyterhub-builder``, ``jupyterhub-deployer``, ``jupyterhub-quickstart`` and ``jupyterhub-workspace``. 63 | 64 | Creating the JupyterHub Deployment 65 | ---------------------------------- 66 | 67 | To deploy JupyterHub with the default configuration, which will provide you with a deployment similar to ``tmpnb.org``, and using the ``s2i-minimal-notebook:3.6`` image, run: 68 | 69 | ``` 70 | oc new-app --template jupyterhub-deployer 71 | ``` 72 | 73 | This deployment requires a single persistent volume of size 1Gi for use by the PostgreSQL database deployed along with JupyterHub. The notebooks which will be deployed will use ephemeral storage. 74 | 75 | To monitor progress as the deployment occurs, run: 76 | 77 | ``` 78 | oc rollout status dc/jupyterhub 79 | ``` 80 | 81 | To view the hostname assigned to the JupyterHub instance by OpenShift, run: 82 | 83 | ``` 84 | oc get route/jupyterhub 85 | ``` 86 | 87 | Access the host from a browser and a Jupyter notebook instance will be automatically started for you. Access the site using a different browser, or from a different computer, and you should get a second Jupyter notebook instance, separate from the first. 88 | 89 | To see a list of the pods corresponding to the notebook instances, run: 90 | 91 | ``` 92 | oc get pods --selector app=jupyterhub,component=singleuser-server 93 | ``` 94 | 95 | This should yield results similar to: 96 | 97 | ``` 98 | NAME READY STATUS RESTARTS AGE 99 | jupyterhub-nb-5b7eac5d-2da834-2d4219-2dac19-2dad7f2ee00e30 1/1 Running 0 5m 100 | ``` 101 | 102 | Note that the first notebook instance deployed may be slow to start up as the notebook image may need to be pulled down from the image registry. 103 | 104 | As this configuration doesn't provide access to the admin panel in JupyterHub, you can forcibly stop a notebook instance by running ``oc delete pod`` on the specific pod instance.
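For example, to stop the notebook instance shown in the listing above, you could run the following (the pod name will differ in your own deployment):

```
oc delete pod jupyterhub-nb-5b7eac5d-2da834-2d4219-2dac19-2dad7f2ee00e30
```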
105 | 106 | To delete the JupyterHub instance along with all notebook instances, run: 107 | 108 | ``` 109 | oc delete all,configmap,pvc,serviceaccount,rolebinding --selector app=jupyterhub 110 | ``` 111 | 112 | Deploying with a Custom Notebook Image 113 | -------------------------------------- 114 | 115 | To deploy JupyterHub and have it build a custom notebook image for you, run: 116 | 117 | ``` 118 | oc new-app --template jupyterhub-quickstart \ 119 | --param APPLICATION_NAME=jakevdp \ 120 | --param GIT_REPOSITORY_URL=https://github.com/jakevdp/PythonDataScienceHandbook \ 121 | --param BUILDER_IMAGE=s2i-minimal-notebook:3.5 122 | ``` 123 | 124 | The ``s2i-minimal-notebook:3.5`` builder image is used in this specific case instead of the default ``s2i-minimal-notebook:3.6`` build image, as the repository being used as input to the S2I build only supports Python 3.5. 125 | 126 | The notebook image will be built in parallel to JupyterHub being deployed. You will need to wait until the build of the image has completed before you can visit JupyterHub the first time. You can monitor the build of the image using the command: 127 | 128 | ``` 129 | oc logs bc/jakevdp-nb --follow 130 | ``` 131 | 132 | To deploy JupyterHub using a custom notebook image you had already created, run: 133 | 134 | ``` 135 | oc new-app --template jupyterhub-deployer \ 136 | --param APPLICATION_NAME=jakevdp \ 137 | --param NOTEBOOK_IMAGE=jakevdp-nb:latest 138 | ``` 139 | 140 | Because ``APPLICATION_NAME`` was supplied, the JupyterHub instance and notebooks in this case will all be labelled with ``jakevdp``. 141 | 142 | To get the hostname assigned for the JupyterHub instance, run: 143 | 144 | ``` 145 | oc get route/jakevdp 146 | ``` 147 | 148 | To delete the JupyterHub instance along with all notebook instances, run: 149 | 150 | ``` 151 | oc delete all,configmap,pvc,serviceaccount,rolebinding --selector app=jakevdp 152 | ``` 153 | 154 | Using the OpenShift Web Console 155 | ------------------------------- 156 | 157 | JupyterHub can also be deployed from the web console by selecting _Select from Project_ from the _Add to Project_ menu, filtering on _jupyter_ and choosing the appropriate template. 158 | 159 | Customising the JupyterHub Deployment 160 | ------------------------------------- 161 | 162 | JupyterHub, and how notebook images are deployed, can be customised through a ``jupyterhub_config.py`` file. The JupyterHub image created from this repository has a default version of this file which sets a number of defaults required for running JupyterHub in OpenShift. You can provide your own customisations, including overriding any defaults, in a couple of ways. 163 | 164 | The first is that when using the supplied templates to deploy JupyterHub, you can provide your own configuration through the ``JUPYTERHUB_CONFIG`` template parameter. This configuration will be read after the default configuration, with any settings being merged with the existing settings. 165 | 166 | The second is to use the JupyterHub image built from this repository as an S2I builder, to incorporate your own ``jupyterhub_config.py`` file from a hosted Git repository, or local directory if using a binary input build. This will be merged with the default settings before any configuration supplied via ``JUPYTERHUB_CONFIG`` when using a template to deploy the JupyterHub image. 167 | 168 | When using an S2I build, the repository can include any additional files to be incorporated into the JupyterHub image which may be needed for your customisations. 
This includes being able to supply a ``requirements.txt`` file for additional Python packages to be installed, as may be required by an authenticator to be used with JupyterHub. 169 | 170 | To illustrate overriding the configuration when deploying JupyterHub using the quick start template, create a local file ``jupyterhub_config.py`` which contains: 171 | 172 | ``` 173 | c.KubeSpawner.start_timeout = 180 174 | c.KubeSpawner.http_timeout = 120 175 | ``` 176 | 177 | Deploy JupyterHub using the quick start template as was done previously, but this time set the ``JUPYTERHUB_CONFIG`` template parameter. 178 | 179 | ``` 180 | oc new-app --template jupyterhub-quickstart \ 181 | --param APPLICATION_NAME=jakevdp \ 182 | --param GIT_REPOSITORY_URL=https://github.com/jakevdp/PythonDataScienceHandbook \ 183 | --param JUPYTERHUB_CONFIG="`cat jupyterhub_config.py`" 184 | ``` 185 | 186 | If you need to edit the configuration after the deployment has been made, you can edit the config map which was created: 187 | 188 | ``` 189 | oc edit configmap/jakevdp-cfg 190 | ``` 191 | 192 | JupyterHub only reads the configuration on startup, so trigger a new deployment of JupyterHub. 193 | 194 | ``` 195 | oc rollout latest dc/jakevdp 196 | ``` 197 | 198 | Note that triggering a new deployment will result in any running notebook instances being shutdown, and users will need to start up a new notebook instance through the JupyterHub interface. 199 | 200 | Providing a Selection of Images to Deploy 201 | ----------------------------------------- 202 | 203 | When deploying JupyterHub using the templates, the ``NOTEBOOK_IMAGE`` template parameter is used to specify the name of the image which is to be deployed when starting an instance for a user. If you want to provide users a choice of images you will need to define what is called a profile list in the configuration. The list of images will be presented in a drop down menu when the user requests a notebook instance be started through the JupyterHub web interface. 204 | 205 | ``` 206 | c.KubeSpawner.profile_list = [ 207 | { 208 | 'display_name': 'Minimal Notebook (CentOS 7 / Python 3.5)', 209 | 'kubespawner_override': { 210 | 'image_spec': 's2i-minimal-notebook:3.5' 211 | } 212 | }, 213 | { 214 | 'display_name': 'Minimal Notebook (CentOS 7 / Python 3.6)', 215 | 'default': True, 216 | 'kubespawner_override': { 217 | 'image_spec': 's2i-minimal-notebook:3.6' 218 | } 219 | } 220 | ] 221 | ``` 222 | 223 | This will override any image defined by the ``NOTEBOOK_IMAGE`` template parameter. 224 | 225 | For further information on using the profile list configuration see the ``KubeSpawner`` documentation. 226 | 227 | * https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html 228 | 229 | Using the Jupyter Project Notebook Images 230 | ----------------------------------------- 231 | 232 | The official Jupyter Project notebook images: 233 | 234 | * jupyter/base-notebook 235 | * jupyter/r-notebook 236 | * jupyter/minimal-notebook 237 | * jupyter/scipy-notebook 238 | * jupyter/tensorflow-notebook 239 | * jupyter/datascience-notebook 240 | * jupyter/pyspark-notebook 241 | * jupyter/all-spark-notebook 242 | 243 | will not work out of the box with OpenShift. This is because they have not been designed to work with an arbitrarily assigned user ID without additional configuration. The images are also very large and the size exceeds what can be deployed to hosted OpenShift environments such as OpenShift Online. 
244 | 245 | If you still want to run the official Jupyter Project notebook images, you can, but you will need to supply additional configuration to the ``KubeSpawner`` plugin to make these images work. For example: 246 | 247 | ``` 248 | c.KubeSpawner.profile_list = [ 249 | { 250 | 'display_name': 'Jupyter Project - Minimal Notebook', 251 | 'default': True, 252 | 'kubespawner_override': { 253 | 'image_spec': 'docker.io/jupyter/minimal-notebook:latest', 254 | 'supplemental_gids': [100] 255 | } 256 | }, 257 | { 258 | 'display_name': 'Jupyter Project - Scipy Notebook', 259 | 'kubespawner_override': { 260 | 'image_spec': 'docker.io/jupyter/scipy-notebook:latest', 261 | 'supplemental_gids': [100] 262 | } 263 | }, 264 | { 265 | 'display_name': 'Jupyter Project - Tensorflow Notebook', 266 | 'kubespawner_override': { 267 | 'image_spec': 'docker.io/jupyter/tensorflow-notebook:latest', 268 | 'supplemental_gids': [100] 269 | }, 270 | } 271 | ] 272 | ``` 273 | 274 | The special setting is ``supplemental_gids``, which needs to be set to include the UNIX group ID ``100``. 275 | 276 | If you want to set this globally for all images, rather than defining it for each image, or if you are not providing a choice of image, you could instead set: 277 | 278 | ``` 279 | c.KubeSpawner.supplemental_gids = [100] 280 | ``` 281 | 282 | Because of the size of these images, you may need to set a higher value for the spawner ``start_timeout`` setting to ensure starting a notebook instance from the image doesn't fail the first time a new node in the cluster is used for that image. Alternatively, you could have a cluster administrator pre-pull images to each node in the cluster. 283 | 284 | Enabling the JupyterLab Interface 285 | --------------------------------- 286 | 287 | Jupyter notebook images still use the classic web interface by default. If you want to enable the newer JupyterLab web interface, set the ``JUPYTER_ENABLE_LAB`` environment variable. 288 | 289 | ``` 290 | c.KubeSpawner.environment = { 'JUPYTER_ENABLE_LAB': 'true' } 291 | ``` 292 | 293 | If you are using a profile list and only want the JupyterLab interface enabled for certain images, add an ``environment`` setting to the dictionary of settings for just those images. 294 | 295 | ``` 296 | c.KubeSpawner.profile_list = [ 297 | { 298 | 'display_name': 'Minimal Notebook (Classic)', 299 | 'default': True, 300 | 'kubespawner_override': { 301 | 'image_spec': 's2i-minimal-notebook:3.6' 302 | } 303 | }, 304 | { 305 | 'display_name': 'Minimal Notebook (JupyterLab)', 306 | 'kubespawner_override': { 307 | 'image_spec': 's2i-minimal-notebook:3.6', 308 | 'environment': { 'JUPYTER_ENABLE_LAB': 'true' } 309 | } 310 | } 311 | ] 312 | ``` 313 | 314 | Controlling who can Access JupyterHub 315 | ------------------------------------- 316 | 317 | When the templates are used to deploy JupyterHub, anyone will be able to access it and create a notebook instance. To provide access to only selected users, you will need to define an authenticator as part of the JupyterHub configuration.
For example, if using GitHub as an OAuth provider, you would use: 318 | 319 | ``` 320 | from oauthenticator.github import GitHubOAuthenticator 321 | c.JupyterHub.authenticator_class = GitHubOAuthenticator 322 | 323 | c.GitHubOAuthenticator.oauth_callback_url = 'https://your-jupyterhub-hostname/hub/oauth_callback' 324 | c.GitHubOAuthenticator.client_id = 'your-client-key-from-github' 325 | c.GitHubOAuthenticator.client_secret = 'your-client-secret-from-github' 326 | 327 | c.Authenticator.admin_users = {'your-github-username'} 328 | c.Authenticator.whitelist = {'user1', 'user2', 'user3', 'user4'} 329 | ``` 330 | 331 | The ``oauthenticator`` package is installed by default and includes a number of commonly used authenticators. If you need to use a third party authenticator which requires additional Python packages to be installed, you will need to use the JupyterHub image as an S2I builder, where the source it is applied to includes a ``requirements.txt`` file listing the additional Python packages to install. This will create a custom JupyterHub image which you can then deploy by overriding the ``JUPYTERHUB_IMAGE`` template parameter. 332 | 333 | Allocating Persistent Storage to Users 334 | -------------------------------------- 335 | 336 | When a user creates their own notebooks in a notebook instance, they will lose any work they have done if the instance is stopped. 337 | 338 | To avoid this, you can configure JupyterHub to make a persistent volume claim and mount storage into the containers when a notebook instance is run. 339 | 340 | For the S2I-enabled notebook images built previously, where the working directory when the notebook is run is ``/opt/app-root/src``, you can add the following to the JupyterHub configuration. 341 | 342 | ``` 343 | c.KubeSpawner.user_storage_pvc_ensure = True 344 | 345 | c.KubeSpawner.pvc_name_template = '%s-nb-{username}' % c.KubeSpawner.hub_connect_ip 346 | c.KubeSpawner.user_storage_capacity = '1Gi' 347 | 348 | c.KubeSpawner.volumes = [ 349 | { 350 | 'name': 'data', 351 | 'persistentVolumeClaim': { 352 | 'claimName': c.KubeSpawner.pvc_name_template 353 | } 354 | } 355 | ] 356 | 357 | c.KubeSpawner.volume_mounts = [ 358 | { 359 | 'name': 'data', 360 | 'mountPath': '/opt/app-root/src' 361 | } 362 | ] 363 | ``` 364 | 365 | If you are presenting users with a list of images to choose from, you can instead add the spawner settings to selected images only, and use a different mount path for the persistent volume if necessary (a sketch of this appears below). 366 | 367 | Note that you should only use persistent storage when you are also using an authenticator and you know you have enough persistent volumes available to satisfy the needs of all potential users. This is because once a persistent volume is claimed and associated with a user, it is retained, even if the user's notebook instance is shut down. If you want to reclaim persistent volumes, you will need to delete them manually using ``oc delete pvc``. 368 | 369 | Also be aware that when you mount a persistent volume into a container, it will hide anything that was in the directory it is mounted on. If the working directory for the notebook in the image was pre-populated with files from an S2I build, these will be hidden if you use the same directory. When ``/opt/app-root/src`` is used as the mount point, only notebooks and other files created under that directory will be preserved. If you install additional Python packages, these will be lost when the notebook is shut down, and you will need to reinstall them.
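As a sketch of attaching the storage settings to selected images only, as referred to above, the same ``volumes`` and ``volume_mounts`` definitions can be supplied through ``kubespawner_override`` for just the profiles which should receive a persistent volume. This is illustrative only; the profile names are made up, and you should check the ``KubeSpawner`` documentation for the exact settings supported by your version:

```
c.KubeSpawner.user_storage_pvc_ensure = True

c.KubeSpawner.pvc_name_template = '%s-nb-{username}' % c.KubeSpawner.hub_connect_ip
c.KubeSpawner.user_storage_capacity = '1Gi'

c.KubeSpawner.profile_list = [
    {
        # No storage settings, so this profile uses ephemeral storage.
        'display_name': 'Minimal Notebook (ephemeral)',
        'default': True,
        'kubespawner_override': {
            'image_spec': 's2i-minimal-notebook:3.6'
        }
    },
    {
        # Only this profile mounts the user's persistent volume claim
        # over the notebook working directory.
        'display_name': 'Minimal Notebook (persistent)',
        'kubespawner_override': {
            'image_spec': 's2i-minimal-notebook:3.6',
            'volumes': [
                {
                    'name': 'data',
                    'persistentVolumeClaim': {
                        'claimName': c.KubeSpawner.pvc_name_template
                    }
                }
            ],
            'volume_mounts': [
                {
                    'name': 'data',
                    'mountPath': '/opt/app-root/src'
                }
            ]
        }
    }
]
```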
370 | 371 | If you want to be able to pre-populate the persistent volume with notebooks and other files from the S2I-built image, you can use the following configuration. This will also preserve additional Python packages which you might install. 372 | 373 | ``` 374 | c.KubeSpawner.user_storage_pvc_ensure = True 375 | 376 | c.KubeSpawner.pvc_name_template = '%s-nb-{username}' % c.KubeSpawner.hub_connect_ip 377 | c.KubeSpawner.user_storage_capacity = '1Gi' 378 | 379 | c.KubeSpawner.volumes = [ 380 | { 381 | 'name': 'data', 382 | 'persistentVolumeClaim': { 383 | 'claimName': c.KubeSpawner.pvc_name_template 384 | } 385 | } 386 | ] 387 | 388 | c.KubeSpawner.volume_mounts = [ 389 | { 390 | 'name': 'data', 391 | 'mountPath': '/opt/app-root', 392 | 'subPath': 'app-root' 393 | } 394 | ] 395 | 396 | c.KubeSpawner.singleuser_init_containers = [ 397 | { 398 | 'name': 'setup-volume', 399 | 'image': 's2i-minimal-notebook:3.6', 400 | 'command': [ 401 | 'setup-volume.sh', 402 | '/opt/app-root', 403 | '/mnt/app-root' 404 | ], 405 | 'resources': { 406 | 'limits': { 407 | 'memory': '256Mi' 408 | } 409 | }, 410 | 'volumeMounts': [ 411 | { 412 | 'name': 'data', 413 | 'mountPath': '/mnt' 414 | } 415 | ] 416 | } 417 | ] 418 | ``` 419 | 420 | Because the Python virtual environment and installed packages are kept in the persistent volume in this case, you will need to ensure that you have adequate space in the persistent volume and may need to increase the requested storage capacity. 421 | 422 | Culling Idle Notebook Instances 423 | ------------------------------- 424 | 425 | When a notebook instance is created for a user, it will keep running until the user stops it, or OpenShift decides for some reason to stop it. In the latter case, if the user was still using it, they would need to start it up again, as notebook instances will not be automatically restarted. 426 | 427 | If you have many more users of the JupyterHub instance than you have memory and CPU resources for, that is okay so long as you know not all users will use it at the same time, and you shut down notebook instances that have been idle in order to free up resources. 428 | 429 | To add culling of idle notebook instances, add the following to the JupyterHub configuration: 430 | 431 | ``` 432 | c.JupyterHub.services = [ 433 | { 434 | 'name': 'cull-idle', 435 | 'admin': True, 436 | 'command': ['cull-idle-servers', '--timeout=300'], 437 | } 438 | ] 439 | ``` 440 | 441 | The ``cull-idle-servers`` program is provided with the JupyterHub image. Adjust the value for the timeout argument as necessary. 442 | 443 | Multi-User Developer Workspace 444 | ------------------------------ 445 | 446 | The ``jupyterhub-workspace`` template combines a number of the above configuration options into one template. These include: 447 | 448 | * Authentication of users against the OpenShift cluster OAuth provider. 449 | * Optional specification of whitelisted users, including those who are admins. 450 | * Optional allocation of a persistent storage volume for each user. 451 | * Optional culling of idle sessions. 452 | 453 | Note that the template can only be used with Jupyter notebook images based on the ``s2i-minimal-notebook`` images. You cannot use official images from the Jupyter Project. 454 | 455 | The ``jupyterhub-workspace`` template can only be deployed by a cluster admin, as it needs to create an ``oauthclient`` resource definition, which requires cluster admin access.
456 | 457 | You will also need to supply template arguments giving the subdomain used by 458 | the cluster for hosting applications, and the name of the project the instance is 459 | being deployed to. 460 | 461 | To deploy the template and provide persistent storage and idle session culling, you can use: 462 | 463 | ``` 464 | oc new-app --template jupyterhub-workspace --param CLUSTER_SUBDOMAIN=A.B.C.D.nip.io --param SPAWNER_NAMESPACE=`oc project --short` --param VOLUME_SIZE=1Gi --param IDLE_TIMEOUT=3600 465 | ``` 466 | 467 | To delete the deployment, first run: 468 | 469 | ``` 470 | oc delete all,configmap,pvc,serviceaccount,rolebinding --selector app=jupyterhub 471 | ``` 472 | 473 | You then need to delete the ``oauthclient`` resource. Because this is a global resource, verify you are deleting the correct resource first by running: 474 | 475 | ``` 476 | oc get oauthclient --selector app=jupyterhub 477 | ``` 478 | 479 | If it is correct, then delete it using: 480 | 481 | ``` 482 | oc delete oauthclient --selector app=jupyterhub 483 | ``` 484 | 485 | If there is more than one resource matching the label selector, delete by name the one corresponding to the project you created the deployment in. The project name will be part of the resource name. 486 | -------------------------------------------------------------------------------- /build-configs/jupyterhub.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "List", 3 | "apiVersion": "v1", 4 | "items": [ 5 | { 6 | "kind": "ImageStream", 7 | "apiVersion": "image.openshift.io/v1", 8 | "metadata": { 9 | "name": "jupyterhub", 10 | "labels": { 11 | "build": "jupyterhub" 12 | } 13 | }, 14 | "spec": { 15 | "lookupPolicy": { 16 | "local": true 17 | } 18 | } 19 | }, 20 | { 21 | "kind": "BuildConfig", 22 | "apiVersion": "build.openshift.io/v1", 23 | "metadata": { 24 | "name": "jupyterhub", 25 | "labels": { 26 | "build": "jupyterhub" 27 | } 28 | }, 29 | "spec": { 30 | "triggers": [ 31 | { 32 | "type": "ConfigChange" 33 | }, 34 | { 35 | "type": "ImageChange" 36 | } 37 | ], 38 | "source": { 39 | "type": "Git", 40 | "git": { 41 | "uri": "https://github.com/jupyter-on-openshift/jupyterhub-quickstart.git", 42 | "ref": "3.4.0" 43 | } 44 | }, 45 | "strategy": { 46 | "type": "Source", 47 | "sourceStrategy": { 48 | "from": { 49 | "kind": "ImageStreamTag", 50 | "name": "python:3.6", 51 | "namespace": "openshift" 52 | } 53 | } 54 | }, 55 | "output": { 56 | "to": { 57 | "kind": "ImageStreamTag", 58 | "name": "jupyterhub:3.4.0" 59 | } 60 | } 61 | } 62 | } 63 | ] 64 | } 65 | -------------------------------------------------------------------------------- /builder/assemble: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x 4 | 5 | set -eo pipefail 6 | 7 | /usr/libexec/s2i/assemble 8 | -------------------------------------------------------------------------------- /builder/image_metadata.json: -------------------------------------------------------------------------------- 1 | { 2 | "labels": [ 3 | {"io.k8s.display-name":"JupyterHub"}, 4 | {"io.k8s.description":"JupyterHub."}, 5 | {"io.openshift.s2i.scripts-url":"image:///opt/app-root/builder"}, 6 | {"io.s2i.scripts-url":"image:///opt/app-root/builder"}, 7 | {"io.openshift.expose-services":"8080:http,8081:http"}, 8 | {"io.openshift.tags":"builder,python,jupyter"} 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /builder/run:
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x 4 | 5 | # Start the JupyterHub instance. 6 | 7 | trap 'kill -TERM $PID' TERM INT 8 | 9 | start-jupyterhub.sh & 10 | 11 | PID=$! 12 | wait $PID 13 | trap - TERM INT 14 | wait $PID 15 | STATUS=$? 16 | exit $STATUS 17 | -------------------------------------------------------------------------------- /builder/save-artifacts: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | true 4 | -------------------------------------------------------------------------------- /image-streams/jupyterhub.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "List", 3 | "apiVersion": "v1", 4 | "items": [ 5 | { 6 | "kind": "ImageStream", 7 | "apiVersion": "image.openshift.io/v1", 8 | "metadata": { 9 | "name": "jupyterhub" 10 | }, 11 | "spec": { 12 | "lookupPolicy": { 13 | "local": true 14 | }, 15 | "tags": [ 16 | { 17 | "name": "3.4.0", 18 | "from": { 19 | "kind": "DockerImage", 20 | "name": "quay.io/jupyteronopenshift/jupyterhub:3.4.0" 21 | } 22 | } 23 | ] 24 | } 25 | } 26 | ] 27 | } 28 | -------------------------------------------------------------------------------- /jupyterhub_config-workspace.py: -------------------------------------------------------------------------------- 1 | # Authenticate users against the OpenShift OAuth provider. 2 | 3 | c.JupyterHub.authenticator_class = "openshift" 4 | 5 | from oauthenticator.openshift import OpenShiftOAuthenticator 6 | OpenShiftOAuthenticator.scope = ['user:full'] 7 | 8 | client_id = '%s-%s-users' % (application_name, namespace) 9 | client_secret = os.environ['OAUTH_CLIENT_SECRET'] 10 | 11 | c.OpenShiftOAuthenticator.client_id = client_id 12 | c.OpenShiftOAuthenticator.client_secret = client_secret 13 | c.Authenticator.enable_auth_state = True 14 | 15 | c.CryptKeeper.keys = [ client_secret.encode('utf-8') ] 16 | 17 | c.OpenShiftOAuthenticator.oauth_callback_url = ( 18 | 'https://%s/hub/oauth_callback' % public_hostname) 19 | 20 | # Add any additional JupyterHub configuration settings. 21 | 22 | c.KubeSpawner.extra_labels = { 23 | 'spawner': 'workspace', 24 | 'class': 'session', 25 | 'user': '{username}' 26 | } 27 | 28 | # Set up the list of registered users and any users nominated as admins. 29 | 30 | if os.path.exists('/opt/app-root/configs/admin_users.txt'): 31 | with open('/opt/app-root/configs/admin_users.txt') as fp: 32 | content = fp.read().strip() 33 | if content: 34 | c.Authenticator.admin_users = set(content.split()) 35 | 36 | if os.path.exists('/opt/app-root/configs/user_whitelist.txt'): 37 | with open('/opt/app-root/configs/user_whitelist.txt') as fp: 38 | c.Authenticator.whitelist = set(fp.read().strip().split()) 39 | 40 | # For workshops we provide each user with a persistent volume so they 41 | # don't lose their work. This is mounted on /opt/app-root, so we need 42 | # to copy the contents from the image into the persistent volume the 43 | # first time using an init container.
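# Note: in the storage configuration below, the claim name template is set
# to the pod name template, so each user gets a uniquely named persistent
# volume claim tied to their pod name.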
44 | 45 | volume_size = os.environ.get('JUPYTERHUB_VOLUME_SIZE') 46 | 47 | if volume_size: 48 | c.KubeSpawner.pvc_name_template = c.KubeSpawner.pod_name_template 49 | 50 | c.KubeSpawner.storage_pvc_ensure = True 51 | 52 | c.KubeSpawner.storage_capacity = volume_size 53 | 54 | c.KubeSpawner.storage_access_modes = ['ReadWriteOnce'] 55 | 56 | c.KubeSpawner.volumes.extend([ 57 | { 58 | 'name': 'data', 59 | 'persistentVolumeClaim': { 60 | 'claimName': c.KubeSpawner.pvc_name_template 61 | } 62 | } 63 | ]) 64 | 65 | c.KubeSpawner.volume_mounts.extend([ 66 | { 67 | 'name': 'data', 68 | 'mountPath': '/opt/app-root', 69 | 'subPath': 'workspace' 70 | } 71 | ]) 72 | 73 | c.KubeSpawner.init_containers.extend([ 74 | { 75 | 'name': 'setup-volume', 76 | 'image': '%s' % c.KubeSpawner.image_spec, 77 | 'command': [ 78 | '/opt/app-root/bin/setup-volume.sh', 79 | '/opt/app-root', 80 | '/mnt/workspace' 81 | ], 82 | "resources": { 83 | "limits": { 84 | "memory": os.environ.get('NOTEBOOK_MEMORY', '128Mi') 85 | }, 86 | "requests": { 87 | "memory": os.environ.get('NOTEBOOK_MEMORY', '128Mi') 88 | } 89 | }, 90 | 'volumeMounts': [ 91 | { 92 | 'name': 'data', 93 | 'mountPath': '/mnt' 94 | } 95 | ] 96 | } 97 | ]) 98 | 99 | # Make modifications to the pod based on the user and type of session. 100 | 101 | from tornado import gen 102 | 103 | @gen.coroutine 104 | def modify_pod_hook(spawner, pod): 105 | pod.spec.automount_service_account_token = True 106 | 107 | # Grab the OpenShift user access token from the login state. 108 | 109 | auth_state = yield spawner.user.get_auth_state() 110 | access_token = auth_state['access_token'] 111 | 112 | # Set the session access token from the OpenShift login. 113 | 114 | pod.spec.containers[0].env.append( 115 | dict(name='OPENSHIFT_TOKEN', value=access_token)) 116 | 117 | # See if a template for the project name has been specified. 118 | # Try expanding the name, substituting the username. If the 119 | # result is different we use it; if it is the same we don't, 120 | # as that would suggest the name isn't unique per user. 121 | 122 | project = os.environ.get('OPENSHIFT_PROJECT') 123 | 124 | if project: 125 | name = project.format(username=spawner.user.name) 126 | if name != project: 127 | pod.spec.containers[0].env.append( 128 | dict(name='PROJECT_NAMESPACE', value=name)) 129 | 130 | # Ensure the project is created if it doesn't exist. 131 | 132 | pod.spec.containers[0].env.append( 133 | dict(name='OPENSHIFT_PROJECT', value=name)) 134 | 135 | return pod 136 | 137 | c.KubeSpawner.modify_pod_hook = modify_pod_hook 138 | 139 | # Set up culling of idle notebook instances if the timeout parameter is supplied.
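# The JUPYTERHUB_IDLE_TIMEOUT environment variable checked below is assumed
# to be populated from the IDLE_TIMEOUT parameter of the jupyterhub-workspace
# template described in the README.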
140 | 141 | idle_timeout = os.environ.get('JUPYTERHUB_IDLE_TIMEOUT') 142 | 143 | if idle_timeout and int(idle_timeout): 144 | cull_idle_servers_cmd = ['/opt/app-root/bin/cull-idle-servers'] 145 | 146 | cull_idle_servers_cmd.append('--timeout=%s' % idle_timeout) 147 | 148 | c.JupyterHub.services.extend([ 149 | { 150 | 'name': 'cull-idle', 151 | 'admin': True, 152 | 'command': cull_idle_servers_cmd, 153 | } 154 | ]) 155 | -------------------------------------------------------------------------------- /jupyterhub_config-workspace.sh: -------------------------------------------------------------------------------- 1 | KUBERNETES_SERVER_URL="https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT" 2 | OAUTH_METADATA_URL="$KUBERNETES_SERVER_URL/.well-known/oauth-authorization-server" 3 | OAUTH_ISSUER_ADDRESS=`curl -ks $OAUTH_METADATA_URL | grep '"issuer":' | sed -e 's%.*https://%https://%' -e 's%",%%'` 4 | 5 | export OPENSHIFT_URL=$OAUTH_ISSUER_ADDRESS 6 | export OPENSHIFT_REST_API_URL=$KUBERNETES_SERVER_URL 7 | export OPENSHIFT_AUTH_API_URL=$OAUTH_ISSUER_ADDRESS 8 | -------------------------------------------------------------------------------- /jupyterhub_config.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import wrapt 4 | 5 | from kubernetes.client.configuration import Configuration 6 | from kubernetes.config.incluster_config import load_incluster_config 7 | from kubernetes.client.api_client import ApiClient 8 | from kubernetes.client.rest import ApiException 9 | from openshift.dynamic import DynamicClient 10 | 11 | # Helper function for doing unit conversions or translations if needed. 12 | 13 | def convert_size_to_bytes(size): 14 | multipliers = { 15 | 'k': 1000, 16 | 'm': 1000**2, 17 | 'g': 1000**3, 18 | 't': 1000**4, 19 | 'ki': 1024, 20 | 'mi': 1024**2, 21 | 'gi': 1024**3, 22 | 'ti': 1024**4, 23 | } 24 | 25 | size = str(size) 26 | 27 | for suffix in multipliers: 28 | if size.lower().endswith(suffix): 29 | return int(size[0:-len(suffix)]) * multipliers[suffix] 30 | else: 31 | if size.lower().endswith('b'): 32 | return int(size[0:-1]) 33 | 34 | try: 35 | return int(size) 36 | except ValueError: 37 | raise RuntimeError('"%s" is not a valid memory specification. Must be an integer or a string with suffix K, M, G, T, Ki, Mi, Gi or Ti.' % size) 38 | 39 | # Initialise the client for the REST API used for doing configuration. 40 | # 41 | # XXX We currently have a workaround here for OpenShift 4.0 beta versions 42 | # which disables verification of the certificate. If we don't use this, the 43 | # Python openshift/kubernetes clients will fail. We also disable any 44 | # warnings from urllib3 to get rid of the noise this creates in the logs. 45 | 46 | load_incluster_config() 47 | 48 | import urllib3 49 | urllib3.disable_warnings() 50 | instance = Configuration() 51 | instance.verify_ssl = False 52 | Configuration.set_default(instance) 53 | 54 | api_client = DynamicClient(ApiClient()) 55 | 56 | image_stream_resource = api_client.resources.get( 57 | api_version='image.openshift.io/v1', kind='ImageStream') 58 | 59 | route_resource = api_client.resources.get( 60 | api_version='route.openshift.io/v1', kind='Route') 61 | 62 | # Work out the name of the JupyterHub deployment passed in the environment. 63 | 64 | application_name = os.environ.get('APPLICATION_NAME', 'jupyterhub') 65 | 66 | # Work out the name of the namespace in which we are being deployed.
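# Kubernetes mounts service account details into every pod at this
# well-known path; the "namespace" file contains the name of the project
# the pod is running in.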
67 | 68 | service_account_path = '/var/run/secrets/kubernetes.io/serviceaccount' 69 | 70 | with open(os.path.join(service_account_path, 'namespace')) as fp: 71 | namespace = fp.read().strip() 72 | 73 | # Work out the hostname for the exposed route of the JupyterHub server. 74 | 75 | routes = route_resource.get(namespace=namespace) 76 | 77 | def extract_hostname(routes, name): 78 | for route in routes.items: 79 | if route.metadata.name == name: 80 | return route.spec.host 81 | 82 | public_hostname = extract_hostname(routes, application_name) 83 | 84 | if not public_hostname: 85 | raise RuntimeError('Cannot calculate external host name for JupyterHub.') 86 | 87 | # Helper function for determining the correct name for the image. We 88 | # need to do this for references to image streams because of the image 89 | # lookup policy often not being correctly set up on OpenShift clusters. 90 | 91 | def resolve_image_name(name): 92 | # If the image name contains a slash, we assume it is already 93 | # referring to an image on some image registry. Even if it does 94 | # not contain a slash, it may still be hosted on docker.io. 95 | 96 | if name.find('/') != -1: 97 | return name 98 | 99 | # Separate the actual source image name and tag for the image from the 100 | # name. If the tag is not supplied, default to 'latest'. 101 | 102 | parts = name.split(':', 1) 103 | 104 | if len(parts) == 1: 105 | source_image, tag = parts[0], 'latest' 106 | else: 107 | source_image, tag = parts 108 | 109 | # See if there is an image stream in the current project with the 110 | # target name. 111 | 112 | try: 113 | image_stream = image_stream_resource.get(namespace=namespace, 114 | name=source_image) 115 | 116 | except ApiException as e: 117 | if e.status not in (403, 404): 118 | raise 119 | 120 | return name 121 | 122 | # If we get here then the image stream exists with the target name. 123 | # We need to determine if the tag exists. If it does exist, we 124 | # extract out the full name of the image including the reference 125 | # to the image registry it is hosted on. 126 | 127 | if image_stream.status.tags: 128 | for entry in image_stream.status.tags: 129 | if entry.tag == tag: 130 | registry_image = image_stream.status.dockerImageRepository 131 | if registry_image: 132 | return '%s:%s' % (registry_image, tag) 133 | 134 | # Use the original value if we can't find a matching tag. 135 | 136 | return name 137 | 138 | # Define the default configuration for the JupyterHub application.
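# Note that resolve_image_name() defined above is applied to the notebook
# image setting further below, so that bare image stream references still
# resolve to a registry-qualified image name.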
139 | 140 | c.Spawner.environment = dict() 141 | 142 | c.JupyterHub.services = [] 143 | 144 | c.KubeSpawner.init_containers = [] 145 | 146 | c.KubeSpawner.extra_containers = [] 147 | 148 | c.JupyterHub.extra_handlers = [] 149 | 150 | c.JupyterHub.port = 8080 151 | 152 | c.JupyterHub.hub_ip = '0.0.0.0' 153 | c.JupyterHub.hub_port = 8081 154 | 155 | c.JupyterHub.hub_connect_ip = application_name 156 | 157 | c.ConfigurableHTTPProxy.api_url = 'http://127.0.0.1:8082' 158 | 159 | c.Spawner.start_timeout = 120 160 | c.Spawner.http_timeout = 60 161 | 162 | c.KubeSpawner.port = 8080 163 | 164 | c.KubeSpawner.common_labels = { 'app': application_name } 165 | 166 | c.KubeSpawner.uid = os.getuid() 167 | c.KubeSpawner.fs_gid = os.getuid() 168 | 169 | c.KubeSpawner.extra_annotations = { 170 | "alpha.image.policy.openshift.io/resolve-names": "*" 171 | } 172 | 173 | c.KubeSpawner.cmd = ['start-singleuser.sh'] 174 | 175 | c.KubeSpawner.pod_name_template = '%s-nb-{username}' % application_name 176 | 177 | c.JupyterHub.admin_access = True 178 | 179 | if os.environ.get('JUPYTERHUB_COOKIE_SECRET'): 180 | c.JupyterHub.cookie_secret = os.environ[ 181 | 'JUPYTERHUB_COOKIE_SECRET'].encode('UTF-8') 182 | else: 183 | c.JupyterHub.cookie_secret_file = '/opt/app-root/data/cookie_secret' 184 | 185 | if os.environ.get('JUPYTERHUB_DATABASE_PASSWORD'): 186 | c.JupyterHub.db_url = 'postgresql://jupyterhub:%s@%s:5432/%s' % ( 187 | os.environ['JUPYTERHUB_DATABASE_PASSWORD'], 188 | os.environ['JUPYTERHUB_DATABASE_HOST'], 189 | os.environ.get('JUPYTERHUB_DATABASE_NAME', 'jupyterhub')) 190 | else: 191 | c.JupyterHub.db_url = '/opt/app-root/data/database.sqlite' 192 | 193 | c.JupyterHub.authenticator_class = 'tmpauthenticator.TmpAuthenticator' 194 | 195 | c.JupyterHub.spawner_class = 'kubespawner.KubeSpawner' 196 | 197 | c.KubeSpawner.image_spec = resolve_image_name( 198 | os.environ.get('JUPYTERHUB_NOTEBOOK_IMAGE', 199 | 's2i-minimal-notebook:3.6')) 200 | 201 | if os.environ.get('JUPYTERHUB_NOTEBOOK_MEMORY'): 202 | c.Spawner.mem_limit = convert_size_to_bytes(os.environ['JUPYTERHUB_NOTEBOOK_MEMORY']) 203 | 204 | notebook_interface = os.environ.get('JUPYTERHUB_NOTEBOOK_INTERFACE') 205 | 206 | if notebook_interface: 207 | c.Spawner.environment['JUPYTER_NOTEBOOK_INTERFACE'] = notebook_interface 208 | 209 | # Workaround bug in minishift where a service cannot be contacted from a 210 | # pod which backs the service. For further details see the minishift issue 211 | # https://github.com/minishift/minishift/issues/2400. 212 | # 213 | # What these workarounds do is monkey patch the JupyterHub proxy client 214 | # API code, and the code for creating the environment for local service 215 | # processes, and when it sees something which uses the service name as 216 | # the target in a URL, it replaces it with localhost. These work because 217 | # the proxy/service processes are in the same pod. It is not possible to 218 | # change hub_connect_ip to localhost because that is passed to other 219 | # pods which need to contact back to JupyterHub, and so it must be left 220 | # as the service name. 
221 | 222 | @wrapt.patch_function_wrapper('jupyterhub.proxy', 'ConfigurableHTTPProxy.add_route') 223 | def _wrapper_add_route(wrapped, instance, args, kwargs): 224 | def _extract_args(routespec, target, data, *_args, **_kwargs): 225 | return (routespec, target, data, _args, _kwargs) 226 | 227 | routespec, target, data, _args, _kwargs = _extract_args(*args, **kwargs) 228 | 229 | old = 'http://%s:%s' % (c.JupyterHub.hub_connect_ip, c.JupyterHub.hub_port) 230 | new = 'http://127.0.0.1:%s' % c.JupyterHub.hub_port 231 | 232 | if target.startswith(old): 233 | target = target.replace(old, new) 234 | 235 | return wrapped(routespec, target, data, *_args, **_kwargs) 236 | 237 | @wrapt.patch_function_wrapper('jupyterhub.spawner', 'LocalProcessSpawner.get_env') 238 | def _wrapper_get_env(wrapped, instance, args, kwargs): 239 | env = wrapped(*args, **kwargs) 240 | 241 | target = env.get('JUPYTERHUB_API_URL') 242 | 243 | old = 'http://%s:%s' % (c.JupyterHub.hub_connect_ip, c.JupyterHub.hub_port) 244 | new = 'http://127.0.0.1:%s' % c.JupyterHub.hub_port 245 | 246 | if target and target.startswith(old): 247 | target = target.replace(old, new) 248 | env['JUPYTERHUB_API_URL'] = target 249 | 250 | return env 251 | 252 | # Load configuration overrides based on configuration type. 253 | 254 | configuration_type = os.environ.get('CONFIGURATION_TYPE') 255 | 256 | if configuration_type: 257 | config_file = '/opt/app-root/etc/jupyterhub_config-%s.py' % configuration_type 258 | 259 | if os.path.exists(config_file): 260 | with open(config_file) as fp: 261 | exec(compile(fp.read(), config_file, 'exec'), globals()) 262 | 263 | # Load configuration included in the image. 264 | 265 | image_config_file = '/opt/app-root/src/.jupyter/jupyterhub_config.py' 266 | 267 | if os.path.exists(image_config_file): 268 | with open(image_config_file) as fp: 269 | exec(compile(fp.read(), image_config_file, 'exec'), globals()) 270 | 271 | # Load configuration provided via the environment. 272 | 273 | environ_config_file = '/opt/app-root/configs/jupyterhub_config.py' 274 | 275 | if os.path.exists(environ_config_file): 276 | with open(environ_config_file) as fp: 277 | exec(compile(fp.read(), environ_config_file, 'exec'), globals()) 278 | -------------------------------------------------------------------------------- /jupyterhub_config.sh: -------------------------------------------------------------------------------- 1 | if [ x"$CONFIGURATION_TYPE" != x"" ]; then 2 | if [ -f /opt/app-root/etc/jupyterhub_config-$CONFIGURATION_TYPE.sh ]; then 3 | . /opt/app-root/etc/jupyterhub_config-$CONFIGURATION_TYPE.sh 4 | fi 5 | fi 6 | 7 | if [ -f /opt/app-root/src/.jupyter/jupyterhub_config.sh ]; then 8 | . /opt/app-root/src/.jupyter/jupyterhub_config.sh 9 | fi 10 | 11 | if [ -f /opt/app-root/configs/jupyterhub_config.sh ]; then 12 | . 
/opt/app-root/configs/jupyterhub_config.sh 13 | fi 14 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | kubernetes==9.0.1 2 | jupyterhub==1.0.0 3 | #jupyterhub-kubespawner==0.10.1 4 | git+https://github.com/jupyterhub/kubespawner.git@a945ef01410867b39e0c174d362a8702bbaa15e9#egg=jupyterhub-kubespawner 5 | git+https://github.com/jupyterhub/wrapspawner.git@5f2b7075f77d0c1c49066682a8e8adad0dab76db 6 | jupyterhub-tmpauthenticator==0.6 7 | oauthenticator==0.9.0 8 | jupyterhub-ldapauthenticator==1.2.2 9 | psycopg2==2.8.4 10 | openshift==0.10.0 11 | wrapt==1.11.2 12 | -------------------------------------------------------------------------------- /scripts/backup-user-details: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source /opt/app-root/etc/scl_enable 4 | 5 | export PYTHONUNBUFFERED=1 6 | 7 | exec python `dirname $0`/backup-user-details.py "$@" 8 | -------------------------------------------------------------------------------- /scripts/backup-user-details.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | import json 5 | import time 6 | 7 | from functools import partial 8 | 9 | from tornado.gen import coroutine 10 | from tornado.httpclient import AsyncHTTPClient, HTTPRequest 11 | from tornado.ioloop import IOLoop, PeriodicCallback 12 | from tornado.options import define, options, parse_command_line 13 | 14 | from kubernetes import config, client 15 | from kubernetes.client.rest import ApiException 16 | from kubernetes.client.models import V1ConfigMap, V1ObjectMeta 17 | 18 | service_name = os.environ.get('JUPYTERHUB_SERVICE_NAME') 19 | 20 | with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace') as fp: 21 | namespace = fp.read().strip() 22 | 23 | os.environ['KUBERNETES_SERVICE_HOST'] = 'openshift.default.svc.cluster.local' 24 | os.environ['KUBERNETES_SERVICE_PORT'] = '443' 25 | 26 | config.load_incluster_config() 27 | 28 | corev1api = client.CoreV1Api() 29 | 30 | cached_admin_users = None 31 | cached_user_whitelist = None 32 | 33 | @coroutine 34 | def backup_details(url, api_token, interval, backups, config_map): 35 | # Fetch the list of users. 
36 | 37 | global cached_admin_users 38 | global cached_user_whitelist 39 | 40 | auth_header = { 'Authorization': 'token %s' % api_token } 41 | req = HTTPRequest(url=url + '/users', headers=auth_header) 42 | client = AsyncHTTPClient() 43 | resp = yield client.fetch(req) 44 | users = json.loads(resp.body.decode('utf8', 'replace')) 45 | 46 | admin_users = set() 47 | user_whitelist = set() 48 | 49 | for user in users: 50 | if user['admin']: 51 | admin_users.add(user['name']) 52 | else: 53 | user_whitelist.add(user['name']) 54 | 55 | timestamp = time.strftime('%Y-%m-%d-%H-%M-%S', time.gmtime()) 56 | 57 | os.makedirs(backups, exist_ok=True) 58 | 59 | if admin_users != cached_admin_users: 60 | name = 'admin_users-%s.txt' % timestamp 61 | path = os.path.join(backups, name) 62 | 63 | print('creating backup: %s' % path) 64 | 65 | with open(path, 'w') as fp: 66 | fp.write('\n'.join(admin_users)) 67 | fp.write('\n') 68 | 69 | cached_admin_users = admin_users 70 | 71 | try: 72 | latest = os.path.join(backups, 'admin_users-latest.txt') 73 | if os.path.exists(latest): 74 | os.unlink(latest) 75 | os.symlink(name, latest) 76 | 77 | except OSError: 78 | print('ERROR: could not update: admin_users-latest.txt') 79 | pass 80 | 81 | if user_whitelist != cached_user_whitelist: 82 | name = 'user_whitelist-%s.txt' % timestamp 83 | path = os.path.join(backups, name) 84 | 85 | print('creating backup: %s' % path) 86 | 87 | with open(path, 'w') as fp: 88 | fp.write('\n'.join(user_whitelist)) 89 | fp.write('\n') 90 | 91 | cached_user_whitelist = user_whitelist 92 | 93 | try: 94 | latest = os.path.join(backups, 'user_whitelist-latest.txt') 95 | if os.path.exists(latest): 96 | os.unlink(latest) 97 | os.symlink(name, latest) 98 | 99 | except OSError: 100 | print('ERROR: could not update: user_whitelist-latest.txt') 101 | pass 102 | 103 | if config_map: 104 | config_map_object = V1ConfigMap() 105 | config_map_object.kind = "ConfigMap" 106 | config_map_object.api_version = "v1" 107 | 108 | config_map_object.metadata = V1ObjectMeta( 109 | name=config_map, labels={'app': service_name}) 110 | 111 | config_map_object.data = { 112 | 'admin_users.txt': '\n'.join(admin_users)+'\n', 113 | 'user_whitelist.txt': '\n'.join(user_whitelist)+'\n' 114 | } 115 | 116 | try: 117 | corev1api.replace_namespaced_config_map(config_map, 118 | namespace, config_map_object) 119 | 120 | except ApiException as e: 121 | if e.status == 404: 122 | try: 123 | corev1api.create_namespaced_config_map( 124 | namespace, config_map_object) 125 | 126 | except Exception as e: 127 | print('cannot update config map %s: %s' % (config_map, e)) 128 | 129 | else: 130 | print('cannot update config map %s: %s' % (config_map, e)) 131 | 132 | except Exception as e: 133 | print('cannot update config map %s: %s' % (config_map, e)) 134 | 135 | if __name__ == '__main__': 136 | define('url', default=os.environ.get('JUPYTERHUB_API_URL'), 137 | help="The JupyterHub API URL") 138 | define('interval', default=300, 139 | help="Time (in seconds) between checking for changes.") 140 | define('backups', default='/tmp', 141 | help="Directory to save backup files.") 142 | define('config-map', default='', 143 | help="Name of config map to save backup files.") 144 | 145 | parse_command_line() 146 | 147 | api_token = os.environ['JUPYTERHUB_API_TOKEN'] 148 | 149 | loop = IOLoop.current() 150 | 151 | task = partial(backup_details, url=options.url, api_token=api_token, 152 | interval=options.interval, backups=options.backups, 153 | config_map=options.config_map) 154 | 155 | # Schedule the 
first backup immediately because the periodic callback 156 | # doesn't start until the end of the first interval. 157 | 158 | loop.add_callback(task) 159 | 160 | # Schedule the periodic backup. 161 | 162 | periodic_callback = PeriodicCallback(task, 1000*options.interval) 163 | periodic_callback.start() 164 | 165 | try: 166 | loop.start() 167 | except KeyboardInterrupt: 168 | pass 169 | -------------------------------------------------------------------------------- /scripts/cull-idle-servers: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source /opt/app-root/etc/scl_enable 4 | 5 | export PYTHONUNBUFFERED=1 6 | 7 | exec python `dirname $0`/cull-idle-servers.py "$@" 8 | -------------------------------------------------------------------------------- /scripts/cull-idle-servers.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """script to monitor and cull idle single-user servers 3 | 4 | Caveats: 5 | 6 | last_activity is not updated with high frequency, 7 | so cull timeout should be greater than the sum of: 8 | 9 | - single-user websocket ping interval (default: 30s) 10 | - JupyterHub.last_activity_interval (default: 5 minutes) 11 | 12 | You can run this as a service managed by JupyterHub with this in your config:: 13 | 14 | 15 | c.JupyterHub.services = [ 16 | { 17 | 'name': 'cull-idle', 18 | 'admin': True, 19 | 'command': 'python3 cull_idle_servers.py --timeout=3600'.split(), 20 | } 21 | ] 22 | 23 | Or run it manually by generating an API token and storing it in `JUPYTERHUB_API_TOKEN`: 24 | 25 | export JUPYTERHUB_API_TOKEN=`jupyterhub token` 26 | python3 cull_idle_servers.py [--timeout=900] [--url=http://127.0.0.1:8081/hub/api] 27 | 28 | This script uses the same ``--timeout`` and ``--max-age`` values for 29 | culling users and users' servers. If you want a different value for 30 | users and servers, you should add this script to the services list 31 | twice, just with different ``name``s, different values, and one with 32 | the ``--cull-users`` option.
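For example (an illustrative sketch only; the service names and timeout
values here are arbitrary)::

    c.JupyterHub.services = [
        {
            'name': 'cull-idle',
            'admin': True,
            'command': 'python3 cull_idle_servers.py --timeout=3600'.split(),
        },
        {
            'name': 'cull-users',
            'admin': True,
            'command': 'python3 cull_idle_servers.py --timeout=86400 --cull-users'.split(),
        },
    ]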
33 | """ 34 | 35 | from datetime import datetime, timezone 36 | from functools import partial 37 | import json 38 | import os 39 | 40 | try: 41 | from urllib.parse import quote 42 | except ImportError: 43 | from urllib import quote 44 | 45 | import dateutil.parser 46 | 47 | from tornado.gen import coroutine, multi 48 | from tornado.locks import Semaphore 49 | from tornado.log import app_log 50 | from tornado.httpclient import AsyncHTTPClient, HTTPRequest 51 | from tornado.ioloop import IOLoop, PeriodicCallback 52 | from tornado.options import define, options, parse_command_line 53 | 54 | 55 | def parse_date(date_string): 56 | """Parse a timestamp 57 | 58 | If it doesn't have a timezone, assume utc 59 | 60 | Returned datetime object will always be timezone-aware 61 | """ 62 | dt = dateutil.parser.parse(date_string) 63 | if not dt.tzinfo: 64 | # assume naïve timestamps are UTC 65 | dt = dt.replace(tzinfo=timezone.utc) 66 | return dt 67 | 68 | 69 | def format_td(td): 70 | """ 71 | Nicely format a timedelta object 72 | 73 | as HH:MM:SS 74 | """ 75 | if td is None: 76 | return "unknown" 77 | if isinstance(td, str): 78 | return td 79 | seconds = int(td.total_seconds()) 80 | h = seconds // 3600 81 | seconds = seconds % 3600 82 | m = seconds // 60 83 | seconds = seconds % 60 84 | return "{h:02}:{m:02}:{seconds:02}".format(h=h, m=m, seconds=seconds) 85 | 86 | 87 | @coroutine 88 | def cull_idle(url, api_token, inactive_limit, cull_users=False, max_age=0, concurrency=10): 89 | """Shutdown idle single-user servers 90 | 91 | If cull_users, inactive *users* will be deleted as well. 92 | """ 93 | auth_header = { 94 | 'Authorization': 'token %s' % api_token, 95 | } 96 | req = HTTPRequest( 97 | url=url + '/users', 98 | headers=auth_header, 99 | ) 100 | now = datetime.now(timezone.utc) 101 | client = AsyncHTTPClient() 102 | 103 | if concurrency: 104 | semaphore = Semaphore(concurrency) 105 | @coroutine 106 | def fetch(req): 107 | """client.fetch wrapped in a semaphore to limit concurrency""" 108 | yield semaphore.acquire() 109 | try: 110 | return (yield client.fetch(req)) 111 | finally: 112 | yield semaphore.release() 113 | else: 114 | fetch = client.fetch 115 | 116 | resp = yield fetch(req) 117 | users = json.loads(resp.body.decode('utf8', 'replace')) 118 | futures = [] 119 | 120 | @coroutine 121 | def handle_server(user, server_name, server): 122 | """Handle (maybe) culling a single server 123 | 124 | Returns True if server is now stopped (user removable), 125 | False otherwise. 126 | """ 127 | log_name = user['name'] 128 | if server_name: 129 | log_name = '%s/%s' % (user['name'], server_name) 130 | if server.get('pending'): 131 | app_log.warning( 132 | "Not culling server %s with pending %s", 133 | log_name, server['pending']) 134 | return False 135 | 136 | # jupyterhub < 0.9 defined 'server.url' once the server was ready 137 | # as an *implicit* signal that the server was ready. 138 | # 0.9 adds a dedicated, explicit 'ready' field. 139 | # By current (0.9) definitions, servers that have no pending 140 | # events and are not ready shouldn't be in the model, 141 | # but let's check just to be safe. 
142 | 143 | if not server.get('ready', bool(server['url'])): 144 | app_log.warning( 145 | "Not culling not-ready not-pending server %s: %s", 146 | log_name, server) 147 | return False 148 | 149 | if server.get('started'): 150 | age = now - parse_date(server['started']) 151 | else: 152 | # started may be undefined on jupyterhub < 0.9 153 | age = None 154 | 155 | # check last activity 156 | # last_activity can be None in 0.9 157 | if server['last_activity']: 158 | inactive = now - parse_date(server['last_activity']) 159 | else: 160 | # no activity yet, use start date 161 | # last_activity may be None with jupyterhub 0.9, 162 | # which introduces the 'started' field which is never None 163 | # for running servers 164 | inactive = age 165 | 166 | should_cull = (inactive is not None and 167 | inactive.total_seconds() >= inactive_limit) 168 | if should_cull: 169 | app_log.info( 170 | "Culling server %s (inactive for %s)", 171 | log_name, format_td(inactive)) 172 | 173 | if max_age and not should_cull: 174 | # only check started if max_age is specified 175 | # so that we can still be compatible with jupyterhub 0.8 176 | # which doesn't define the 'started' field 177 | if age is not None and age.total_seconds() >= max_age: 178 | app_log.info( 179 | "Culling server %s (age: %s, inactive for %s)", 180 | log_name, format_td(age), format_td(inactive)) 181 | should_cull = True 182 | 183 | if not should_cull: 184 | app_log.debug( 185 | "Not culling server %s (age: %s, inactive for %s)", 186 | log_name, format_td(age), format_td(inactive)) 187 | return False 188 | 189 | req = HTTPRequest( 190 | url=url + '/users/%s/server' % quote(user['name']), 191 | method='DELETE', 192 | headers=auth_header, 193 | ) 194 | resp = yield fetch(req) 195 | if resp.code == 202: 196 | app_log.warning( 197 | "Server %s is slow to stop", 198 | log_name, 199 | ) 200 | # return False to prevent culling user with pending shutdowns 201 | return False 202 | return True 203 | 204 | @coroutine 205 | def handle_user(user): 206 | """Handle one user. 207 | 208 | Create a list of their servers, and async exec them. Wait for 209 | that to be done, and if all servers are stopped, possibly cull 210 | the user. 211 | """ 212 | # shutdown servers first. 213 | # Hub doesn't allow deleting users with running servers. 214 | # jupyterhub 0.9 always provides a 'servers' model. 215 | # 0.8 only does this when named servers are enabled. 216 | if 'servers' in user: 217 | servers = user['servers'] 218 | else: 219 | # jupyterhub < 0.9 without named servers enabled. 220 | # create servers dict with one entry for the default server 221 | # from the user model. 222 | # only if the server is running. 
223 | servers = {} 224 | if user['server']: 225 | servers[''] = { 226 | 'last_activity': user['last_activity'], 227 | 'pending': user['pending'], 228 | 'url': user['server'], 229 | } 230 | server_futures = [ 231 | handle_server(user, server_name, server) 232 | for server_name, server in servers.items() 233 | ] 234 | results = yield multi(server_futures) 235 | if not cull_users: 236 | return 237 | # some servers are still running, cannot cull users 238 | still_alive = len(results) - sum(results) 239 | if still_alive: 240 | app_log.debug( 241 | "Not culling user %s with %i servers still alive", 242 | user['name'], still_alive) 243 | return False 244 | 245 | should_cull = False 246 | if user.get('created'): 247 | age = now - parse_date(user['created']) 248 | else: 249 | # created may be undefined on jupyterhub < 0.9 250 | age = None 251 | 252 | # check last activity 253 | # last_activity can be None in 0.9 254 | if user['last_activity']: 255 | inactive = now - parse_date(user['last_activity']) 256 | else: 257 | # no activity yet, use start date 258 | # last_activity may be None with jupyterhub 0.9, 259 | # which introduces the 'created' field which is never None 260 | inactive = age 261 | 262 | should_cull = (inactive is not None and 263 | inactive.total_seconds() >= inactive_limit) 264 | if should_cull: 265 | app_log.info( 266 | "Culling user %s (inactive for %s)", 267 | user['name'], inactive) 268 | 269 | if max_age and not should_cull: 270 | # only check created if max_age is specified 271 | # so that we can still be compatible with jupyterhub 0.8 272 | # which doesn't define the 'created' field 273 | if age is not None and age.total_seconds() >= max_age: 274 | app_log.info( 275 | "Culling user %s (age: %s, inactive for %s)", 276 | user['name'], format_td(age), format_td(inactive)) 277 | should_cull = True 278 | 279 | if not should_cull: 280 | app_log.debug( 281 | "Not culling user %s (created: %s, last active: %s)", 282 | user['name'], format_td(age), format_td(inactive)) 283 | return False 284 | 285 | req = HTTPRequest( 286 | url=url + '/users/%s' % user['name'], 287 | method='DELETE', 288 | headers=auth_header, 289 | ) 290 | yield fetch(req) 291 | return True 292 | 293 | for user in users: 294 | futures.append((user['name'], handle_user(user))) 295 | 296 | for (name, f) in futures: 297 | try: 298 | result = yield f 299 | except Exception: 300 | app_log.exception("Error processing %s", name) 301 | else: 302 | if result: 303 | app_log.debug("Finished culling %s", name) 304 | 305 | 306 | if __name__ == '__main__': 307 | define( 308 | 'url', 309 | default=os.environ.get('JUPYTERHUB_API_URL'), 310 | help="The JupyterHub API URL", 311 | ) 312 | define('timeout', default=600, help="The idle timeout (in seconds)") 313 | define('cull_every', default=0, 314 | help="The interval (in seconds) for checking for idle servers to cull") 315 | define('max_age', default=0, 316 | help="The maximum age (in seconds) of servers that should be culled even if they are active") 317 | define('cull_users', default=False, 318 | help="""Cull users in addition to servers. 319 | This is for use in temporary-user cases such as tmpnb.""", 320 | ) 321 | define('concurrency', default=10, 322 | help="""Limit the number of concurrent requests made to the Hub. 323 | 324 | Deleting a lot of users at the same time can slow down the Hub, 325 | so limit the number of API requests we have outstanding at any given time. 
326 | """ 327 | ) 328 | 329 | parse_command_line() 330 | if not options.cull_every: 331 | options.cull_every = options.timeout // 2 332 | api_token = os.environ['JUPYTERHUB_API_TOKEN'] 333 | 334 | try: 335 | AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient") 336 | except ImportError as e: 337 | app_log.warning( 338 | "Could not load pycurl: %s\n" 339 | "pycurl is recommended if you have a large number of users.", 340 | e) 341 | 342 | loop = IOLoop.current() 343 | cull = partial( 344 | cull_idle, 345 | url=options.url, 346 | api_token=api_token, 347 | inactive_limit=options.timeout, 348 | cull_users=options.cull_users, 349 | max_age=options.max_age, 350 | concurrency=options.concurrency, 351 | ) 352 | # schedule first cull immediately 353 | # because PeriodicCallback doesn't start until the end of the first interval 354 | loop.add_callback(cull) 355 | # schedule periodic cull 356 | pc = PeriodicCallback(cull, 1e3 * options.cull_every) 357 | pc.start() 358 | try: 359 | loop.start() 360 | except KeyboardInterrupt: 361 | pass 362 | -------------------------------------------------------------------------------- /scripts/wait-for-database: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x 4 | 5 | JUPYTERHUB_DATABASE_NAME=${JUPYTERHUB_DATABASE_NAME:-jupyterhub} 6 | 7 | while true; do 8 | psql "postgresql://jupyterhub:$JUPYTERHUB_DATABASE_PASSWORD@$JUPYTERHUB_DATABASE_HOST:5432/$JUPYTERHUB_DATABASE_NAME" -q -c 'SELECT 1' 9 | if [ $? -eq 0 ]; then 10 | exit 0 11 | fi 12 | sleep 1 13 | done 14 | -------------------------------------------------------------------------------- /start-jupyterhub.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x 4 | 5 | set -eo pipefail 6 | 7 | # Add scripts directory to program search path. 8 | 9 | PATH=$PATH:/opt/app-root/scripts 10 | 11 | # Read any custom environment variables. 12 | 13 | . /opt/app-root/etc/jupyterhub_config.sh 14 | 15 | # Start the JupyterHub instance. 
16 | 17 | exec jupyterhub -f /opt/app-root/etc/jupyterhub_config.py 18 | -------------------------------------------------------------------------------- /templates/jupyterhub-builder.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "Template", 3 | "apiVersion": "template.openshift.io/v1", 4 | "metadata": { 5 | "name": "jupyterhub-builder", 6 | "annotations": { 7 | "openshift.io/display-name": "JupyterHub Builder", 8 | "description": "Template for creating customised JupyterHub images.", 9 | "iconClass": "icon-python", 10 | "tags": "python,jupyter,jupyterhub" 11 | } 12 | }, 13 | "parameters": [ 14 | { 15 | "name": "JUPYTERHUB_NAME", 16 | "value": "custom-jupyterhub", 17 | "required": true 18 | }, 19 | { 20 | "name": "BUILDER_IMAGE", 21 | "value": "jupyterhub:3.4.0", 22 | "required": true 23 | }, 24 | { 25 | "name": "GIT_REPOSITORY_URL", 26 | "value": "", 27 | "required": true 28 | }, 29 | { 30 | "name": "GIT_REFERENCE", 31 | "value": "master", 32 | "required": true 33 | }, 34 | { 35 | "name": "CONTEXT_DIR", 36 | "value": "", 37 | "required": false 38 | } 39 | ], 40 | "objects": [ 41 | { 42 | "kind": "ImageStream", 43 | "apiVersion": "image.openshift.io/v1", 44 | "metadata": { 45 | "name": "${JUPYTERHUB_NAME}", 46 | "labels": { 47 | "build": "${JUPYTERHUB_NAME}" 48 | } 49 | }, 50 | "spec": { 51 | "lookupPolicy": { 52 | "local": true 53 | } 54 | } 55 | }, 56 | { 57 | "kind": "BuildConfig", 58 | "apiVersion": "build.openshift.io/v1", 59 | "metadata": { 60 | "name": "${JUPYTERHUB_NAME}", 61 | "labels": { 62 | "build": "${JUPYTERHUB_NAME}" 63 | } 64 | }, 65 | "spec": { 66 | "triggers": [ 67 | { 68 | "type": "ConfigChange" 69 | }, 70 | { 71 | "type": "ImageChange" 72 | } 73 | ], 74 | "source": { 75 | "type": "Git", 76 | "git": { 77 | "uri": "${GIT_REPOSITORY_URL}", 78 | "ref": "${GIT_REFERENCE}" 79 | }, 80 | "contextDir": "${CONTEXT_DIR}" 81 | }, 82 | "strategy": { 83 | "type": "Source", 84 | "sourceStrategy": { 85 | "from": { 86 | "kind": "ImageStreamTag", 87 | "name": "${BUILDER_IMAGE}" 88 | } 89 | } 90 | }, 91 | "output": { 92 | "to": { 93 | "kind": "ImageStreamTag", 94 | "name": "${JUPYTERHUB_NAME}:latest" 95 | } 96 | } 97 | } 98 | } 99 | ] 100 | } 101 | -------------------------------------------------------------------------------- /templates/jupyterhub-deployer.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "Template", 3 | "apiVersion": "template.openshift.io/v1", 4 | "metadata": { 5 | "name": "jupyterhub-deployer", 6 | "annotations": { 7 | "openshift.io/display-name": "JupyterHub", 8 | "description": "Template for deploying a JupyterHub instance.", 9 | "iconClass": "icon-python", 10 | "tags": "python,jupyter,jupyterhub" 11 | } 12 | }, 13 | "parameters": [ 14 | { 15 | "name": "APPLICATION_NAME", 16 | "value": "jupyterhub", 17 | "required": true 18 | }, 19 | { 20 | "name": "JUPYTERHUB_IMAGE", 21 | "value": "jupyterhub:3.4.0", 22 | "required": true 23 | }, 24 | { 25 | "name": "NOTEBOOK_IMAGE", 26 | "value": "s2i-minimal-notebook:3.6", 27 | "required": true 28 | }, 29 | { 30 | "name": "JUPYTERHUB_CONFIG", 31 | "value": "", 32 | "required": false 33 | }, 34 | { 35 | "name": "JUPYTERHUB_ENVVARS", 36 | "value": "", 37 | "required": false 38 | }, 39 | { 40 | "name": "DATABASE_PASSWORD", 41 | "generate": "expression", 42 | "from": "[a-zA-Z0-9]{16}", 43 | "required": true 44 | }, 45 | { 46 | "name": "COOKIE_SECRET", 47 | "generate": "expression", 48 | "from": "[a-f0-9]{32}", 49 | 
"required": true 50 | }, 51 | { 52 | "name": "JUPYTERHUB_MEMORY", 53 | "description": "Amount of memory available to JupyterHub.", 54 | "value": "512Mi", 55 | "required": true 56 | }, 57 | { 58 | "name": "DATABASE_MEMORY", 59 | "description": "Amount of memory available to PostgreSQL.", 60 | "value": "512Mi", 61 | "required": true 62 | }, 63 | { 64 | "name": "NOTEBOOK_MEMORY", 65 | "description": "Amount of memory available to each notebook.", 66 | "value": "512Mi", 67 | "required": true 68 | } 69 | ], 70 | "objects": [ 71 | { 72 | "kind": "ConfigMap", 73 | "apiVersion": "v1", 74 | "metadata": { 75 | "name": "${APPLICATION_NAME}-cfg", 76 | "labels": { 77 | "app": "${APPLICATION_NAME}" 78 | } 79 | }, 80 | "data": { 81 | "jupyterhub_config.py": "${JUPYTERHUB_CONFIG}", 82 | "jupyterhub_config.sh": "${JUPYTERHUB_ENVVARS}" 83 | } 84 | }, 85 | { 86 | "kind": "ServiceAccount", 87 | "apiVersion": "v1", 88 | "metadata": { 89 | "name": "${APPLICATION_NAME}-hub", 90 | "labels": { 91 | "app": "${APPLICATION_NAME}" 92 | }, 93 | "annotations": { 94 | "serviceaccounts.openshift.io/oauth-redirectreference.first": "{\"kind\":\"OAuthRedirectReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Route\",\"name\":\"${APPLICATION_NAME}\"}}", 95 | "serviceaccounts.openshift.io/oauth-redirecturi.first": "hub/oauth_callback", 96 | "serviceaccounts.openshift.io/oauth-want-challenges": "false" 97 | } 98 | } 99 | }, 100 | { 101 | "kind": "RoleBinding", 102 | "apiVersion": "authorization.openshift.io/v1", 103 | "metadata": { 104 | "name": "${APPLICATION_NAME}-edit", 105 | "labels": { 106 | "app": "${APPLICATION_NAME}" 107 | } 108 | }, 109 | "subjects": [ 110 | { 111 | "kind": "ServiceAccount", 112 | "name": "${APPLICATION_NAME}-hub" 113 | } 114 | ], 115 | "roleRef": { 116 | "apiGroup": "rbac.authorization.k8s.io", 117 | "kind": "ClusterRole", 118 | "name": "edit" 119 | } 120 | }, 121 | { 122 | "kind": "DeploymentConfig", 123 | "apiVersion": "apps.openshift.io/v1", 124 | "metadata": { 125 | "name": "${APPLICATION_NAME}", 126 | "labels": { 127 | "app": "${APPLICATION_NAME}" 128 | } 129 | }, 130 | "spec": { 131 | "strategy": { 132 | "type": "Recreate" 133 | }, 134 | "triggers": [ 135 | { 136 | "type": "ConfigChange" 137 | }, 138 | { 139 | "type": "ImageChange", 140 | "imageChangeParams": { 141 | "automatic": true, 142 | "containerNames": [ 143 | "wait-for-database", 144 | "jupyterhub" 145 | ], 146 | "from": { 147 | "kind": "ImageStreamTag", 148 | "name": "${JUPYTERHUB_IMAGE}" 149 | } 150 | } 151 | } 152 | ], 153 | "replicas": 1, 154 | "selector": { 155 | "app": "${APPLICATION_NAME}", 156 | "deploymentconfig": "${APPLICATION_NAME}" 157 | }, 158 | "template": { 159 | "metadata": { 160 | "annotations": { 161 | "alpha.image.policy.openshift.io/resolve-names": "*" 162 | }, 163 | "labels": { 164 | "app": "${APPLICATION_NAME}", 165 | "deploymentconfig": "${APPLICATION_NAME}" 166 | } 167 | }, 168 | "spec": { 169 | "serviceAccountName": "${APPLICATION_NAME}-hub", 170 | "initContainers": [ 171 | { 172 | "name": "wait-for-database", 173 | "image": "${JUPYTERHUB_IMAGE}", 174 | "command": [ "wait-for-database" ], 175 | "resources": { 176 | "limits": { 177 | "memory": "${JUPYTERHUB_MEMORY}" 178 | } 179 | }, 180 | "env": [ 181 | { 182 | "name": "JUPYTERHUB_DATABASE_PASSWORD", 183 | "value": "${DATABASE_PASSWORD}" 184 | }, 185 | { 186 | "name": "JUPYTERHUB_DATABASE_HOST", 187 | "value": "${APPLICATION_NAME}-db" 188 | }, 189 | { 190 | "name": "JUPYTERHUB_DATABASE_NAME", 191 | "value": "postgres" 192 | } 193 | ] 194 | } 195 | ], 196 
| "containers": [ 197 | { 198 | "name": "jupyterhub", 199 | "image": "${JUPYTERHUB_IMAGE}", 200 | "ports": [ 201 | { 202 | "containerPort": 8080, 203 | "protocol": "TCP" 204 | } 205 | ], 206 | "resources": { 207 | "limits": { 208 | "memory": "${JUPYTERHUB_MEMORY}" 209 | } 210 | }, 211 | "env": [ 212 | { 213 | "name": "APPLICATION_NAME", 214 | "value": "${APPLICATION_NAME}" 215 | }, 216 | { 217 | "name": "JUPYTERHUB_NOTEBOOK_IMAGE", 218 | "value": "${NOTEBOOK_IMAGE}" 219 | }, 220 | { 221 | "name": "JUPYTERHUB_NOTEBOOK_MEMORY", 222 | "value": "${NOTEBOOK_MEMORY}" 223 | }, 224 | { 225 | "name": "JUPYTERHUB_DATABASE_PASSWORD", 226 | "value": "${DATABASE_PASSWORD}" 227 | }, 228 | { 229 | "name": "JUPYTERHUB_DATABASE_HOST", 230 | "value": "${APPLICATION_NAME}-db" 231 | }, 232 | { 233 | "name": "JUPYTERHUB_DATABASE_NAME", 234 | "value": "postgres" 235 | }, 236 | { 237 | "name": "JUPYTERHUB_COOKIE_SECRET", 238 | "value": "${COOKIE_SECRET}" 239 | } 240 | ], 241 | "volumeMounts": [ 242 | { 243 | "name": "config", 244 | "mountPath": "/opt/app-root/configs" 245 | } 246 | ] 247 | } 248 | ], 249 | "volumes": [ 250 | { 251 | "name": "config", 252 | "configMap": { 253 | "name": "${APPLICATION_NAME}-cfg", 254 | "defaultMode": 420 255 | } 256 | } 257 | ] 258 | } 259 | } 260 | } 261 | }, 262 | { 263 | "kind": "Service", 264 | "apiVersion": "v1", 265 | "metadata": { 266 | "name": "${APPLICATION_NAME}", 267 | "labels": { 268 | "app": "${APPLICATION_NAME}" 269 | } 270 | }, 271 | "spec": { 272 | "ports": [ 273 | { 274 | "name": "8080-tcp", 275 | "protocol": "TCP", 276 | "port": 8080, 277 | "targetPort": 8080 278 | }, 279 | { 280 | "name": "8081-tcp", 281 | "protocol": "TCP", 282 | "port": 8081, 283 | "targetPort": 8081 284 | } 285 | ], 286 | "selector": { 287 | "app": "${APPLICATION_NAME}", 288 | "deploymentconfig": "${APPLICATION_NAME}" 289 | } 290 | } 291 | }, 292 | { 293 | "kind": "Route", 294 | "apiVersion": "route.openshift.io/v1", 295 | "metadata": { 296 | "name": "${APPLICATION_NAME}", 297 | "labels": { 298 | "app": "${APPLICATION_NAME}" 299 | } 300 | }, 301 | "spec": { 302 | "host": "", 303 | "to": { 304 | "kind": "Service", 305 | "name": "${APPLICATION_NAME}", 306 | "weight": 100 307 | }, 308 | "port": { 309 | "targetPort": "8080-tcp" 310 | }, 311 | "tls": { 312 | "termination": "edge", 313 | "insecureEdgeTerminationPolicy": "Redirect" 314 | } 315 | } 316 | }, 317 | { 318 | "kind": "PersistentVolumeClaim", 319 | "apiVersion": "v1", 320 | "metadata": { 321 | "name": "${APPLICATION_NAME}-db", 322 | "labels": { 323 | "app": "${APPLICATION_NAME}" 324 | } 325 | }, 326 | "spec": { 327 | "accessModes": [ 328 | "ReadWriteOnce" 329 | ], 330 | "resources": { 331 | "requests": { 332 | "storage": "1Gi" 333 | } 334 | } 335 | } 336 | }, 337 | { 338 | "kind": "DeploymentConfig", 339 | "apiVersion": "apps.openshift.io/v1", 340 | "metadata": { 341 | "name": "${APPLICATION_NAME}-db", 342 | "labels": { 343 | "app": "${APPLICATION_NAME}" 344 | } 345 | }, 346 | "spec": { 347 | "replicas": 1, 348 | "selector": { 349 | "app": "${APPLICATION_NAME}", 350 | "deploymentconfig": "${APPLICATION_NAME}-db" 351 | }, 352 | "strategy": { 353 | "type": "Recreate" 354 | }, 355 | "template": { 356 | "metadata": { 357 | "labels": { 358 | "app": "${APPLICATION_NAME}", 359 | "deploymentconfig": "${APPLICATION_NAME}-db" 360 | } 361 | }, 362 | "spec": { 363 | "containers": [ 364 | { 365 | "name": "postgresql", 366 | "env": [ 367 | { 368 | "name": "POSTGRESQL_USER", 369 | "value": "jupyterhub" 370 | }, 371 | { 372 | "name": 
"POSTGRESQL_PASSWORD", 373 | "value": "${DATABASE_PASSWORD}" 374 | }, 375 | { 376 | "name": "POSTGRESQL_DATABASE", 377 | "value": "postgres" 378 | } 379 | ], 380 | "livenessProbe": { 381 | "tcpSocket": { 382 | "port": 5432 383 | } 384 | }, 385 | "ports": [ 386 | { 387 | "containerPort": 5432, 388 | "protocol": "TCP" 389 | } 390 | ], 391 | "resources": { 392 | "limits": { 393 | "memory": "${DATABASE_MEMORY}" 394 | } 395 | }, 396 | "readinessProbe": { 397 | "exec": { 398 | "command": [ 399 | "/bin/sh", 400 | "-i", 401 | "-c", 402 | "psql -h 127.0.0.1 -U $POSTGRESQL_USER -q -d $POSTGRESQL_DATABASE -c 'SELECT 1'" 403 | ] 404 | } 405 | }, 406 | "volumeMounts": [ 407 | { 408 | "mountPath": "/var/lib/pgsql/data", 409 | "name": "data" 410 | } 411 | ] 412 | } 413 | ], 414 | "volumes": [ 415 | { 416 | "name": "data", 417 | "persistentVolumeClaim": { 418 | "claimName": "${APPLICATION_NAME}-db" 419 | } 420 | }, 421 | { 422 | "name": "config", 423 | "configMap": { 424 | "name": "${APPLICATION_NAME}-cfg", 425 | "defaultMode": 420 426 | } 427 | } 428 | ] 429 | } 430 | }, 431 | "triggers": [ 432 | { 433 | "imageChangeParams": { 434 | "automatic": true, 435 | "containerNames": [ 436 | "postgresql" 437 | ], 438 | "from": { 439 | "kind": "ImageStreamTag", 440 | "name": "postgresql:9.6", 441 | "namespace": "openshift" 442 | } 443 | }, 444 | "type": "ImageChange" 445 | }, 446 | { 447 | "type": "ConfigChange" 448 | } 449 | ] 450 | } 451 | }, 452 | { 453 | "kind": "Service", 454 | "apiVersion": "v1", 455 | "metadata": { 456 | "name": "${APPLICATION_NAME}-db", 457 | "labels": { 458 | "app": "${APPLICATION_NAME}" 459 | } 460 | }, 461 | "spec": { 462 | "ports": [ 463 | { 464 | "name": "5432-tcp", 465 | "protocol": "TCP", 466 | "port": 5432, 467 | "targetPort": 5432 468 | } 469 | ], 470 | "selector": { 471 | "app": "${APPLICATION_NAME}", 472 | "deploymentconfig": "${APPLICATION_NAME}-db" 473 | } 474 | } 475 | } 476 | ] 477 | } 478 | -------------------------------------------------------------------------------- /templates/jupyterhub-quickstart.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "Template", 3 | "apiVersion": "template.openshift.io/v1", 4 | "metadata": { 5 | "name": "jupyterhub-quickstart", 6 | "annotations": { 7 | "openshift.io/display-name": "JupyterHub Quickstart", 8 | "description": "Template for creating and deploying customised Jupyter notebook images using JupyterHub.", 9 | "iconClass": "icon-python", 10 | "tags": "python,jupyter,jupyterhub" 11 | } 12 | }, 13 | "parameters": [ 14 | { 15 | "name": "APPLICATION_NAME", 16 | "value": "jupyterhub", 17 | "required": true 18 | }, 19 | { 20 | "name": "JUPYTERHUB_IMAGE", 21 | "value": "jupyterhub:3.4.0", 22 | "required": true 23 | }, 24 | { 25 | "name": "BUILDER_IMAGE", 26 | "value": "s2i-minimal-notebook:3.6", 27 | "required": true 28 | }, 29 | { 30 | "name": "GIT_REPOSITORY_URL", 31 | "value": "", 32 | "required": true 33 | }, 34 | { 35 | "name": "GIT_REFERENCE", 36 | "value": "master", 37 | "required": true 38 | }, 39 | { 40 | "name": "CONTEXT_DIR", 41 | "value": "", 42 | "required": false 43 | }, 44 | { 45 | "name": "JUPYTERHUB_CONFIG", 46 | "value": "", 47 | "required": false 48 | }, 49 | { 50 | "name": "JUPYTERHUB_ENVVARS", 51 | "value": "", 52 | "required": false 53 | }, 54 | { 55 | "name": "DATABASE_PASSWORD", 56 | "generate": "expression", 57 | "from": "[a-zA-Z0-9]{16}", 58 | "required": true 59 | }, 60 | { 61 | "name": "COOKIE_SECRET", 62 | "generate": "expression", 63 | "from": 
"[a-f0-9]{32}", 64 | "required": true 65 | }, 66 | { 67 | "name": "JUPYTERHUB_MEMORY", 68 | "description": "Amount of memory available to JupyterHub.", 69 | "value": "512Mi", 70 | "required": true 71 | }, 72 | { 73 | "name": "DATABASE_MEMORY", 74 | "description": "Amount of memory available to PostgreSQL.", 75 | "value": "512Mi", 76 | "required": true 77 | }, 78 | { 79 | "name": "NOTEBOOK_INTERFACE", 80 | "value": "classic" 81 | }, 82 | { 83 | "name": "NOTEBOOK_MEMORY", 84 | "description": "Amount of memory available to each notebook.", 85 | "value": "512Mi", 86 | "required": true 87 | } 88 | ], 89 | "objects": [ 90 | { 91 | "kind": "ImageStream", 92 | "apiVersion": "image.openshift.io/v1", 93 | "metadata": { 94 | "name": "${APPLICATION_NAME}-nb", 95 | "labels": { 96 | "app": "${APPLICATION_NAME}" 97 | } 98 | }, 99 | "spec": { 100 | "lookupPolicy": { 101 | "local": true 102 | } 103 | } 104 | }, 105 | { 106 | "kind": "BuildConfig", 107 | "apiVersion": "build.openshift.io/v1", 108 | "metadata": { 109 | "name": "${APPLICATION_NAME}-nb", 110 | "labels": { 111 | "app": "${APPLICATION_NAME}" 112 | } 113 | }, 114 | "spec": { 115 | "triggers": [ 116 | { 117 | "type": "ConfigChange" 118 | }, 119 | { 120 | "type": "ImageChange" 121 | } 122 | ], 123 | "source": { 124 | "type": "Git", 125 | "git": { 126 | "uri": "${GIT_REPOSITORY_URL}", 127 | "ref": "${GIT_REFERENCE}" 128 | }, 129 | "contextDir": "${CONTEXT_DIR}" 130 | }, 131 | "strategy": { 132 | "type": "Source", 133 | "sourceStrategy": { 134 | "from": { 135 | "kind": "ImageStreamTag", 136 | "name": "${BUILDER_IMAGE}" 137 | } 138 | } 139 | }, 140 | "output": { 141 | "to": { 142 | "kind": "ImageStreamTag", 143 | "name": "${APPLICATION_NAME}-nb:latest" 144 | } 145 | } 146 | } 147 | }, 148 | { 149 | "kind": "ConfigMap", 150 | "apiVersion": "v1", 151 | "metadata": { 152 | "name": "${APPLICATION_NAME}-cfg", 153 | "labels": { 154 | "app": "${APPLICATION_NAME}" 155 | } 156 | }, 157 | "data": { 158 | "jupyterhub_config.py": "${JUPYTERHUB_CONFIG}", 159 | "jupyterhub_config.sh": "${JUPYTERHUB_ENVVARS}" 160 | } 161 | }, 162 | { 163 | "kind": "ServiceAccount", 164 | "apiVersion": "v1", 165 | "metadata": { 166 | "name": "${APPLICATION_NAME}-hub", 167 | "labels": { 168 | "app": "${APPLICATION_NAME}" 169 | }, 170 | "annotations": { 171 | "serviceaccounts.openshift.io/oauth-redirectreference.first": "{\"kind\":\"OAuthRedirectReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Route\",\"name\":\"${APPLICATION_NAME}\"}}", 172 | "serviceaccounts.openshift.io/oauth-redirecturi.first": "hub/oauth_callback", 173 | "serviceaccounts.openshift.io/oauth-want-challenges": "false" 174 | } 175 | } 176 | }, 177 | { 178 | "kind": "RoleBinding", 179 | "apiVersion": "authorization.openshift.io/v1", 180 | "metadata": { 181 | "name": "${APPLICATION_NAME}-edit", 182 | "labels": { 183 | "app": "${APPLICATION_NAME}" 184 | } 185 | }, 186 | "subjects": [ 187 | { 188 | "kind": "ServiceAccount", 189 | "name": "${APPLICATION_NAME}-hub" 190 | } 191 | ], 192 | "roleRef": { 193 | "apiGroup": "rbac.authorization.k8s.io", 194 | "kind": "ClusterRole", 195 | "name": "edit" 196 | } 197 | }, 198 | { 199 | "kind": "DeploymentConfig", 200 | "apiVersion": "apps.openshift.io/v1", 201 | "metadata": { 202 | "name": "${APPLICATION_NAME}", 203 | "labels": { 204 | "app": "${APPLICATION_NAME}" 205 | } 206 | }, 207 | "spec": { 208 | "strategy": { 209 | "type": "Recreate" 210 | }, 211 | "triggers": [ 212 | { 213 | "type": "ConfigChange" 214 | }, 215 | { 216 | "type": "ImageChange", 217 | 
"imageChangeParams": { 218 | "automatic": true, 219 | "containerNames": [ 220 | "wait-for-database", 221 | "jupyterhub" 222 | ], 223 | "from": { 224 | "kind": "ImageStreamTag", 225 | "name": "${JUPYTERHUB_IMAGE}" 226 | } 227 | } 228 | } 229 | ], 230 | "replicas": 1, 231 | "selector": { 232 | "app": "${APPLICATION_NAME}", 233 | "deploymentconfig": "${APPLICATION_NAME}" 234 | }, 235 | "template": { 236 | "metadata": { 237 | "annotations": { 238 | "alpha.image.policy.openshift.io/resolve-names": "*" 239 | }, 240 | "labels": { 241 | "app": "${APPLICATION_NAME}", 242 | "deploymentconfig": "${APPLICATION_NAME}" 243 | } 244 | }, 245 | "spec": { 246 | "serviceAccountName": "${APPLICATION_NAME}-hub", 247 | "initContainers": [ 248 | { 249 | "name": "wait-for-database", 250 | "image": "${JUPYTERHUB_IMAGE}", 251 | "command": [ "wait-for-database" ], 252 | "resources": { 253 | "limits": { 254 | "memory": "${JUPYTERHUB_MEMORY}" 255 | } 256 | }, 257 | "env": [ 258 | { 259 | "name": "JUPYTERHUB_DATABASE_PASSWORD", 260 | "value": "${DATABASE_PASSWORD}" 261 | }, 262 | { 263 | "name": "JUPYTERHUB_DATABASE_HOST", 264 | "value": "${APPLICATION_NAME}-db" 265 | }, 266 | { 267 | "name": "JUPYTERHUB_DATABASE_NAME", 268 | "value": "postgres" 269 | } 270 | ] 271 | } 272 | ], 273 | "containers": [ 274 | { 275 | "name": "jupyterhub", 276 | "image": "${JUPYTERHUB_IMAGE}", 277 | "ports": [ 278 | { 279 | "containerPort": 8080, 280 | "protocol": "TCP" 281 | } 282 | ], 283 | "resources": { 284 | "limits": { 285 | "memory": "${JUPYTERHUB_MEMORY}" 286 | } 287 | }, 288 | "env": [ 289 | { 290 | "name": "APPLICATION_NAME", 291 | "value": "${APPLICATION_NAME}" 292 | }, 293 | { 294 | "name": "JUPYTERHUB_NOTEBOOK_IMAGE", 295 | "value": "${APPLICATION_NAME}-nb:latest" 296 | }, 297 | { 298 | "name": "JUPYTERHUB_NOTEBOOK_MEMORY", 299 | "value": "${NOTEBOOK_MEMORY}" 300 | }, 301 | { 302 | "name": "JUPYTERHUB_NOTEBOOK_INTERFACE", 303 | "value": "${NOTEBOOK_INTERFACE}" 304 | }, 305 | { 306 | "name": "JUPYTERHUB_DATABASE_PASSWORD", 307 | "value": "${DATABASE_PASSWORD}" 308 | }, 309 | { 310 | "name": "JUPYTERHUB_DATABASE_HOST", 311 | "value": "${APPLICATION_NAME}-db" 312 | }, 313 | { 314 | "name": "JUPYTERHUB_DATABASE_NAME", 315 | "value": "postgres" 316 | }, 317 | { 318 | "name": "JUPYTERHUB_COOKIE_SECRET", 319 | "value": "${COOKIE_SECRET}" 320 | } 321 | ], 322 | "volumeMounts": [ 323 | { 324 | "name": "config", 325 | "mountPath": "/opt/app-root/configs" 326 | } 327 | ] 328 | } 329 | ], 330 | "volumes": [ 331 | { 332 | "name": "config", 333 | "configMap": { 334 | "name": "${APPLICATION_NAME}-cfg", 335 | "defaultMode": 420 336 | } 337 | } 338 | ] 339 | } 340 | } 341 | } 342 | }, 343 | { 344 | "kind": "Service", 345 | "apiVersion": "v1", 346 | "metadata": { 347 | "name": "${APPLICATION_NAME}", 348 | "labels": { 349 | "app": "${APPLICATION_NAME}" 350 | } 351 | }, 352 | "spec": { 353 | "ports": [ 354 | { 355 | "name": "8080-tcp", 356 | "protocol": "TCP", 357 | "port": 8080, 358 | "targetPort": 8080 359 | }, 360 | { 361 | "name": "8081-tcp", 362 | "protocol": "TCP", 363 | "port": 8081, 364 | "targetPort": 8081 365 | } 366 | ], 367 | "selector": { 368 | "app": "${APPLICATION_NAME}", 369 | "deploymentconfig": "${APPLICATION_NAME}" 370 | } 371 | } 372 | }, 373 | { 374 | "kind": "Route", 375 | "apiVersion": "route.openshift.io/v1", 376 | "metadata": { 377 | "name": "${APPLICATION_NAME}", 378 | "labels": { 379 | "app": "${APPLICATION_NAME}" 380 | } 381 | }, 382 | "spec": { 383 | "host": "", 384 | "to": { 385 | "kind": "Service", 386 | "name": 
"${APPLICATION_NAME}", 387 | "weight": 100 388 | }, 389 | "port": { 390 | "targetPort": "8080-tcp" 391 | }, 392 | "tls": { 393 | "termination": "edge", 394 | "insecureEdgeTerminationPolicy": "Redirect" 395 | } 396 | } 397 | }, 398 | { 399 | "kind": "PersistentVolumeClaim", 400 | "apiVersion": "v1", 401 | "metadata": { 402 | "name": "${APPLICATION_NAME}-db", 403 | "labels": { 404 | "app": "${APPLICATION_NAME}" 405 | } 406 | }, 407 | "spec": { 408 | "accessModes": [ 409 | "ReadWriteOnce" 410 | ], 411 | "resources": { 412 | "requests": { 413 | "storage": "1Gi" 414 | } 415 | } 416 | } 417 | }, 418 | { 419 | "kind": "DeploymentConfig", 420 | "apiVersion": "apps.openshift.io/v1", 421 | "metadata": { 422 | "name": "${APPLICATION_NAME}-db", 423 | "labels": { 424 | "app": "${APPLICATION_NAME}" 425 | } 426 | }, 427 | "spec": { 428 | "replicas": 1, 429 | "selector": { 430 | "app": "${APPLICATION_NAME}", 431 | "deploymentconfig": "${APPLICATION_NAME}-db" 432 | }, 433 | "strategy": { 434 | "type": "Recreate" 435 | }, 436 | "template": { 437 | "metadata": { 438 | "labels": { 439 | "app": "${APPLICATION_NAME}", 440 | "deploymentconfig": "${APPLICATION_NAME}-db" 441 | } 442 | }, 443 | "spec": { 444 | "containers": [ 445 | { 446 | "name": "postgresql", 447 | "env": [ 448 | { 449 | "name": "POSTGRESQL_USER", 450 | "value": "jupyterhub" 451 | }, 452 | { 453 | "name": "POSTGRESQL_PASSWORD", 454 | "value": "${DATABASE_PASSWORD}" 455 | }, 456 | { 457 | "name": "POSTGRESQL_DATABASE", 458 | "value": "postgres" 459 | } 460 | ], 461 | "livenessProbe": { 462 | "tcpSocket": { 463 | "port": 5432 464 | } 465 | }, 466 | "ports": [ 467 | { 468 | "containerPort": 5432, 469 | "protocol": "TCP" 470 | } 471 | ], 472 | "resources": { 473 | "limits": { 474 | "memory": "${DATABASE_MEMORY}" 475 | } 476 | }, 477 | "readinessProbe": { 478 | "exec": { 479 | "command": [ 480 | "/bin/sh", 481 | "-i", 482 | "-c", 483 | "psql -h 127.0.0.1 -U $POSTGRESQL_USER -q -d $POSTGRESQL_DATABASE -c 'SELECT 1'" 484 | ] 485 | } 486 | }, 487 | "volumeMounts": [ 488 | { 489 | "mountPath": "/var/lib/pgsql/data", 490 | "name": "data" 491 | } 492 | ] 493 | } 494 | ], 495 | "volumes": [ 496 | { 497 | "name": "data", 498 | "persistentVolumeClaim": { 499 | "claimName": "${APPLICATION_NAME}-db" 500 | } 501 | }, 502 | { 503 | "name": "config", 504 | "configMap": { 505 | "name": "${APPLICATION_NAME}-cfg", 506 | "defaultMode": 420 507 | } 508 | } 509 | ] 510 | } 511 | }, 512 | "triggers": [ 513 | { 514 | "imageChangeParams": { 515 | "automatic": true, 516 | "containerNames": [ 517 | "postgresql" 518 | ], 519 | "from": { 520 | "kind": "ImageStreamTag", 521 | "name": "postgresql:9.6", 522 | "namespace": "openshift" 523 | } 524 | }, 525 | "type": "ImageChange" 526 | }, 527 | { 528 | "type": "ConfigChange" 529 | } 530 | ] 531 | } 532 | }, 533 | { 534 | "kind": "Service", 535 | "apiVersion": "v1", 536 | "metadata": { 537 | "name": "${APPLICATION_NAME}-db", 538 | "labels": { 539 | "app": "${APPLICATION_NAME}" 540 | } 541 | }, 542 | "spec": { 543 | "ports": [ 544 | { 545 | "name": "5432-tcp", 546 | "protocol": "TCP", 547 | "port": 5432, 548 | "targetPort": 5432 549 | } 550 | ], 551 | "selector": { 552 | "app": "${APPLICATION_NAME}", 553 | "deploymentconfig": "${APPLICATION_NAME}-db" 554 | } 555 | } 556 | } 557 | ] 558 | } 559 | -------------------------------------------------------------------------------- /templates/jupyterhub-workspace.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "Template", 3 | 
"apiVersion": "template.openshift.io/v1", 4 | "metadata": { 5 | "name": "jupyterhub-workspace", 6 | "annotations": { 7 | "openshift.io/display-name": "JupyterHub Workspace", 8 | "description": "Template for deploying a JupyterHub instance with cluster access.", 9 | "iconClass": "icon-python", 10 | "tags": "python,jupyter,jupyterhub" 11 | } 12 | }, 13 | "parameters": [ 14 | { 15 | "name": "SPAWNER_NAMESPACE", 16 | "value": "", 17 | "required": true 18 | }, 19 | { 20 | "name": "CLUSTER_SUBDOMAIN", 21 | "value": "", 22 | "required": true 23 | }, 24 | { 25 | "name": "APPLICATION_NAME", 26 | "value": "jupyterhub", 27 | "required": true 28 | }, 29 | { 30 | "name": "JUPYTERHUB_IMAGE", 31 | "value": "jupyterhub:3.4.0", 32 | "required": true 33 | }, 34 | { 35 | "name": "NOTEBOOK_IMAGE", 36 | "value": "s2i-minimal-notebook:3.6", 37 | "required": true 38 | }, 39 | { 40 | "name": "JUPYTERHUB_CONFIG", 41 | "value": "", 42 | "required": false 43 | }, 44 | { 45 | "name": "JUPYTERHUB_ENVVARS", 46 | "value": "", 47 | "required": false 48 | }, 49 | { 50 | "name": "ADMIN_USERS", 51 | "value": "", 52 | "required": false 53 | }, 54 | { 55 | "name": "REGISTERED_USERS", 56 | "value": "", 57 | "required": false 58 | }, 59 | { 60 | "name": "DATABASE_PASSWORD", 61 | "generate": "expression", 62 | "from": "[a-zA-Z0-9]{16}", 63 | "required": true 64 | }, 65 | { 66 | "name": "COOKIE_SECRET", 67 | "generate": "expression", 68 | "from": "[a-f0-9]{32}", 69 | "required": true 70 | }, 71 | { 72 | "name": "JUPYTERHUB_MEMORY", 73 | "description": "Amount of memory available to JupyterHub.", 74 | "value": "512Mi", 75 | "required": true 76 | }, 77 | { 78 | "name": "DATABASE_MEMORY", 79 | "description": "Amount of memory available to PostgreSQL.", 80 | "value": "512Mi", 81 | "required": true 82 | }, 83 | { 84 | "name": "NOTEBOOK_MEMORY", 85 | "description": "Amount of memory available to each notebook.", 86 | "value": "512Mi", 87 | "required": true 88 | }, 89 | { 90 | "name": "NOTEBOOK_INTERFACE", 91 | "value": "classic" 92 | }, 93 | { 94 | "name": "OPENSHIFT_PROJECT", 95 | "value": "", 96 | "required": false 97 | }, 98 | { 99 | "name": "VOLUME_SIZE", 100 | "description": "Amount of storage available to each user.", 101 | "value": "" 102 | }, 103 | { 104 | "name": "IDLE_TIMEOUT", 105 | "description": "Time in seconds after which idle session is culled.", 106 | "value": "" 107 | }, 108 | { 109 | "name": "OAUTH_CLIENT_SECRET", 110 | "generate": "expression", 111 | "from": "[a-zA-Z0-9]{32}" 112 | } 113 | ], 114 | "objects": [ 115 | { 116 | "kind": "OAuthClient", 117 | "apiVersion": "oauth.openshift.io/v1", 118 | "metadata": { 119 | "name": "${APPLICATION_NAME}-${SPAWNER_NAMESPACE}-users", 120 | "labels": { 121 | "app": "${APPLICATION_NAME}" 122 | } 123 | }, 124 | "secret": "${OAUTH_CLIENT_SECRET}", 125 | "grantMethod": "auto", 126 | "redirectURIs": [ 127 | "https://${APPLICATION_NAME}-${SPAWNER_NAMESPACE}.${CLUSTER_SUBDOMAIN}/hub/oauth_callback" 128 | ] 129 | }, 130 | { 131 | "kind": "ConfigMap", 132 | "apiVersion": "v1", 133 | "metadata": { 134 | "name": "${APPLICATION_NAME}-cfg", 135 | "labels": { 136 | "app": "${APPLICATION_NAME}" 137 | } 138 | }, 139 | "data": { 140 | "jupyterhub_config.py": "${JUPYTERHUB_CONFIG}", 141 | "jupyterhub_config.sh": "${JUPYTERHUB_ENVVARS}", 142 | "admin_users.txt": "${ADMIN_USERS}", 143 | "user_whitelist.txt": "${REGISTERED_USERS}" 144 | } 145 | }, 146 | { 147 | "kind": "ServiceAccount", 148 | "apiVersion": "v1", 149 | "metadata": { 150 | "name": "${APPLICATION_NAME}-hub", 151 | "labels": { 152 | 
"app": "${APPLICATION_NAME}" 153 | }, 154 | "annotations": { 155 | "serviceaccounts.openshift.io/oauth-redirectreference.first": "{\"kind\":\"OAuthRedirectReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Route\",\"name\":\"${APPLICATION_NAME}\"}}", 156 | "serviceaccounts.openshift.io/oauth-redirecturi.first": "hub/oauth_callback", 157 | "serviceaccounts.openshift.io/oauth-want-challenges": "false" 158 | } 159 | } 160 | }, 161 | { 162 | "kind": "RoleBinding", 163 | "apiVersion": "authorization.openshift.io/v1", 164 | "metadata": { 165 | "name": "${APPLICATION_NAME}-edit", 166 | "labels": { 167 | "app": "${APPLICATION_NAME}" 168 | } 169 | }, 170 | "subjects": [ 171 | { 172 | "kind": "ServiceAccount", 173 | "name": "${APPLICATION_NAME}-hub" 174 | } 175 | ], 176 | "roleRef": { 177 | "apiGroup": "rbac.authorization.k8s.io", 178 | "kind": "ClusterRole", 179 | "name": "edit" 180 | } 181 | }, 182 | { 183 | "kind": "DeploymentConfig", 184 | "apiVersion": "apps.openshift.io/v1", 185 | "metadata": { 186 | "name": "${APPLICATION_NAME}", 187 | "labels": { 188 | "app": "${APPLICATION_NAME}" 189 | } 190 | }, 191 | "spec": { 192 | "strategy": { 193 | "type": "Recreate" 194 | }, 195 | "triggers": [ 196 | { 197 | "type": "ConfigChange" 198 | }, 199 | { 200 | "type": "ImageChange", 201 | "imageChangeParams": { 202 | "automatic": true, 203 | "containerNames": [ 204 | "wait-for-database", 205 | "jupyterhub" 206 | ], 207 | "from": { 208 | "kind": "ImageStreamTag", 209 | "name": "${JUPYTERHUB_IMAGE}" 210 | } 211 | } 212 | } 213 | ], 214 | "replicas": 1, 215 | "selector": { 216 | "app": "${APPLICATION_NAME}", 217 | "deploymentconfig": "${APPLICATION_NAME}" 218 | }, 219 | "template": { 220 | "metadata": { 221 | "annotations": { 222 | "alpha.image.policy.openshift.io/resolve-names": "*" 223 | }, 224 | "labels": { 225 | "app": "${APPLICATION_NAME}", 226 | "deploymentconfig": "${APPLICATION_NAME}" 227 | } 228 | }, 229 | "spec": { 230 | "serviceAccountName": "${APPLICATION_NAME}-hub", 231 | "initContainers": [ 232 | { 233 | "name": "wait-for-database", 234 | "image": "${JUPYTERHUB_IMAGE}", 235 | "command": [ "wait-for-database" ], 236 | "resources": { 237 | "limits": { 238 | "memory": "${JUPYTERHUB_MEMORY}" 239 | } 240 | }, 241 | "env": [ 242 | { 243 | "name": "JUPYTERHUB_DATABASE_PASSWORD", 244 | "value": "${DATABASE_PASSWORD}" 245 | }, 246 | { 247 | "name": "JUPYTERHUB_DATABASE_HOST", 248 | "value": "${APPLICATION_NAME}-db" 249 | }, 250 | { 251 | "name": "JUPYTERHUB_DATABASE_NAME", 252 | "value": "postgres" 253 | } 254 | ] 255 | } 256 | ], 257 | "containers": [ 258 | { 259 | "name": "jupyterhub", 260 | "image": "${JUPYTERHUB_IMAGE}", 261 | "ports": [ 262 | { 263 | "containerPort": 8080, 264 | "protocol": "TCP" 265 | } 266 | ], 267 | "resources": { 268 | "limits": { 269 | "memory": "${JUPYTERHUB_MEMORY}" 270 | } 271 | }, 272 | "env": [ 273 | { 274 | "name": "CONFIGURATION_TYPE", 275 | "value": "workspace" 276 | }, 277 | { 278 | "name": "APPLICATION_NAME", 279 | "value": "${APPLICATION_NAME}" 280 | }, 281 | { 282 | "name": "JUPYTERHUB_NOTEBOOK_IMAGE", 283 | "value": "${NOTEBOOK_IMAGE}" 284 | }, 285 | { 286 | "name": "JUPYTERHUB_NOTEBOOK_MEMORY", 287 | "value": "${NOTEBOOK_MEMORY}" 288 | }, 289 | { 290 | "name": "JUPYTERHUB_NOTEBOOK_INTERFACE", 291 | "value": "${NOTEBOOK_INTERFACE}" 292 | }, 293 | { 294 | "name": "OPENSHIFT_PROJECT", 295 | "value": "${OPENSHIFT_PROJECT}" 296 | }, 297 | { 298 | "name": "JUPYTERHUB_VOLUME_SIZE", 299 | "value": "${VOLUME_SIZE}" 300 | }, 301 | { 302 | "name": 
"JUPYTERHUB_IDLE_TIMEOUT", 303 | "value": "${IDLE_TIMEOUT}" 304 | }, 305 | { 306 | "name": "JUPYTERHUB_DATABASE_PASSWORD", 307 | "value": "${DATABASE_PASSWORD}" 308 | }, 309 | { 310 | "name": "JUPYTERHUB_DATABASE_HOST", 311 | "value": "${APPLICATION_NAME}-db" 312 | }, 313 | { 314 | "name": "JUPYTERHUB_DATABASE_NAME", 315 | "value": "postgres" 316 | }, 317 | { 318 | "name": "JUPYTERHUB_COOKIE_SECRET", 319 | "value": "${COOKIE_SECRET}" 320 | }, 321 | { 322 | "name": "OAUTH_CLIENT_SECRET", 323 | "value": "${OAUTH_CLIENT_SECRET}" 324 | } 325 | ], 326 | "volumeMounts": [ 327 | { 328 | "name": "config", 329 | "mountPath": "/opt/app-root/configs" 330 | } 331 | ] 332 | } 333 | ], 334 | "volumes": [ 335 | { 336 | "name": "config", 337 | "configMap": { 338 | "name": "${APPLICATION_NAME}-cfg", 339 | "defaultMode": 420 340 | } 341 | } 342 | ] 343 | } 344 | } 345 | } 346 | }, 347 | { 348 | "kind": "Service", 349 | "apiVersion": "v1", 350 | "metadata": { 351 | "name": "${APPLICATION_NAME}", 352 | "labels": { 353 | "app": "${APPLICATION_NAME}" 354 | } 355 | }, 356 | "spec": { 357 | "ports": [ 358 | { 359 | "name": "8080-tcp", 360 | "protocol": "TCP", 361 | "port": 8080, 362 | "targetPort": 8080 363 | }, 364 | { 365 | "name": "8081-tcp", 366 | "protocol": "TCP", 367 | "port": 8081, 368 | "targetPort": 8081 369 | } 370 | ], 371 | "selector": { 372 | "app": "${APPLICATION_NAME}", 373 | "deploymentconfig": "${APPLICATION_NAME}" 374 | } 375 | } 376 | }, 377 | { 378 | "kind": "Route", 379 | "apiVersion": "route.openshift.io/v1", 380 | "metadata": { 381 | "name": "${APPLICATION_NAME}", 382 | "labels": { 383 | "app": "${APPLICATION_NAME}" 384 | } 385 | }, 386 | "spec": { 387 | "host": "", 388 | "to": { 389 | "kind": "Service", 390 | "name": "${APPLICATION_NAME}", 391 | "weight": 100 392 | }, 393 | "port": { 394 | "targetPort": "8080-tcp" 395 | }, 396 | "tls": { 397 | "termination": "edge", 398 | "insecureEdgeTerminationPolicy": "Redirect" 399 | } 400 | } 401 | }, 402 | { 403 | "kind": "PersistentVolumeClaim", 404 | "apiVersion": "v1", 405 | "metadata": { 406 | "name": "${APPLICATION_NAME}-db", 407 | "labels": { 408 | "app": "${APPLICATION_NAME}" 409 | } 410 | }, 411 | "spec": { 412 | "accessModes": [ 413 | "ReadWriteOnce" 414 | ], 415 | "resources": { 416 | "requests": { 417 | "storage": "1Gi" 418 | } 419 | } 420 | } 421 | }, 422 | { 423 | "kind": "DeploymentConfig", 424 | "apiVersion": "apps.openshift.io/v1", 425 | "metadata": { 426 | "name": "${APPLICATION_NAME}-db", 427 | "labels": { 428 | "app": "${APPLICATION_NAME}" 429 | } 430 | }, 431 | "spec": { 432 | "replicas": 1, 433 | "selector": { 434 | "app": "${APPLICATION_NAME}", 435 | "deploymentconfig": "${APPLICATION_NAME}-db" 436 | }, 437 | "strategy": { 438 | "type": "Recreate" 439 | }, 440 | "template": { 441 | "metadata": { 442 | "labels": { 443 | "app": "${APPLICATION_NAME}", 444 | "deploymentconfig": "${APPLICATION_NAME}-db" 445 | } 446 | }, 447 | "spec": { 448 | "containers": [ 449 | { 450 | "name": "postgresql", 451 | "env": [ 452 | { 453 | "name": "POSTGRESQL_USER", 454 | "value": "jupyterhub" 455 | }, 456 | { 457 | "name": "POSTGRESQL_PASSWORD", 458 | "value": "${DATABASE_PASSWORD}" 459 | }, 460 | { 461 | "name": "POSTGRESQL_DATABASE", 462 | "value": "postgres" 463 | } 464 | ], 465 | "livenessProbe": { 466 | "tcpSocket": { 467 | "port": 5432 468 | } 469 | }, 470 | "ports": [ 471 | { 472 | "containerPort": 5432, 473 | "protocol": "TCP" 474 | } 475 | ], 476 | "resources": { 477 | "limits": { 478 | "memory": "${DATABASE_MEMORY}" 479 | } 480 | }, 481 | 
"readinessProbe": { 482 | "exec": { 483 | "command": [ 484 | "/bin/sh", 485 | "-i", 486 | "-c", 487 | "psql -h 127.0.0.1 -U $POSTGRESQL_USER -q -d $POSTGRESQL_DATABASE -c 'SELECT 1'" 488 | ] 489 | } 490 | }, 491 | "volumeMounts": [ 492 | { 493 | "mountPath": "/var/lib/pgsql/data", 494 | "name": "data" 495 | } 496 | ] 497 | } 498 | ], 499 | "volumes": [ 500 | { 501 | "name": "data", 502 | "persistentVolumeClaim": { 503 | "claimName": "${APPLICATION_NAME}-db" 504 | } 505 | }, 506 | { 507 | "name": "config", 508 | "configMap": { 509 | "name": "${APPLICATION_NAME}-cfg", 510 | "defaultMode": 420 511 | } 512 | } 513 | ] 514 | } 515 | }, 516 | "triggers": [ 517 | { 518 | "imageChangeParams": { 519 | "automatic": true, 520 | "containerNames": [ 521 | "postgresql" 522 | ], 523 | "from": { 524 | "kind": "ImageStreamTag", 525 | "name": "postgresql:9.6", 526 | "namespace": "openshift" 527 | } 528 | }, 529 | "type": "ImageChange" 530 | }, 531 | { 532 | "type": "ConfigChange" 533 | } 534 | ] 535 | } 536 | }, 537 | { 538 | "kind": "Service", 539 | "apiVersion": "v1", 540 | "metadata": { 541 | "name": "${APPLICATION_NAME}-db", 542 | "labels": { 543 | "app": "${APPLICATION_NAME}" 544 | } 545 | }, 546 | "spec": { 547 | "ports": [ 548 | { 549 | "name": "5432-tcp", 550 | "protocol": "TCP", 551 | "port": 5432, 552 | "targetPort": 5432 553 | } 554 | ], 555 | "selector": { 556 | "app": "${APPLICATION_NAME}", 557 | "deploymentconfig": "${APPLICATION_NAME}-db" 558 | } 559 | } 560 | } 561 | ] 562 | } 563 | --------------------------------------------------------------------------------